aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/bundler.zig437
-rw-r--r--src/c.zig6
-rw-r--r--src/cli.zig151
-rw-r--r--src/darwin_c.zig8
-rw-r--r--src/fs.zig28
-rw-r--r--src/global.zig1
-rw-r--r--src/http.zig387
-rw-r--r--src/js_lexer.zig4
-rw-r--r--src/js_printer.zig368
-rw-r--r--src/linker.zig43
-rw-r--r--src/options.zig197
-rw-r--r--src/string_immutable.zig4
-rw-r--r--src/string_mutable.zig18
-rw-r--r--src/timer.zig2
14 files changed, 1088 insertions, 566 deletions
diff --git a/src/bundler.zig b/src/bundler.zig
index d7abbd84c..f4777c345 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -28,34 +28,72 @@ const Linker = linker.Linker;
const Timer = @import("./timer.zig");
pub const ServeResult = struct {
- value: Value,
- free: bool = true,
+ file: options.OutputFile,
mime_type: MimeType,
-
- // Either we:
- // - send pre-buffered asset body
- // - stream a file from the file system
- pub const Value = union(Tag) {
- file: File,
- build: options.OutputFile,
- none: u0,
-
- pub const Tag = enum {
- file,
- build,
- none,
- };
-
- pub const File = struct {
- absolute_path: string,
- handle: std.fs.File,
- };
- };
};
// const BundleMap =
pub const ResolveResults = ThreadSafeHashMap.ThreadSafeStringHashMap(Resolver.Resolver.Result);
pub const ResolveQueue = std.fifo.LinearFifo(Resolver.Resolver.Result, std.fifo.LinearFifoBufferType.Dynamic);
+
+// How it works end-to-end
+// 1. Resolve a file path from input using the resolver
+// 2. Look at the extension of that file path, and determine a loader
+// 3. If the loader is .js, .jsx, .ts, .tsx, or .json, run it through our JavaScript Parser
+// IF serving via HTTP and it's parsed without errors:
+// 4. If parsed without errors, generate a strong ETag & write the output directly to the network socket in the Printer.
+//        5. Else, write any errors to the error page
+// IF writing to disk AND it's parsed without errors:
+// 4. Write the output to a temporary file.
+// Why? Two reasons.
+// 1. At this point, we don't know what the best output path is.
+// Most of the time, you want the shortest common path, which you can't know until you've
+// built & resolved all paths.
+// Consider this directory tree:
+// - /Users/jarred/Code/app/src/index.tsx
+// - /Users/jarred/Code/app/src/Button.tsx
+// - /Users/jarred/Code/app/assets/logo.png
+// - /Users/jarred/Code/app/src/Button.css
+// - /Users/jarred/Code/app/node_modules/react/index.js
+// - /Users/jarred/Code/app/node_modules/react/cjs/react.development.js
+// Remember that we cannot know which paths need to be resolved without parsing the JavaScript.
+// If we stopped here: /Users/jarred/Code/app/src/Button.tsx
+// We would choose /Users/jarred/Code/app/src/ as the directory
+// Then, that would result in a directory structure like this:
+// - /Users/jarred/Code/app/src/Users/jarred/Code/app/node_modules/react/cjs/react.development.js
+// Which is absolutely insane
+//
+// 2. We will need to write to disk at some point!
+// - If we delay writing to disk, we need to print & allocate a potentially quite large
+// buffer (react-dom.development.js is 550 KB)
+// ^ This is how it used to work!
+// - If we delay printing, we need to keep the AST around. Which breaks all our
+// recycling logic since that could be many many ASTs.
+// 5. Once all files are written, determine the shortest common path
+// 6. Move all the temporary files to their intended destinations
+// IF writing to disk AND it's a file-like loader
+// 4. Hash the contents
+// - rewrite_paths.put(absolute_path, hash(file(absolute_path)))
+// 5. Resolve any imports of this file to that hash(file(absolute_path))
+// 6. Append to the files array with the new filename
+// 7. When parsing & resolving is over, just copy the file.
+//          - on macOS, ensure it does an APFS shallow clone so that it doesn't use disk space
+// IF serving via HTTP AND it's a file-like loader:
+// 4. Hash the metadata ${absolute_path}-${fstat.mtime}-${fstat.size}
+// 5. Use a deterministic prefix so we know what file to look for without copying it
+// Example scenario:
+// GET /logo-SIU3242.png
+//             404 Not Found because there is no file named "logo-SIU3242.png"
+// Instead, we can do this:
+// GET /public/SIU3242/logo.png
+// Our server sees "/public/" and knows the next segment will be a token
+// which lets it ignore that when resolving the absolute path on disk
+// 6. Compare the current hash with the expected hash
+//        7. IF does not match, do a 307 Temporary Redirect to the new file path
+// This adds an extra network request for outdated files, but that should be uncommon.
+// 7. IF does match, serve it with that hash as a weak ETag
+// 8. This should also just work unprefixed, but that will be served Cache-Control: private, no-store
+
pub const Bundler = struct {
options: options.BundleOptions,
log: *logger.Log,
@@ -119,61 +157,120 @@ pub const Bundler = struct {
);
}
- pub fn buildWithResolveResult(bundler: *Bundler, resolve_result: Resolver.Resolver.Result) !?options.OutputFile {
+ pub fn resetStore(bundler: *Bundler) void {
+ js_ast.Expr.Data.Store.reset();
+ js_ast.Stmt.Data.Store.reset();
+ }
+
+ pub fn buildWithResolveResult(
+ bundler: *Bundler,
+ resolve_result: Resolver.Resolver.Result,
+ allocator: *std.mem.Allocator,
+ loader: options.Loader,
+ comptime Writer: type,
+ writer: Writer,
+ ) !usize {
+ if (resolve_result.is_external) {
+ return 0;
+ }
+
+ errdefer bundler.resetStore();
+
+ var file_path = resolve_result.path_pair.primary;
+ file_path.pretty = allocator.dupe(u8, bundler.fs.relativeTo(file_path.text)) catch unreachable;
+
+ var old_bundler_allocator = bundler.allocator;
+ bundler.allocator = allocator;
+ defer bundler.allocator = old_bundler_allocator;
+ var result = bundler.parse(allocator, file_path, loader, resolve_result.dirname_fd) orelse {
+ bundler.resetStore();
+ return 0;
+ };
+ var old_linker_allocator = bundler.linker.allocator;
+ defer bundler.linker.allocator = old_linker_allocator;
+ bundler.linker.allocator = allocator;
+ try bundler.linker.link(file_path, &result);
+
+ return try bundler.print(
+ result,
+ Writer,
+ writer,
+ );
+ // output_file.version = if (resolve_result.is_from_node_modules) resolve_result.package_json_version else null;
+
+ }
+
+ pub fn buildWithResolveResultEager(bundler: *Bundler, resolve_result: Resolver.Resolver.Result) !?options.OutputFile {
if (resolve_result.is_external) {
return null;
}
+
errdefer js_ast.Expr.Data.Store.reset();
errdefer js_ast.Stmt.Data.Store.reset();
// Step 1. Parse & scan
const loader = bundler.options.loaders.get(resolve_result.path_pair.primary.name.ext) orelse .file;
var file_path = resolve_result.path_pair.primary;
-
file_path.pretty = Linker.relative_paths_list.append(bundler.fs.relativeTo(file_path.text)) catch unreachable;
- var result = bundler.parse(file_path, loader, resolve_result.dirname_fd) orelse {
- js_ast.Expr.Data.Store.reset();
- js_ast.Stmt.Data.Store.reset();
- return null;
- };
- try bundler.linker.link(file_path, &result);
+ switch (loader) {
+ .jsx, .tsx, .js, .json => {
+ var result = bundler.parse(bundler.allocator, file_path, loader, resolve_result.dirname_fd) orelse {
+ js_ast.Expr.Data.Store.reset();
+ js_ast.Stmt.Data.Store.reset();
+ return null;
+ };
- var output_file = try bundler.print(
- result,
- );
- // output_file.version = if (resolve_result.is_from_node_modules) resolve_result.package_json_version else null;
+ try bundler.linker.link(file_path, &result);
+ var output_file = options.OutputFile{
+ .input = file_path,
+ .loader = loader,
+ .value = undefined,
+ };
+
+ const output_dir = bundler.options.output_dir_handle.?;
+ if (std.fs.path.dirname(file_path.pretty)) |dirname| {
+ try output_dir.makePath(dirname);
+ }
+
+ var file = try output_dir.createFile(file_path.pretty, .{});
+ output_file.size = try bundler.print(
+ result,
+ js_printer.FileWriter,
+ js_printer.NewFileWriter(file),
+ );
- return output_file;
+ var file_op = options.OutputFile.FileOperation.fromFile(file.handle, file_path.pretty);
+ file_op.dir = output_dir.fd;
+ file_op.fd = file.handle;
+
+ if (bundler.fs.fs.needToCloseFiles()) {
+ file.close();
+ file_op.fd = 0;
+ }
+ file_op.is_tmpdir = false;
+ output_file.value = .{ .move = file_op };
+ return output_file;
+ },
+ // TODO:
+ else => {
+ return null;
+ },
+ }
}
pub fn print(
bundler: *Bundler,
result: ParseResult,
- ) !options.OutputFile {
- var allocator = bundler.allocator;
- var parts = &([_]string{result.source.path.text});
- var abs_path = bundler.fs.abs(parts);
- var rel_path = bundler.fs.relativeTo(abs_path);
- var pathname = Fs.PathName.init(rel_path);
-
- if (bundler.options.out_extensions.get(pathname.ext)) |ext| {
- pathname.ext = ext;
- }
-
- var stack_fallback = std.heap.stackFallback(1024, bundler.allocator);
-
- var stack = stack_fallback.get();
- var _out_path = std.fmt.allocPrint(stack, "{s}{s}{s}{s}", .{ pathname.dir, std.fs.path.sep_str, pathname.base, pathname.ext }) catch unreachable;
- defer stack.free(_out_path);
- var out_path = bundler.fs.filename_store.append(_out_path) catch unreachable;
-
+ comptime Writer: type,
+ writer: Writer,
+ ) !usize {
const ast = result.ast;
-
var symbols: [][]js_ast.Symbol = &([_][]js_ast.Symbol{ast.symbols});
- const print_result = try js_printer.printAst(
- allocator,
+ return try js_printer.printAst(
+ Writer,
+ writer,
ast,
js_ast.Symbol.Map.initList(symbols),
&result.source,
@@ -185,22 +282,15 @@ pub const Bundler = struct {
},
&bundler.linker,
);
- // allocator.free(result.source.contents);
-
- return options.OutputFile{
- .path = out_path,
- .contents = print_result.js,
- };
}
pub const ParseResult = struct {
source: logger.Source,
loader: options.Loader,
-
ast: js_ast.Ast,
};
- pub fn parse(bundler: *Bundler, path: Fs.Path, loader: options.Loader, dirname_fd: StoredFileDescriptorType) ?ParseResult {
+ pub fn parse(bundler: *Bundler, allocator: *std.mem.Allocator, path: Fs.Path, loader: options.Loader, dirname_fd: StoredFileDescriptorType) ?ParseResult {
if (enableTracing) {
bundler.timer.start();
}
@@ -212,6 +302,7 @@ pub const Bundler = struct {
}
var result: ParseResult = undefined;
const entry = bundler.resolver.caches.fs.readFile(bundler.fs, path.text, dirname_fd) catch return null;
+
const source = logger.Source.initFile(Fs.File{ .path = path, .contents = entry.contents }, bundler.allocator) catch return null;
switch (loader) {
@@ -219,7 +310,7 @@ pub const Bundler = struct {
var jsx = bundler.options.jsx;
jsx.parse = loader.isJSX();
var opts = js_parser.Parser.Options.init(jsx, loader);
- const value = (bundler.resolver.caches.js.parse(bundler.allocator, opts, bundler.options.define, bundler.log, &source) catch null) orelse return null;
+ const value = (bundler.resolver.caches.js.parse(allocator, opts, bundler.options.define, bundler.log, &source) catch null) orelse return null;
return ParseResult{
.ast = value,
.source = source,
@@ -227,14 +318,14 @@ pub const Bundler = struct {
};
},
.json => {
- var expr = json_parser.ParseJSON(&source, bundler.log, bundler.allocator) catch return null;
- var stmt = js_ast.Stmt.alloc(bundler.allocator, js_ast.S.ExportDefault{
+ var expr = json_parser.ParseJSON(&source, bundler.log, allocator) catch return null;
+ var stmt = js_ast.Stmt.alloc(allocator, js_ast.S.ExportDefault{
.value = js_ast.StmtOrExpr{ .expr = expr },
.default_name = js_ast.LocRef{ .loc = logger.Loc{}, .ref = Ref{} },
}, logger.Loc{ .start = 0 });
- var stmts = bundler.allocator.alloc(js_ast.Stmt, 1) catch unreachable;
+ var stmts = allocator.alloc(js_ast.Stmt, 1) catch unreachable;
stmts[0] = stmt;
- var parts = bundler.allocator.alloc(js_ast.Part, 1) catch unreachable;
+ var parts = allocator.alloc(js_ast.Part, 1) catch unreachable;
parts[0] = js_ast.Part{ .stmts = stmts };
return ParseResult{
@@ -282,6 +373,7 @@ pub const Bundler = struct {
defer bundler.log = original_bundler_logger;
defer bundler.resolver.log = original_resolver_logger;
bundler.log = log;
+ bundler.linker.allocator = allocator;
bundler.resolver.log = log;
// Resolving a public file has special behavior
@@ -353,22 +445,32 @@ pub const Bundler = struct {
break;
}
- if (_file) |file| {
- const _parts = [_]string{ bundler.options.public_dir, relative_unrooted_path };
+ if (_file) |*file| {
+ var stat = try file.stat();
+ var absolute_path = resolve_path.joinAbs(bundler.options.public_dir, .auto, relative_unrooted_path);
+
+ if (stat.kind == .SymLink) {
+ absolute_path = try std.fs.realpath(absolute_path, &tmp_buildfile_buf);
+ file.close();
+ file.* = try std.fs.openFileAbsolute(absolute_path, .{ .read = true });
+ stat = try file.stat();
+ }
+
+ if (stat.kind != .File) {
+ file.close();
+ return error.NotFile;
+ }
+
return ServeResult{
- .value = ServeResult.Value{ .file = .{
- .absolute_path = try bundler.fs.joinAlloc(allocator, &_parts),
- .handle = file,
- } },
- .mime_type = MimeType.byExtension(extension),
+ .file = options.OutputFile.initFile(file.*, absolute_path, stat.size),
+ .mime_type = MimeType.byExtension(std.fs.path.extension(absolute_path)[1..]),
};
}
}
if (strings.eqlComptime(relative_path, "__runtime.js")) {
return ServeResult{
- .free = false,
- .value = .{ .build = .{ .path = "__runtime.js", .contents = runtime.SourceContent } },
+ .file = options.OutputFile.initBuf(runtime.SourceContent, "__runtime.js", .js),
.mime_type = MimeType.javascript,
};
}
@@ -394,20 +496,27 @@ pub const Bundler = struct {
const resolved = (try bundler.resolver.resolve(bundler.fs.top_level_dir, absolute_path, .entry_point));
const loader = bundler.options.loaders.get(resolved.path_pair.primary.name.ext) orelse .file;
- const output = switch (loader) {
- .js, .jsx, .ts, .tsx, .json => ServeResult.Value{
- .build = (try bundler.buildWithResolveResult(resolved)) orelse return error.BuildFailed,
- },
- else => ServeResult.Value{ .file = ServeResult.Value.File{
- .absolute_path = resolved.path_pair.primary.text,
- .handle = try std.fs.openFileAbsolute(resolved.path_pair.primary.text, .{ .read = true, .write = false }),
- } },
- };
- return ServeResult{
- .value = output,
- .mime_type = MimeType.byLoader(loader, resolved.path_pair.primary.name.ext),
- };
+ switch (loader) {
+ .js, .jsx, .ts, .tsx, .json => {
+ return ServeResult{
+ .file = options.OutputFile.initPending(loader, resolved),
+ .mime_type = MimeType.byLoader(
+ loader,
+ bundler.options.out_extensions.get(resolved.path_pair.primary.name.ext) orelse resolved.path_pair.primary.name.ext,
+ ),
+ };
+ },
+ else => {
+ var abs_path = resolved.path_pair.primary.text;
+ const file = try std.fs.openFileAbsolute(abs_path, .{ .read = true });
+ var stat = try file.stat();
+ return ServeResult{
+ .file = options.OutputFile.initFile(file, abs_path, stat.size),
+ .mime_type = MimeType.byLoader(loader, abs_path),
+ };
+ },
+ }
}
pub fn bundle(
@@ -418,6 +527,8 @@ pub const Bundler = struct {
var bundler = try Bundler.init(allocator, log, opts);
bundler.configureLinker();
+ if (bundler.options.write and bundler.options.output_dir.len > 0) {}
+
// 100.00 µs std.fifo.LinearFifo(resolver.resolver.Result,std.fifo.LinearFifoBufferType { .Dynamic = {}}).writeItemAssumeCapacity
if (bundler.options.resolve_mode != .lazy) {
try bundler.resolve_queue.ensureUnusedCapacity(24);
@@ -435,30 +546,8 @@ pub const Bundler = struct {
var entry_point_i: usize = 0;
for (bundler.options.entry_points) |_entry| {
var entry: string = _entry;
- // if (!std.fs.path.isAbsolute(_entry)) {
- // const _paths = [_]string{ bundler.fs.top_level_dir, _entry };
- // entry = std.fs.path.join(allocator, &_paths) catch unreachable;
- // } else {
- // entry = allocator.dupe(u8, _entry) catch unreachable;
- // }
-
- // const dir = std.fs.path.dirname(entry) orelse continue;
- // const base = std.fs.path.basename(entry);
-
- // var dir_entry = try rfs.readDirectory(dir);
- // if (std.meta.activeTag(dir_entry) == .err) {
- // log.addErrorFmt(null, logger.Loc.Empty, allocator, "Failed to read directory: {s} - {s}", .{ dir, @errorName(dir_entry.err.original_err) }) catch unreachable;
- // continue;
- // }
-
- // const file_entry = dir_entry.entries.get(base) orelse continue;
- // if (file_entry.entry.kind(rfs) != .file) {
- // continue;
- // }
if (!strings.startsWith(entry, "./")) {
- // allocator.free(entry);
-
// Entry point paths without a leading "./" are interpreted as package
// paths. This happens because they go through general path resolution
// like all other import paths so that plugins can run on them. Requiring
@@ -508,7 +597,7 @@ pub const Bundler = struct {
while (bundler.resolve_queue.readItem()) |item| {
js_ast.Expr.Data.Store.reset();
js_ast.Stmt.Data.Store.reset();
- const output_file = bundler.buildWithResolveResult(item) catch continue orelse continue;
+ const output_file = bundler.buildWithResolveResultEager(item) catch continue orelse continue;
bundler.output_files.append(output_file) catch unreachable;
}
},
@@ -522,10 +611,9 @@ pub const Bundler = struct {
// }
if (bundler.linker.any_needs_runtime) {
- try bundler.output_files.append(options.OutputFile{
- .path = bundler.linker.runtime_source_path,
- .contents = runtime.SourceContent,
- });
+ try bundler.output_files.append(
+ options.OutputFile.initBuf(runtime.SourceContent, bundler.linker.runtime_source_path, .js),
+ );
}
if (enableTracing) {
@@ -538,7 +626,9 @@ pub const Bundler = struct {
);
}
- return try options.TransformResult.init(try allocator.dupe(u8, bundler.result.outbase), bundler.output_files.toOwnedSlice(), log, allocator);
+ var final_result = try options.TransformResult.init(try allocator.dupe(u8, bundler.result.outbase), bundler.output_files.toOwnedSlice(), log, allocator);
+ final_result.root_dir = bundler.options.output_dir_handle;
+ return final_result;
}
};
@@ -601,76 +691,78 @@ pub const Transformer = struct {
var ulimit: usize = Fs.FileSystem.RealFS.adjustUlimit();
var care_about_closing_files = !(FeatureFlags.store_file_descriptors and opts.entry_points.len * 2 < ulimit);
- for (opts.entry_points) |entry_point, i| {
- if (use_arenas) {
- arena = std.heap.ArenaAllocator.init(allocator);
- chosen_alloc = &arena.allocator;
- }
- defer {
- if (use_arenas) {
- arena.deinit();
- }
- }
+ for (opts.entry_points) |entry_point, i| {}
- var _log = logger.Log.init(allocator);
- var __log = &_log;
- const absolutePath = resolve_path.joinAbs(cwd, .auto, entry_point);
+ return try options.TransformResult.init(output_dir, output_files.toOwnedSlice(), log, allocator);
+ }
- const file = try std.fs.openFileAbsolute(absolutePath, std.fs.File.OpenFlags{ .read = true });
- defer {
- if (care_about_closing_files) {
- file.close();
- }
- }
+ pub fn processEntryPoint(
+ transformer: *Transformer,
+ entry_point: string,
+ i: usize,
+ comptime write_destination_type: options.WriteDestination,
+ ) !void {
+ var allocator = transformer.allocator;
+ var log = transformer.log;
- const stat = try file.stat();
+ var _log = logger.Log.init(allocator);
+ var __log = &_log;
+ const absolutePath = resolve_path.joinAbs(cwd, .auto, entry_point);
- // 1 byte sentinel
- const code = try file.readToEndAlloc(allocator, stat.size);
- defer {
- if (_log.msgs.items.len == 0) {
- allocator.free(code);
- }
- _log.appendTo(log) catch {};
+ const file = try std.fs.openFileAbsolute(absolutePath, std.fs.File.OpenFlags{ .read = true });
+ defer {
+ if (care_about_closing_files) {
+ file.close();
}
- const _file = Fs.File{ .path = Fs.Path.init(entry_point), .contents = code };
- var source = try logger.Source.initFile(_file, chosen_alloc);
- var loader: options.Loader = undefined;
- if (use_default_loaders) {
- loader = options.defaultLoaders.get(std.fs.path.extension(absolutePath)) orelse continue;
- } else {
- loader = options.Loader.forFileName(
- entry_point,
- loader_map,
- ) orelse continue;
+ }
+
+ const stat = try file.stat();
+
+ const code = try file.readToEndAlloc(allocator, stat.size);
+ defer {
+ if (_log.msgs.items.len == 0) {
+ allocator.free(code);
}
+ _log.appendTo(log) catch {};
+ }
+ const _file = Fs.File{ .path = Fs.Path.init(entry_point), .contents = code };
+ var source = try logger.Source.initFile(_file, allocator);
+ var loader: options.Loader = undefined;
+ if (use_default_loaders) {
+ loader = options.defaultLoaders.get(std.fs.path.extension(absolutePath)) orelse return;
+ } else {
+ loader = options.Loader.forFileName(
+ entry_point,
+ loader_map,
+ ) orelse return;
+ }
- jsx.parse = loader.isJSX();
+ jsx.parse = loader.isJSX();
- const parser_opts = js_parser.Parser.Options.init(jsx, loader);
- var _source = &source;
- const res = _transform(chosen_alloc, allocator, __log, parser_opts, loader, define, _source) catch continue;
+ const parser_opts = js_parser.Parser.Options.init(jsx, loader);
+ var _source = &source;
- const relative_path = resolve_path.relative(cwd, absolutePath);
- const out_path = resolve_path.joinAbs2(cwd, .auto, absolutePath, relative_path);
- try output_files.append(options.OutputFile{ .path = allocator.dupe(u8, out_path) catch continue, .contents = res.js });
- js_ast.Expr.Data.Store.reset();
- js_ast.Stmt.Data.Store.reset();
- }
+ const relative_path = resolve_path.relative(cwd, absolutePath);
+ const out_path = resolve_path.joinAbs(cwd, .auto, absolutePath, relative_path);
- return try options.TransformResult.init(output_dir, output_files.toOwnedSlice(), log, allocator);
+ switch (write_destination_type) {}
+
+ try output_files.append();
+ js_ast.Expr.Data.Store.reset();
+ js_ast.Stmt.Data.Store.reset();
}
pub fn _transform(
allocator: *std.mem.Allocator,
- result_allocator: *std.mem.Allocator,
log: *logger.Log,
opts: js_parser.Parser.Options,
loader: options.Loader,
- define: *Define,
- source: *logger.Source,
- ) !js_printer.PrintResult {
+ define: *const Define,
+ source: *const logger.Source,
+ comptime Writer: type,
+ writer: Writer,
+ ) !usize {
var ast: js_ast.Ast = undefined;
switch (loader) {
@@ -704,7 +796,8 @@ pub const Transformer = struct {
var symbols: [][]js_ast.Symbol = &([_][]js_ast.Symbol{ast.symbols});
return try js_printer.printAst(
- result_allocator,
+ Writer,
+ writer,
ast,
js_ast.Symbol.Map.initList(symbols),
source,
diff --git a/src/c.zig b/src/c.zig
new file mode 100644
index 000000000..1ac7f44ea
--- /dev/null
+++ b/src/c.zig
@@ -0,0 +1,6 @@
+const std = @import("std");
+
+pub usingnamespace switch (std.Target.current.os.tag) {
+ .macos => @import("./darwin_c.zig"),
+ else => struct {},
+};
diff --git a/src/cli.zig b/src/cli.zig
index 34a00d285..a69ac8833 100644
--- a/src/cli.zig
+++ b/src/cli.zig
@@ -319,87 +319,90 @@ pub const Cli = struct {
},
}
var did_write = false;
+ var stderr_writer = stderr.writer();
+ var buffered_writer = std.io.bufferedWriter(stderr_writer);
+ defer buffered_writer.flush() catch {};
+ var writer = buffered_writer.writer();
+ var err_writer = writer;
- var writer = stdout.writer();
var open_file_limit: usize = 32;
if (args.write) |write| {
if (write) {
+ const root_dir = result.root_dir orelse unreachable;
if (std.os.getrlimit(.NOFILE)) |limit| {
open_file_limit = limit.cur;
} else |err| {}
- did_write = true;
- var root_dir = std.fs.openDirAbsolute(result.outbase, std.fs.Dir.OpenDirOptions{}) catch brk: {
- std.fs.makeDirAbsolute(result.outbase) catch |err| {
- Output.printErrorln("error: Unable to mkdir \"{s}\": \"{s}\"", .{ result.outbase, @errorName(err) });
- std.os.exit(1);
- };
-
- var handle = std.fs.openDirAbsolute(result.outbase, std.fs.Dir.OpenDirOptions{}) catch |err2| {
- Output.printErrorln("error: Unable to open \"{s}\": \"{s}\"", .{ result.outbase, @errorName(err2) });
- std.os.exit(1);
- };
- break :brk handle;
- };
- // On posix, file handles automatically close on process exit by the OS
- // Closing files shows up in profiling.
- // So don't do that unless we actually need to.
- const do_we_need_to_close = !FeatureFlags.store_file_descriptors or (@intCast(usize, root_dir.fd) + open_file_limit) < result.output_files.len;
-
- defer {
- if (do_we_need_to_close) {
- root_dir.close();
- }
+ var all_paths = try allocator.alloc([]const u8, result.output_files.len);
+ var max_path_len: usize = 0;
+ var max_padded_size: usize = 0;
+ for (result.output_files) |f, i| {
+ all_paths[i] = f.input.text;
}
- for (result.output_files) |f| {
- var fp = f.path;
- if (fp[0] == std.fs.path.sep) {
- fp = fp[1..];
- }
-
- var _handle = root_dir.createFile(fp, std.fs.File.CreateFlags{
- .truncate = true,
- }) catch |err| brk: {
- // Only bother to create the directory if there's an error because that's probably why it errored
- if (std.fs.path.dirname(fp)) |dirname| {
- root_dir.makePath(dirname) catch {};
- }
+ var from_path = resolve_path.longestCommonPath(all_paths);
- // Then, retry!
- break :brk (root_dir.createFile(fp, std.fs.File.CreateFlags{
- .truncate = true,
- }) catch |err2| return err2);
- };
+ for (result.output_files) |f, i| {
+ max_path_len = std.math.max(
+ f.input.text[from_path.len..].len + 2,
+ max_path_len,
+ );
+ }
- try _handle.seekTo(0);
+ did_write = true;
- if (FeatureFlags.disable_filesystem_cache) {
- _ = std.os.fcntl(_handle.handle, std.os.F_NOCACHE, 1) catch 0;
- }
+ // On posix, file handles automatically close on process exit by the OS
+ // Closing files shows up in profiling.
+ // So don't do that unless we actually need to.
+ const do_we_need_to_close = !FeatureFlags.store_file_descriptors or (@intCast(usize, root_dir.fd) + open_file_limit) < result.output_files.len;
- defer {
- if (do_we_need_to_close) {
- _handle.close();
- }
+ var filepath_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+ filepath_buf[0] = '.';
+ filepath_buf[1] = '/';
+
+ for (result.output_files) |f, i| {
+ var rel_path: []const u8 = undefined;
+ switch (f.value) {
+ // easy mode: write the buffer
+ .buffer => |value| {
+ rel_path = resolve_path.relative(from_path, f.input.text);
+
+ try root_dir.writeFile(rel_path, value);
+ },
+ .move => |value| {
+ // const primary = f.input.text[from_path.len..];
+ // std.mem.copy(u8, filepath_buf[2..], primary);
+ // rel_path = filepath_buf[0 .. primary.len + 2];
+ rel_path = value.pathname;
+
+ // try f.moveTo(result.outbase, constStrToU8(rel_path), root_dir.fd);
+ },
+ .copy => |value| {
+ const rel_path_base = resolve_path.relativeToCommonPath(
+ from_path,
+ from_path,
+ f.input.text,
+ filepath_buf[2..],
+ comptime resolve_path.Platform.auto.separator(),
+ false,
+ );
+ rel_path = filepath_buf[0 .. rel_path_base.len + 2];
+
+ try f.copyTo(result.outbase, constStrToU8(rel_path), root_dir.fd);
+ },
+ .noop => {},
+ .pending => |value| {
+ unreachable;
+ },
}
- try _handle.writeAll(f.contents);
- }
-
- var max_path_len: usize = 0;
- var max_padded_size: usize = 0;
- for (result.output_files) |file| {
- max_path_len = std.math.max(file.path.len, max_path_len);
- }
-
- _ = try writer.write("\n");
- for (result.output_files) |file| {
- const padding_count = 2 + (max_path_len - file.path.len);
+ // Print summary
+ _ = try writer.write("\n");
+ const padding_count = 2 + (std.math.max(rel_path.len, max_path_len) - rel_path.len);
try writer.writeByteNTimes(' ', 2);
- try writer.writeAll(file.path);
+ try writer.writeAll(rel_path);
try writer.writeByteNTimes(' ', padding_count);
- const size = @intToFloat(f64, file.contents.len) / 1000.0;
+ const size = @intToFloat(f64, f.size) / 1000.0;
try std.fmt.formatFloatDecimal(size, .{ .precision = 2 }, writer);
try writer.writeAll(" KB\n");
}
@@ -407,25 +410,15 @@ pub const Cli = struct {
}
if (isDebug) {
- Output.errorLn("Expr count: {d}", .{js_ast.Expr.icount});
- Output.errorLn("Stmt count: {d}", .{js_ast.Stmt.icount});
- Output.errorLn("Binding count: {d}", .{js_ast.Binding.icount});
- Output.errorLn("File Descriptors: {d} / {d}", .{
+ err_writer.print("\nExpr count: {d}\n", .{js_ast.Expr.icount}) catch {};
+ err_writer.print("Stmt count: {d}\n", .{js_ast.Stmt.icount}) catch {};
+ err_writer.print("Binding count: {d}\n", .{js_ast.Binding.icount}) catch {};
+ err_writer.print("File Descriptors: {d} / {d}\n", .{
fs.FileSystem.max_fd,
open_file_limit,
- });
+ }) catch {};
}
- if (!did_write) {
- for (result.output_files) |file, i| {
- try stdout.writeAll(file.contents);
- if (i > 0) {
- _ = try writer.write("\n\n");
- }
- }
- }
-
- var err_writer = stderr.writer();
for (result.errors) |err| {
try err.writeFormat(err_writer);
_ = try err_writer.write("\n");
@@ -442,7 +435,5 @@ pub const Cli = struct {
var elapsed = @divTrunc(duration, @as(i128, std.time.ns_per_ms));
try err_writer.print("\nCompleted in {d}ms", .{elapsed});
}
-
- std.os.exit(0);
}
};
diff --git a/src/darwin_c.zig b/src/darwin_c.zig
new file mode 100644
index 000000000..21080ea85
--- /dev/null
+++ b/src/darwin_c.zig
@@ -0,0 +1,8 @@
+pub usingnamespace @import("std").c.builtins;
+
+// int clonefileat(int src_dirfd, const char * src, int dst_dirfd, const char * dst, int flags);
+pub extern "c" fn clonefileat(c_int, [*c]const u8, c_int, [*c]const u8, uint32_t: c_int) c_int;
+// int fclonefileat(int srcfd, int dst_dirfd, const char * dst, int flags);
+pub extern "c" fn fclonefileat(c_int, c_int, [*c]const u8, uint32_t: c_int) c_int;
+// int clonefile(const char * src, const char * dst, int flags);
+pub extern "c" fn clonefile([*c]const u8, [*c]const u8, uint32_t: c_int) c_int;
diff --git a/src/fs.zig b/src/fs.zig
index 2d5d270c2..0cde895bc 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -30,6 +30,22 @@ pub const FileSystem = struct {
dirname_store: *DirnameStore,
filename_store: *FilenameStore,
+ _tmpdir: ?std.fs.Dir = null,
+
+ pub fn tmpdir(fs: *FileSystem) std.fs.Dir {
+ if (fs._tmpdir == null) {
+ fs._tmpdir = fs.fs.openTmpDir() catch unreachable;
+ }
+
+ return fs._tmpdir.?;
+ }
+
+ var tmpname_buf: [64]u8 = undefined;
+ pub fn tmpname(fs: *const FileSystem, extname: string) !string {
+ const int = std.crypto.random.int(u64);
+ return try std.fmt.bufPrint(&tmpname_buf, "{x}{s}", .{ int, extname });
+ }
+
pub var max_fd: FileDescriptorType = 0;
pub inline fn setMaxFd(fd: anytype) void {
@@ -388,6 +404,18 @@ pub const FileSystem = struct {
file_limit: usize = 32,
file_quota: usize = 32,
+ pub var tmpdir_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+ pub var tmpdir_path: []const u8 = undefined;
+ pub fn openTmpDir(fs: *const RealFS) !std.fs.Dir {
+ if (isMac) {
+ var tmpdir_base = std.os.getenv("TMPDIR") orelse "/private/tmp";
+ tmpdir_path = try std.fs.realpath(tmpdir_base, &tmpdir_buf);
+ return try std.fs.openDirAbsolute(tmpdir_path, .{ .access_sub_paths = true, .iterate = true });
+ } else {
+ @compileError("Implement openTmpDir");
+ }
+ }
+
pub fn needToCloseFiles(rfs: *const RealFS) bool {
// On Windows, we must always close open file handles
// Windows locks files
diff --git a/src/global.zig b/src/global.zig
index 96ce1b447..8822a60d7 100644
--- a/src/global.zig
+++ b/src/global.zig
@@ -16,6 +16,7 @@ pub const build_target: BuildTarget = comptime {
pub const isWasm = build_target == .wasm;
pub const isNative = build_target == .native;
pub const isWasi = build_target == .wasi;
+pub const isMac = build_target == .native and std.Target.current.os.tag == .macos;
pub const isBrowser = !isWasi and isWasm;
pub const isWindows = std.Target.current.os.tag == .windows;
diff --git a/src/http.zig b/src/http.zig
index 5bd0a825a..24dbca10a 100644
--- a/src/http.zig
+++ b/src/http.zig
@@ -21,6 +21,7 @@ const Headers = picohttp.Headers;
const MimeType = @import("http/mime_type.zig");
const Bundler = bundler.Bundler;
+const js_printer = @import("js_printer.zig");
const SOCKET_FLAGS = os.SOCK_CLOEXEC;
threadlocal var req_headers_buf: [100]picohttp.Header = undefined;
@@ -270,7 +271,9 @@ pub const RequestContext = struct {
}
pub fn sendNotFound(req: *RequestContext) !void {
- return req.writeStatus(404);
+ try req.writeStatus(404);
+ try req.flushHeaders();
+ req.done();
}
pub fn sendInternalError(ctx: *RequestContext, err: anytype) !void {
@@ -313,7 +316,7 @@ pub const RequestContext = struct {
break :brk std.fmt.bufPrintIntToSlice(&buf, file_chunk_size, 16, true, .{}).len;
};
- threadlocal var file_chunk_buf: [chunk_preamble_len + 2 + file_chunk_size]u8 = undefined;
+ threadlocal var file_chunk_buf: [chunk_preamble_len + 2]u8 = undefined;
threadlocal var symlink_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
threadlocal var weak_etag_buffer: [100]u8 = undefined;
threadlocal var strong_etag_buffer: [100]u8 = undefined;
@@ -331,7 +334,12 @@ pub const RequestContext = struct {
}
pub fn handleGet(ctx: *RequestContext) !void {
- const result = try ctx.bundler.buildFile(&ctx.log, ctx.allocator, ctx.url.path, ctx.url.extname);
+ const result = try ctx.bundler.buildFile(
+ &ctx.log,
+ ctx.allocator,
+ ctx.url.path,
+ ctx.url.extname,
+ );
ctx.mime_type = result.mime_type;
ctx.appendHeader("Content-Type", result.mime_type.value);
@@ -341,58 +349,246 @@ pub const RequestContext = struct {
const send_body = ctx.method == .GET;
- switch (result.value) {
- .none => {
- unreachable;
- },
- .file => |file| {
- defer file.handle.close();
- var do_extra_close = false;
- var handle = file.handle;
-
- var real_path = file.absolute_path;
-
- // Assume "stat" is lying to us.
- // Don't write a 2xx status until we've successfully read at least 1 byte
- var stat = try handle.stat();
- switch (stat.kind) {
- .Directory,
- .NamedPipe,
- .UnixDomainSocket,
- .Whiteout,
- .BlockDevice,
- .CharacterDevice,
- => {
- ctx.log.addErrorFmt(null, logger.Loc.Empty, ctx.allocator, "Bad file type: {s}", .{@tagName(stat.kind)}) catch {};
- try ctx.sendBadRequest();
- return;
- },
- .SymLink => {
- const real_file_path = try std.fs.realpath(file.absolute_path, &symlink_buffer);
- real_path = real_file_path;
- handle = try std.fs.openFileAbsolute(real_file_path, .{});
- stat = try handle.stat();
- do_extra_close = true;
- },
- else => {},
+ switch (result.file.value) {
+ .pending => |resolve_result| {
+ if (resolve_result.is_external) {
+ try ctx.sendBadRequest();
+ return;
}
- defer {
- if (do_extra_close) {
- handle.close();
+
+ const SocketPrinterInternal = struct {
+ const SocketPrinterInternal = @This();
+ rctx: *RequestContext,
+ threadlocal var buffer: MutableString = undefined;
+ threadlocal var has_loaded_buffer: bool = false;
+
+ pub fn init(rctx: *RequestContext) SocketPrinterInternal {
+ // if (isMac) {
+ // _ = std.os.fcntl(file.handle, std.os.F_NOCACHE, 1) catch 0;
+ // }
+
+ if (!has_loaded_buffer) {
+ buffer = MutableString.init(std.heap.c_allocator, 0) catch unreachable;
+ has_loaded_buffer = true;
+ }
+
+ buffer.reset();
+
+ return SocketPrinterInternal{
+ .rctx = rctx,
+ };
}
- }
- var file_chunk_slice = file_chunk_buf[chunk_preamble_len .. file_chunk_buf.len - 3];
+ pub fn writeByte(_ctx: *SocketPrinterInternal, byte: u8) anyerror!usize {
+ try buffer.appendChar(byte);
+ return 1;
+ }
+ pub fn writeAll(_ctx: *SocketPrinterInternal, bytes: anytype) anyerror!usize {
+ try buffer.append(bytes);
+ return bytes.len;
+ }
+
+ pub fn done(
+ chunky: *SocketPrinterInternal,
+ ) anyerror!void {
+ const buf = buffer.toOwnedSliceLeaky();
+ defer buffer.reset();
+
+ if (buf.len == 0) {
+ try chunky.rctx.sendNoContent();
+ return;
+ }
+
+ if (FeatureFlags.strong_etags_for_built_files) {
+ if (buf.len < 16 * 16 * 16 * 16) {
+ const strong_etag = std.hash.Wyhash.hash(1, buf);
+ const etag_content_slice = std.fmt.bufPrintIntToSlice(strong_etag_buffer[0..49], strong_etag, 16, true, .{});
+
+ chunky.rctx.appendHeader("ETag", etag_content_slice);
+
+ if (chunky.rctx.header("If-None-Match")) |etag_header| {
+ if (std.mem.eql(u8, etag_content_slice, etag_header.value)) {
+ try chunky.rctx.sendNotModified();
+ return;
+ }
+ }
+ }
+ }
+
+ try chunky.rctx.writeStatus(200);
+ try chunky.rctx.prepareToSendBody(buf.len, false);
+ try chunky.rctx.writeBodyBuf(buf);
+ chunky.rctx.done();
+ }
+
+ pub fn flush(
+ _ctx: *SocketPrinterInternal,
+ ) anyerror!void {}
+ };
+
+ const SocketPrinter = js_printer.NewWriter(SocketPrinterInternal, SocketPrinterInternal.writeByte, SocketPrinterInternal.writeAll);
+
+ // const ChunkedTransferEncoding = struct {
+ // rctx: *RequestContext,
+ // has_disconnected: bool = false,
+ // chunk_written: usize = 0,
+ // pushed_chunks_count: usize = 0,
+ // disabled: bool = false,
+
+ // threadlocal var chunk_buf: [8096]u8 = undefined;
+ // threadlocal var chunk_header_buf: [32]u8 = undefined;
+ // threadlocal var chunk_footer_buf: [2]u8 = undefined;
+
+ // pub fn create(rctx: *RequestContext) @This() {
+ // return @This(){
+ // .rctx = rctx,
+ // };
+ // }
+
+ // pub fn writeByte(chunky: *@This(), byte: u8) anyerror!usize {
+ // return try chunky.writeAll(&[_]u8{byte});
+ // }
+ // pub fn writeAll(chunky: *@This(), bytes: anytype) anyerror!usize {
+ // // This lets us check if disabled without an extra branch
+ // const dest_chunk_written = (bytes.len + chunky.chunk_written) * @intCast(usize, @boolToInt(!chunky.disabled));
+ // switch (dest_chunk_written) {
+ // 0 => {
+ // return 0;
+ // },
+ // // Fast path
+ // 1...chunk_buf.len => {
+ // std.mem.copy(u8, chunk_buf[chunky.chunk_written..dest_chunk_written], bytes);
+ // chunky.chunk_written = dest_chunk_written;
+ // return bytes.len;
+ // },
+ // // Slow path
+ // else => {
+ // var byte_slice: []const u8 = bytes[0..bytes.len];
+ // while (byte_slice.len > 0) {
+ // var remainder_slice = chunk_buf[chunky.chunk_written..];
+ // const copied_size = std.math.min(remainder_slice.len, byte_slice.len);
+
+ // std.mem.copy(u8, remainder_slice, byte_slice[0..copied_size]);
+ // byte_slice = byte_slice[copied_size..];
+
+ // chunky.chunk_written += copied_size;
+
+ // if (chunky.chunk_written >= chunk_buf.len) {
+ // chunky.flush() catch |err| {
+ // return err;
+ // };
+ // }
+ // }
+ // return bytes.len;
+ // },
+ // }
+ // }
+
+ // pub fn flush(chunky: *@This()) anyerror!void {
+ // if (!chunky.rctx.has_written_last_header) {
+ // try chunky.rctx.writeStatus(200);
+ // try chunky.rctx.prepareToSendBody(0, true);
+ // }
+
+ // // how much are we pushing?
+ // // remember, this won't always be a full chunk size
+ // const content_length = chunky.chunk_written;
+ // // it could be zero if it's the final chunk
+ // var content_length_buf_size = std.fmt.formatIntBuf(&chunk_header_buf, content_length, 16, true, .{});
+ // var after_content_length = chunk_header_buf[content_length_buf_size..];
+ // after_content_length[0] = '\r';
+ // after_content_length[1] = '\n';
+
+ // var written = try chunky.rctx.conn.client.write(chunk_header_buf[0 .. content_length_buf_size + 2], SOCKET_FLAGS);
+ // if (written == 0) {
+ // chunky.disabled = true;
+ // return error.SocketClosed;
+ // }
+ // written = try chunky.rctx.conn.client.write(chunk_buf[0..chunky.chunk_written], SOCKET_FLAGS);
+ // chunky.chunk_written = chunky.chunk_written - written;
+
+ // chunky.pushed_chunks_count += 1;
+ // }
+
+ // pub fn done(chunky: *@This()) anyerror!void {
+ // if (chunky.disabled) {
+ // return;
+ // }
+
+ // defer chunky.rctx.done();
+
+ // // Actually, it's just one chunk so we'll send it all at once
+ // // instead of using transfer encoding
+ // if (chunky.pushed_chunks_count == 0 and !chunky.rctx.has_written_last_header) {
+
+ // // turns out it's empty!
+ // if (chunky.chunk_written == 0) {
+ // try chunky.rctx.sendNoContent();
+
+ // return;
+ // }
+
+ // const buffer = chunk_buf[0..chunky.chunk_written];
+
+ // if (FeatureFlags.strong_etags_for_built_files) {
+ // const strong_etag = std.hash.Wyhash.hash(1, buffer);
+ // const etag_content_slice = std.fmt.bufPrintIntToSlice(strong_etag_buffer[0..49], strong_etag, 16, true, .{});
+
+ // chunky.rctx.appendHeader("ETag", etag_content_slice);
+
+ // if (chunky.rctx.header("If-None-Match")) |etag_header| {
+ // if (std.mem.eql(u8, etag_content_slice, etag_header.value)) {
+ // try chunky.rctx.sendNotModified();
+ // return;
+ // }
+ // }
+ // }
+
+ // try chunky.rctx.writeStatus(200);
+ // try chunky.rctx.prepareToSendBody(chunky.chunk_written, false);
+ // try chunky.rctx.writeBodyBuf(buffer);
+ // return;
+ // }
+
+ // if (chunky.chunk_written > 0) {
+ // try chunky.flush();
+ // }
+
+ // _ = try chunky.rctx.writeSocket("0\r\n\r\n", SOCKET_FLAGS);
+ // }
+
+ // pub const Writer = js_printer.NewWriter(@This(), writeByte, writeAll);
+ // pub fn writer(chunky: *@This()) Writer {
+ // return Writer.init(chunky.*);
+ // }
+ // };
+
+ var chunked_encoder = SocketPrinter.init(SocketPrinterInternal.init(ctx));
+
+ // It will call flush for us automatically
+ defer ctx.bundler.resetStore();
+ const loader = ctx.bundler.options.loaders.get(resolve_result.path_pair.primary.name.ext) orelse .file;
+ var written = try ctx.bundler.buildWithResolveResult(resolve_result, ctx.allocator, loader, SocketPrinter, chunked_encoder);
+ },
+ .noop => {
+ try ctx.sendNotFound();
+ },
+ .copy, .move => |file| {
+ defer std.os.close(file.fd);
if (result.mime_type.category != .html) {
// hash(absolute_file_path, size, mtime)
var weak_etag = std.hash.Wyhash.init(1);
weak_etag_buffer[0] = 'W';
weak_etag_buffer[1] = '/';
- weak_etag.update(real_path);
- std.mem.writeIntNative(u64, weak_etag_tmp_buffer[0..8], stat.size);
+ weak_etag.update(result.file.input.text);
+ std.mem.writeIntNative(u64, weak_etag_tmp_buffer[0..8], result.file.size);
weak_etag.update(weak_etag_tmp_buffer[0..8]);
- std.mem.writeIntNative(i128, weak_etag_tmp_buffer[0..16], stat.mtime);
- weak_etag.update(weak_etag_tmp_buffer[0..16]);
+
+ if (result.file.mtime) |mtime| {
+ std.mem.writeIntNative(i128, weak_etag_tmp_buffer[0..16], mtime);
+ weak_etag.update(weak_etag_tmp_buffer[0..16]);
+ }
+
const etag_content_slice = std.fmt.bufPrintIntToSlice(weak_etag_buffer[2..], weak_etag.final(), 16, true, .{});
const complete_weak_etag = weak_etag_buffer[0 .. etag_content_slice.len + 2];
@@ -408,94 +604,30 @@ pub const RequestContext = struct {
ctx.appendHeader("Cache-Control", "no-cache");
}
- switch (stat.size) {
+ switch (result.file.size) {
0 => {
try ctx.sendNoContent();
return;
},
- 1...file_chunk_size - 1 => {
+ else => {
defer ctx.done();
- // always report by amount we actually read instead of stat-reported read
- const file_read = try handle.read(file_chunk_slice);
- if (file_read == 0) {
- return ctx.sendNoContent();
- }
-
- const file_slice = file_chunk_slice[0..file_read];
try ctx.writeStatus(200);
- try ctx.prepareToSendBody(file_read, false);
+ try ctx.prepareToSendBody(result.file.size, false);
if (!send_body) return;
- _ = try ctx.writeSocket(file_slice, SOCKET_FLAGS);
- },
- else => {
- var chunk_written: usize = 0;
- var size_slice = file_chunk_buf[0..chunk_preamble_len];
- var trailing_newline_slice = file_chunk_buf[file_chunk_buf.len - 3 ..];
- trailing_newline_slice[0] = '\r';
- trailing_newline_slice[1] = '\n';
- var pushed_chunk_count: usize = 0;
- while (true) : (pushed_chunk_count += 1) {
- defer chunk_written = 0;
-
- // Read from the file until we reach either end of file or the max chunk size
- chunk_written = handle.read(file_chunk_slice) catch |err| {
- if (pushed_chunk_count > 0) {
- _ = try ctx.writeSocket("0\r\n\r\n", SOCKET_FLAGS);
- }
- return ctx.sendInternalError(err);
- };
-
- // empty chunk
- if (chunk_written == 0) {
- defer ctx.done();
- if (pushed_chunk_count == 0) {
- return ctx.sendNoContent();
- }
- _ = try ctx.writeSocket("0\r\n\r\n", SOCKET_FLAGS);
- break;
- // final chunk
- } else if (chunk_written < file_chunk_size - 1) {
- defer ctx.done();
- var hex_size_slice = std.fmt.bufPrintIntToSlice(size_slice, chunk_written, 16, true, .{});
- var remainder_slice = file_chunk_buf[hex_size_slice.len..size_slice.len];
- remainder_slice[0] = '\r';
- remainder_slice[1] = '\n';
- if (pushed_chunk_count == 0) {
- ctx.writeStatus(200) catch {};
- ctx.prepareToSendBody(0, true) catch {};
- if (!send_body) return;
- }
- _ = try ctx.writeSocket(size_slice, SOCKET_FLAGS);
- _ = try ctx.writeSocket(file_chunk_slice[0..chunk_written], SOCKET_FLAGS);
- _ = try ctx.writeSocket(trailing_newline_slice, SOCKET_FLAGS);
- break;
- // full chunk
- } else {
- if (pushed_chunk_count == 0) {
- try ctx.writeStatus(200);
-
- try ctx.prepareToSendBody(0, true);
- if (!send_body) return;
- }
-
- var hex_size_slice = std.fmt.bufPrintIntToSlice(size_slice, chunk_written, 16, true, .{});
- var remainder_slice = file_chunk_buf[hex_size_slice.len..size_slice.len];
- remainder_slice[0] = '\r';
- remainder_slice[1] = '\n';
-
- _ = try ctx.writeSocket(&file_chunk_buf, SOCKET_FLAGS);
- }
- }
+ _ = try std.os.sendfile(
+ ctx.conn.client.socket.fd,
+ file.fd,
+ 0,
+ result.file.size,
+ &[_]std.os.iovec_const{},
+ &[_]std.os.iovec_const{},
+ 0,
+ );
},
}
},
- .build => |output| {
- defer {
- if (result.free) {
- ctx.bundler.allocator.free(output.contents);
- }
- }
+ .buffer => |buffer| {
// The version query string is only included for:
// - The runtime
@@ -509,7 +641,8 @@ pub const RequestContext = struct {
}
if (FeatureFlags.strong_etags_for_built_files) {
- const strong_etag = std.hash.Wyhash.hash(1, output.contents);
+ // TODO: don't hash runtime.js
+ const strong_etag = std.hash.Wyhash.hash(1, buffer);
const etag_content_slice = std.fmt.bufPrintIntToSlice(strong_etag_buffer[0..49], strong_etag, 16, true, .{});
ctx.appendHeader("ETag", etag_content_slice);
@@ -522,15 +655,15 @@ pub const RequestContext = struct {
}
}
- if (output.contents.len == 0) {
+ if (buffer.len == 0) {
return try ctx.sendNoContent();
}
defer ctx.done();
try ctx.writeStatus(200);
- try ctx.prepareToSendBody(output.contents.len, false);
+ try ctx.prepareToSendBody(buffer.len, false);
if (!send_body) return;
- _ = try ctx.writeSocket(output.contents, SOCKET_FLAGS);
+ _ = try ctx.writeSocket(buffer, SOCKET_FLAGS);
},
}
diff --git a/src/js_lexer.zig b/src/js_lexer.zig
index bc9ba69fd..1496221aa 100644
--- a/src/js_lexer.zig
+++ b/src/js_lexer.zig
@@ -1745,7 +1745,7 @@ pub const Lexer = struct {
// TODO: use wtf-8 encoding.
pub fn utf16ToString(lexer: *LexerType, js: JavascriptString) string {
- var temp = std.mem.zeroes([4]u8);
+ var temp: [4]u8 = undefined;
var list = std.ArrayList(u8).initCapacity(lexer.allocator, js.len) catch unreachable;
var i: usize = 0;
while (i < js.len) : (i += 1) {
@@ -1760,7 +1760,7 @@ pub const Lexer = struct {
const width = strings.encodeWTF8Rune(&temp, r1);
list.appendSlice(temp[0..width]) catch unreachable;
}
- return list.toOwnedSlice();
+ return list.items;
// return std.unicode.utf16leToUtf8Alloc(lexer.allocator, js) catch unreachable;
}
diff --git a/src/js_printer.zig b/src/js_printer.zig
index c0ba6ce55..97940ff71 100644
--- a/src/js_printer.zig
+++ b/src/js_printer.zig
@@ -118,7 +118,7 @@ const ExprFlag = packed struct {
}
};
-pub fn NewPrinter(comptime ascii_only: bool) type {
+pub fn NewPrinter(comptime ascii_only: bool, comptime Writer: type) type {
// comptime const comptime_buf_len = 64;
// comptime var comptime_buf = [comptime_buf_len]u8{};
// comptime var comptime_buf_i: usize = 0;
@@ -127,7 +127,6 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
symbols: Symbol.Map,
import_records: []importRecord.ImportRecord,
linker: ?*Linker,
- js: MutableString,
needs_semicolon: bool = false,
stmt_start: i32 = -1,
@@ -140,8 +139,8 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
prev_num_end: i32 = -1,
prev_reg_exp_end: i32 = -1,
call_target: ?Expr.Data = null,
- writer: MutableString.Writer,
- allocator: *std.mem.Allocator,
+ writer: Writer,
+
renamer: rename.Renamer,
prev_stmt_tag: Stmt.Tag = .s_empty,
@@ -200,60 +199,17 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
pub fn print(p: *Printer, str: anytype) void {
switch (@TypeOf(str)) {
- comptime_int => {
- p.js.appendChar(str) catch unreachable;
- },
- string => {
- if (FeatureFlags.disable_printing_null) {
- if (str.len > 0 and str[0] == 0 or (str[0] == '\\' and str[1] == '0')) {
- Global.panic("Attempted to print null char", .{});
- }
- }
-
- p.js.append(str) catch unreachable;
- },
- u8 => {
- if (FeatureFlags.disable_printing_null) {
- if (str == 0) {
- Global.panic("Attempted to print null char", .{});
- }
- }
- p.js.appendChar(str) catch unreachable;
- },
- u16 => {
- if (FeatureFlags.disable_printing_null) {
- if (str == 0) {
- Global.panic("Attempted to print null char", .{});
- }
- }
- p.js.appendChar(@intCast(u8, str)) catch unreachable;
- },
- u21 => {
- if (FeatureFlags.disable_printing_null) {
- if (str == 0) {
- Global.panic("Attempted to print null char", .{});
- }
- }
- p.js.appendChar(@intCast(u8, str)) catch unreachable;
+ comptime_int, u16, u8 => {
+ p.writer.print(@TypeOf(str), str);
},
else => {
- if (FeatureFlags.disable_printing_null) {
- if (str[0] == 0 or (str[0] == '\\' and str[1] == '0')) {
- Global.panic("Attempted to print null char", .{});
- }
- }
- p.js.append(@as(string, str)) catch unreachable;
+ p.writer.print(@TypeOf(str), str);
},
}
}
pub fn unsafePrint(p: *Printer, str: string) void {
- if (FeatureFlags.disable_printing_null) {
- if (str[0] == 0 or (str[0] == '\\' and str[1] == '0')) {
- Global.panic("Attempted to print null char", .{});
- }
- }
- p.js.appendAssumeCapacity(str);
+ p.print(str);
}
pub fn printIndent(p: *Printer) void {
@@ -263,7 +219,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
return;
}
- p.js.growBy(p.options.indent * " ".len) catch unreachable;
+ // p.js.growBy(p.options.indent * " ".len) catch unreachable;
var i: usize = 0;
while (i < p.options.indent) : (i += 1) {
@@ -289,8 +245,8 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
pub fn printSpaceBeforeIdentifier(
p: *Printer,
) void {
- const n = p.js.len();
- if (n > 0 and (js_lexer.isIdentifierContinue(p.js.list.items[n - 1]) or n == p.prev_reg_exp_end)) {
+ const n = p.writer.written;
+ if (n > 0 and (js_lexer.isIdentifierContinue(p.writer.prev_char) or n == p.prev_reg_exp_end)) {
p.print(" ");
}
}
@@ -298,10 +254,10 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
pub fn maybePrintSpace(
p: *Printer,
) void {
- const n = p.js.len();
- if (n <= 0) return;
+ const n = p.writer.written;
+ if (n == 0) return;
- switch (p.js.list.items[n - 1]) {
+ switch (p.writer.prev_char) {
' ', '\n' => {},
else => {
p.print(" ");
@@ -513,7 +469,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
// not in zig! CI pays for it instead
// its probably still doing some unnecessary integer conversion somewhere though
var slice = std.fmt.bufPrint(&parts, "{d}", .{float}) catch unreachable;
- p.js.list.appendSlice(p.allocator, slice) catch unreachable;
+ p.print(slice);
}
pub fn printQuotedUTF16(e: *Printer, text: JavascriptString, quote: u8) void {
@@ -526,7 +482,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
var c: u21 = 0;
var width: u3 = 0;
- e.js.growIfNeeded(text.len) catch unreachable;
+ // e(text.len) catch unreachable;
while (i < n) {
c = text[i];
@@ -927,10 +883,10 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
}
if (has_pure_comment) {
- const was_stmt_start = p.stmt_start == p.js.len();
+ const was_stmt_start = p.stmt_start == p.writer.written;
p.print("/* @__PURE__ */ ");
if (was_stmt_start) {
- p.stmt_start = p.js.lenI();
+ p.stmt_start = p.writer.written;
}
}
// We don't ever want to accidentally generate a direct eval expression here
@@ -1045,7 +1001,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
p.print("?");
}
if (p.canPrintIdentifier(e.name)) {
- if (isOptionalChain and p.prev_num_end == p.js.len()) {
+ if (isOptionalChain and p.prev_num_end == p.writer.written) {
// "1.toString" is a syntax error, so print "1 .toString" instead
p.print(" ");
}
@@ -1147,7 +1103,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
switch (e.body.stmts[0].data) {
.s_return => {
if (e.body.stmts[0].getReturn().value) |val| {
- p.arrow_expr_start = p.js.lenI();
+ p.arrow_expr_start = p.writer.written;
p.printExpr(val, .comma, ExprFlag.None());
wasPrinted = true;
}
@@ -1165,7 +1121,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
}
},
.e_function => |e| {
- const n = p.js.lenI();
+ const n = p.writer.written;
var wrap = p.stmt_start == n or p.export_default_start == n;
if (wrap) {
@@ -1193,7 +1149,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
}
},
.e_class => |e| {
- const n = p.js.lenI();
+ const n = p.writer.written;
var wrap = p.stmt_start == n or p.export_default_start == n;
if (wrap) {
p.print("(");
@@ -1251,7 +1207,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
p.print("]");
},
.e_object => |e| {
- const n = p.js.lenI();
+ const n = p.writer.written;
const wrap = p.stmt_start == n or p.arrow_expr_start == n;
if (wrap) {
@@ -1346,17 +1302,17 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
p.print("`");
},
.e_reg_exp => |e| {
- const n = p.js.len();
+ const n = p.writer.written;
// Avoid forming a single-line comment
- if (n > 0 and p.js.list.items[n - 1] == '/') {
+ if (n > 0 and p.writer.prev_char == '/') {
p.print(" ");
}
p.print(e.value);
// Need a space before the next identifier to avoid it turning into flags
- p.prev_reg_exp_end = p.js.lenI();
+ p.prev_reg_exp_end = p.writer.written;
},
.e_big_int => |e| {
p.printSpaceBeforeIdentifier();
@@ -1385,7 +1341,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
p.printNonNegativeFloat(absValue);
// Remember the end of the latest number
- p.prev_num_end = p.js.lenI();
+ p.prev_num_end = p.writer.written;
} else if (level.gte(.prefix)) {
// Expressions such as "(-1).toString" need to wrap negative numbers.
// Instead of testing for "value < 0" we test for "signbit(value)" and
@@ -1400,12 +1356,12 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
p.printNonNegativeFloat(absValue);
// Remember the end of the latest number
- p.prev_num_end = p.js.lenI();
+ p.prev_num_end = p.writer.written;
}
},
.e_identifier => |e| {
const name = p.renamer.nameForSymbol(e.ref);
- const wrap = p.js.lenI() == p.for_of_init_start and strings.eqlComptime(name, "let");
+ const wrap = p.writer.written == p.for_of_init_start and strings.eqlComptime(name, "let");
if (wrap) {
p.print("(");
@@ -1518,7 +1474,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
p.printSpaceBeforeOperator(e.op);
p.print(entry.text);
p.prev_op = e.op;
- p.prev_op_end = p.js.lenI();
+ p.prev_op_end = p.writer.written;
}
if (e.op.isPrefix()) {
@@ -1534,7 +1490,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
var wrap = level.gte(entry.level) or (e.op == Op.Code.bin_in and flags.forbid_in);
// Destructuring assignments must be parenthesized
- const n = p.js.lenI();
+ const n = p.writer.written;
if (n == p.stmt_start or n == p.arrow_expr_start) {
switch (e.left.data) {
.e_object => {
@@ -1626,7 +1582,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
p.printSpaceBeforeIdentifier();
p.print(entry.text);
p.prev_op = e.op;
- p.prev_op_end = p.js.lenI();
+ p.prev_op_end = p.writer.written;
}
p.printSpace();
@@ -1644,7 +1600,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
}
pub fn printSpaceBeforeOperator(p: *Printer, next: Op.Code) void {
- if (p.prev_op_end == p.js.lenI()) {
+ if (p.prev_op_end == p.writer.written) {
const prev = p.prev_op;
// "+ + y" => "+ +y"
// "+ ++ y" => "+ ++y"
@@ -1656,7 +1612,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
if (((prev == Op.Code.bin_add or prev == Op.Code.un_pos) and (next == Op.Code.bin_add or next == Op.Code.un_pos or next == Op.Code.un_pre_inc)) or
((prev == Op.Code.bin_sub or prev == Op.Code.un_neg) and (next == Op.Code.bin_sub or next == Op.Code.un_neg or next == Op.Code.un_pre_dec)) or
(prev == Op.Code.un_post_dec and next == Op.Code.bin_gt) or
- (prev == Op.Code.un_not and next == Op.Code.un_pre_dec and p.js.len() > 1 and p.js.list.items[p.js.list.items.len - 2] == '<'))
+ (prev == Op.Code.un_not and next == Op.Code.un_pre_dec and p.writer.written > 1 and p.writer.prev_prev_char == '<'))
{
p.print(" ");
}
@@ -2212,7 +2168,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
switch (s.value) {
.expr => |expr| {
// Functions and classes must be wrapped to avoid confusion with their statement forms
- p.export_default_start = p.js.lenI();
+ p.export_default_start = p.writer.written;
p.printExpr(expr, .comma, ExprFlag.None());
p.printSemicolonAfterStatement();
return;
@@ -2443,7 +2399,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
}
p.printSpace();
p.print("(");
- p.for_of_init_start = p.js.lenI();
+ p.for_of_init_start = p.writer.written;
p.printForLoopInit(s.init);
p.printSpace();
p.printSpaceBeforeIdentifier();
@@ -2622,11 +2578,11 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
if (record.wrap_with_to_module) {
if (p.options.runtime_imports.__require) |require_ref| {
p.print("import * as ");
- const module_name_start = p.js.list.items.len;
- const module_name_segment = (fs.PathName.init(record.path.pretty).nonUniqueNameString(p.allocator) catch unreachable)[1..];
+ var module_name_buf: [256]u8 = undefined;
+ var fixed_buf_allocator = std.heap.FixedBufferAllocator.init(&module_name_buf);
+ const module_name_segment = (fs.PathName.init(record.path.pretty).nonUniqueNameString(&fixed_buf_allocator.allocator) catch unreachable)[1..];
p.print(module_name_segment);
p.print("_module");
- const module_name_end = p.js.list.items[module_name_start..].len + module_name_start;
p.print(" from \"");
p.print(record.path.text);
p.print("\";\n");
@@ -2792,7 +2748,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
},
.s_expr => |s| {
p.printIndent();
- p.stmt_start = p.js.lenI();
+ p.stmt_start = p.writer.written;
p.printExpr(s.value, .lowest, ExprFlag.ExprResultIsUnused());
p.printSemicolonAfterStatement();
},
@@ -2948,11 +2904,7 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
}
pub fn printIdentifier(p: *Printer, identifier: string) void {
- if (ascii_only) {
- quoteIdentifier(&p.js, identifier) catch unreachable;
- } else {
- p.print(identifier);
- }
+ p.print(identifier);
}
pub fn printIdentifierUTF16(p: *Printer, name: JavascriptString) !void {
@@ -3005,16 +2957,19 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
}
}
- pub fn init(allocator: *std.mem.Allocator, tree: *const Ast, source: *const logger.Source, symbols: Symbol.Map, opts: Options, linker: ?*Linker) !Printer {
- // Heuristic: most lines of JavaScript are short.
- var js = try MutableString.init(allocator, 0);
+ pub fn init(
+ writer: Writer,
+ tree: *const Ast,
+ source: *const logger.Source,
+ symbols: Symbol.Map,
+ opts: Options,
+ linker: ?*Linker,
+ ) !Printer {
return Printer{
- .allocator = allocator,
.import_records = tree.import_records,
.options = opts,
.symbols = symbols,
- .js = js,
- .writer = js.writer(),
+ .writer = writer,
.linker = linker,
.renamer = rename.Renamer.init(symbols, source),
};
@@ -3022,75 +2977,186 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
};
}
-// TODO:
-pub fn quoteIdentifier(js: *MutableString, identifier: string) !void {
- return try js.append(identifier);
- // assert(identifier.len > 0);
- // var utf8iter = std.unicode.Utf8Iterator{ .bytes = identifier, .i = 0 };
- // try js.growIfNeeded(identifier.len);
-
- // var init = utf8iter.nextCodepoint() orelse unreachable;
- // var ascii_start: usize = if (init >= first_ascii and init <= last_ascii) 0 else std.math.maxInt(usize);
-
- // while (utf8iter.nextCodepoint()) |code_point| {
- // switch (code_point) {
- // first_ascii...last_ascii => {},
- // else => {
- // ascii_start = utf8iter.i;
- // },
- // }
- // }
+pub fn NewWriter(
+ comptime ContextType: type,
+ writeByte: fn (ctx: *ContextType, char: u8) anyerror!usize,
+ writeAll: fn (ctx: *ContextType, buf: anytype) anyerror!usize,
+) type {
+ return struct {
+ const Self = @This();
+ ctx: ContextType,
+ written: i32 = -1,
+ // Used by the printer
+ prev_char: u8 = 0,
+ prev_prev_char: u8 = 0,
+ err: ?anyerror = null,
+ orig_err: ?anyerror = null,
+
+ pub fn init(ctx: ContextType) Self {
+ return .{
+ .ctx = ctx,
+ };
+ }
+
+ pub fn getError(writer: *const Self) anyerror!void {
+ if (writer.orig_err) |orig_err| {
+ return orig_err;
+ }
+
+ if (writer.err) |err| {
+ return err;
+ }
+ }
+
+ pub inline fn print(writer: *Self, comptime ValueType: type, str: ValueType) void {
+ if (FeatureFlags.disable_printing_null) {
+ if (str == 0) {
+ Global.panic("Attempted to print null char", .{});
+ }
+ }
+
+ switch (ValueType) {
+ comptime_int, u16, u8 => {
+ const written = writeByte(&writer.ctx, @intCast(u8, str)) catch |err| brk: {
+ writer.orig_err = err;
+ break :brk 0;
+ };
+
+ writer.written += @intCast(i32, written);
+
+ writer.prev_prev_char = writer.prev_char;
+ writer.prev_char = str;
+
+ writer.err = if (written == 0) error.WriteFailed else writer.err;
+ },
+ else => {
+ const written = writeAll(&writer.ctx, str) catch |err| brk: {
+ writer.orig_err = err;
+ break :brk 0;
+ };
+
+ writer.written += @intCast(i32, written);
+
+ writer.prev_prev_char = if (written > 1) str[written - 1] else if (written == 1) writer.prev_char else writer.prev_prev_char;
+ writer.prev_char = if (written > 1) str[written - 1] else writer.prev_char;
+
+ if (written < str.len) {
+ writer.err = if (written == 0) error.WriteFailed else error.PartialWrite;
+ }
+ },
+ }
+ }
+
+ const hasFlush = std.meta.trait.hasFn("flush");
+ pub fn flush(writer: *Self) !void {
+ if (hasFlush(ContextType)) {
+ try writer.ctx.flush();
+ }
+ }
+ const hasDone = std.meta.trait.hasFn("done");
+ pub fn done(writer: *Self) !void {
+ if (hasDone(ContextType)) {
+ try writer.ctx.done();
+ }
+ }
+ };
}
-const UnicodePrinter = NewPrinter(false);
-const AsciiPrinter = NewPrinter(true);
+pub const DirectWriter = struct {
+ handle: FileDescriptorType,
+
+ pub fn write(writer: *DirectWriter, buf: []const u8) !usize {
+ return try std.os.write(writer.handle, buf);
+ }
+
+ pub fn writeAll(writer: *DirectWriter, buf: []const u8) !void {
+ _ = try std.os.write(writer.handle, buf);
+ }
+ pub const Error = std.os.WriteError;
+};
+
+// Unbuffered 653ms
+// Buffered 65k 47ms
+// Buffered 16k 43ms
+// Buffered 4k 55ms
+const FileWriterInternal = struct {
+ file: std.fs.File,
+ threadlocal var buffer: MutableString = undefined;
+ threadlocal var has_loaded_buffer: bool = false;
+
+ pub fn init(file: std.fs.File) FileWriterInternal {
+ // if (isMac) {
+ // _ = std.os.fcntl(file.handle, std.os.F_NOCACHE, 1) catch 0;
+ // }
+
+ if (!has_loaded_buffer) {
+ buffer = MutableString.init(alloc.dynamic, 0) catch unreachable;
+ has_loaded_buffer = true;
+ }
+
+ buffer.reset();
+
+ return FileWriterInternal{
+ .file = file,
+ };
+ }
+ pub fn writeByte(ctx: *FileWriterInternal, byte: u8) anyerror!usize {
+ try buffer.appendChar(byte);
+ return 1;
+ }
+ pub fn writeAll(ctx: *FileWriterInternal, bytes: anytype) anyerror!usize {
+ try buffer.append(bytes);
+ return bytes.len;
+ }
+
+ pub fn done(
+ ctx: *FileWriterInternal,
+ ) anyerror!void {
+ _ = try ctx.file.writeAll(buffer.toOwnedSliceLeaky());
+ buffer.reset();
+ }
+
+ pub fn flush(
+ ctx: *FileWriterInternal,
+ ) anyerror!void {}
+};
+
+pub const FileWriter = NewWriter(FileWriterInternal, FileWriterInternal.writeByte, FileWriterInternal.writeAll);
+pub fn NewFileWriter(file: std.fs.File) FileWriter {
+ var internal = FileWriterInternal.init(file);
+ return FileWriter.init(internal);
+}
pub fn printAst(
- allocator: *std.mem.Allocator,
+ comptime Writer: type,
+ _writer: Writer,
tree: Ast,
symbols: js_ast.Symbol.Map,
source: *const logger.Source,
ascii_only: bool,
opts: Options,
linker: ?*Linker,
-) !PrintResult {
- if (ascii_only) {
- var printer = try AsciiPrinter.init(
- allocator,
- &tree,
- source,
- symbols,
-
- opts,
- linker,
- );
-
- for (tree.parts) |part| {
- for (part.stmts) |stmt| {
- try printer.printStmt(stmt);
+) !usize {
+ const PrinterType = NewPrinter(false, Writer);
+ var writer = _writer;
+ var printer = try PrinterType.init(
+ writer,
+ &tree,
+ source,
+ symbols,
+ opts,
+ linker,
+ );
+ for (tree.parts) |part| {
+ for (part.stmts) |stmt| {
+ try printer.printStmt(stmt);
+ if (printer.writer.getError()) {} else |err| {
+ return err;
}
}
+ }
- return PrintResult{
- .js = printer.js.toOwnedSliceLeaky(),
- };
- } else {
- var printer = try UnicodePrinter.init(
- allocator,
- &tree,
- source,
- symbols,
- opts,
- linker,
- );
- for (tree.parts) |part| {
- for (part.stmts) |stmt| {
- try printer.printStmt(stmt);
- }
- }
+ try printer.writer.done();
- return PrintResult{
- .js = printer.js.toOwnedSliceLeaky(),
- };
- }
+ return @intCast(usize, std.math.max(printer.writer.written, 0));
}
diff --git a/src/linker.zig b/src/linker.zig
index f9ea44481..5490bad4e 100644
--- a/src/linker.zig
+++ b/src/linker.zig
@@ -198,8 +198,6 @@ pub const Linker = struct {
}
defer relative_path_allocator.reset();
- var pretty = try relative_paths_list.append(linker.fs.relative(source_dir, source_path));
- var pathname = Fs.PathName.init(pretty);
var absolute_pathname = Fs.PathName.init(source_path);
if (!linker.options.preserve_extensions) {
@@ -210,9 +208,13 @@ pub const Linker = struct {
switch (linker.options.import_path_format) {
.relative => {
+ var pretty = try linker.allocator.dupe(u8, linker.fs.relative(source_dir, source_path));
+ var pathname = Fs.PathName.init(pretty);
return Fs.Path.initWithPretty(pretty, pretty);
},
.relative_nodejs => {
+ var pretty = try linker.allocator.dupe(u8, linker.fs.relative(source_dir, source_path));
+ var pathname = Fs.PathName.init(pretty);
var path = Fs.Path.initWithPretty(pretty, pretty);
path.text = path.text[0 .. path.text.len - path.name.ext.len];
return path;
@@ -225,31 +227,28 @@ pub const Linker = struct {
}
if (linker.options.append_package_version_in_query_string and package_version != null) {
- const absolute_url = try relative_paths_list.append(
+ const absolute_url =
try std.fmt.allocPrint(
- &relative_path_allocator.allocator,
- "{s}{s}{s}?v={s}",
- .{
- linker.options.public_url,
- base,
- absolute_pathname.ext,
- package_version.?,
- },
- ),
+ linker.allocator,
+ "{s}{s}{s}?v={s}",
+ .{
+ linker.options.public_url,
+ base,
+ absolute_pathname.ext,
+ package_version.?,
+ },
);
return Fs.Path.initWithPretty(absolute_url, absolute_url);
} else {
- const absolute_url = try relative_paths_list.append(
- try std.fmt.allocPrint(
- &relative_path_allocator.allocator,
- "{s}{s}{s}",
- .{
- linker.options.public_url,
- base,
- absolute_pathname.ext,
- },
- ),
+ const absolute_url = try std.fmt.allocPrint(
+ linker.allocator,
+ "{s}{s}{s}",
+ .{
+ linker.options.public_url,
+ base,
+ absolute_pathname.ext,
+ },
);
return Fs.Path.initWithPretty(absolute_url, absolute_url);
diff --git a/src/options.zig b/src/options.zig
index 17cc147a1..606ef18cb 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -6,11 +6,19 @@ const resolver = @import("./resolver/resolver.zig");
const api = @import("./api/schema.zig");
const Api = api.Api;
const defines = @import("./defines.zig");
+const resolve_path = @import("./resolver/resolve_path.zig");
usingnamespace @import("global.zig");
const assert = std.debug.assert;
+pub const WriteDestination = enum {
+ stdout,
+ disk,
+ http,
+ // eventually: wasm
+};
+
pub fn validatePath(log: *logger.Log, fs: *Fs.FileSystem.Implementation, cwd: string, rel_path: string, allocator: *std.mem.Allocator, path_kind: string) string {
if (rel_path.len == 0) {
return "";
@@ -474,6 +482,7 @@ pub const BundleOptions = struct {
public_dir: string = "public",
public_dir_enabled: bool = true,
output_dir: string = "",
+ output_dir_handle: ?std.fs.Dir = null,
public_dir_handle: ?std.fs.Dir = null,
write: bool = false,
preserve_symlinks: bool = false,
@@ -642,6 +651,22 @@ pub const BundleOptions = struct {
}
}
+ if (opts.write and opts.output_dir.len > 0) {
+ opts.output_dir_handle = std.fs.openDirAbsolute(opts.output_dir, std.fs.Dir.OpenDirOptions{}) catch brk: {
+ std.fs.makeDirAbsolute(opts.output_dir) catch |err| {
+ Output.printErrorln("error: Unable to mkdir \"{s}\": \"{s}\"", .{ opts.output_dir, @errorName(err) });
+ std.os.exit(1);
+ };
+
+ var handle = std.fs.openDirAbsolute(opts.output_dir, std.fs.Dir.OpenDirOptions{}) catch |err2| {
+ Output.printErrorln("error: Unable to open \"{s}\": \"{s}\"", .{ opts.output_dir, @errorName(err2) });
+ std.os.exit(1);
+ };
+ break :brk handle;
+ };
+ Fs.FileSystem.setMaxFd(opts.output_dir_handle.?.fd);
+ }
+
return opts;
}
};
@@ -686,7 +711,6 @@ pub const TransformOptions = struct {
if (defaultLoaders.get(entryPoint.path.name.ext)) |defaultLoader| {
loader = defaultLoader;
}
-
assert(code.len > 0);
return TransformOptions{
@@ -700,10 +724,174 @@ pub const TransformOptions = struct {
}
};
+// Instead of keeping files in-memory, we:
+// 1. Write directly to disk
+// 2. (Optional) move the file to the destination
+// This saves us from allocating a buffer
pub const OutputFile = struct {
- path: string,
- version: ?string = null,
- contents: string,
+ loader: Loader,
+ input: Fs.Path,
+ value: Value,
+ size: usize = 0,
+ mtime: ?i128 = null,
+
+ // Depending on:
+ // - The platform
+ // - The number of open file handles
+ // - Whether or not a file of the same name exists
+ // We may use a different system call
+ pub const FileOperation = struct {
+ pathname: string,
+ fd: FileDescriptorType = 0,
+ dir: FileDescriptorType = 0,
+ is_tmpdir: bool = false,
+
+ pub fn fromFile(fd: FileDescriptorType, pathname: string) FileOperation {
+ return .{
+ .pathname = pathname,
+ .fd = fd,
+ };
+ }
+
+ pub fn getPathname(file: *const FileOperation) string {
+ if (file.is_tmpdir) {
+ return resolve_path.joinAbs(@TypeOf(Fs.FileSystem.instance.fs).tmpdir_path, .auto, file.pathname);
+ } else {
+ return file.pathname;
+ }
+ }
+ };
+
+ pub const Value = union(Kind) {
+ buffer: []const u8,
+ move: FileOperation,
+ copy: FileOperation,
+ noop: u0,
+ pending: resolver.Resolver.Result,
+ };
+
+ pub const Kind = enum { move, copy, noop, buffer, pending };
+
+ pub fn initPending(loader: Loader, pending: resolver.Resolver.Result) OutputFile {
+ return .{
+ .loader = .file,
+ .input = pending.path_pair.primary,
+ .size = 0,
+ .value = .{ .pending = pending },
+ };
+ }
+
+ pub fn initFile(file: std.fs.File, pathname: string, size: usize) OutputFile {
+ return .{
+ .loader = .file,
+ .input = Fs.Path.init(pathname),
+ .size = size,
+ .value = .{ .copy = FileOperation.fromFile(file.handle, pathname) },
+ };
+ }
+
+ pub fn initFileWithDir(file: std.fs.File, pathname: string, size: usize, dir: std.fs.Dir) OutputFile {
+ var res = initFile(file, pathname, size);
+ res.value.copy.dir_handle = dir.fd;
+ return res;
+ }
+
+ pub fn initBuf(buf: []const u8, pathname: string, loader: Loader) OutputFile {
+ return .{
+ .loader = loader,
+ .input = Fs.Path.init(pathname),
+ .size = buf.len,
+ .value = .{ .buffer = buf },
+ };
+ }
+
+ pub fn moveTo(file: *const OutputFile, base_path: string, rel_path: []u8, dir: FileDescriptorType) !void {
+ var move = file.value.move;
+ if (move.dir > 0) {
+ std.os.renameat(move.dir, move.pathname, dir, rel_path) catch |err| {
+ const dir_ = std.fs.Dir{ .fd = dir };
+ if (std.fs.path.dirname(rel_path)) |dirname| {
+ dir_.makePath(dirname) catch {};
+ std.os.renameat(move.dir, move.pathname, dir, rel_path) catch {};
+ return;
+ }
+ };
+ return;
+ }
+
+ try std.os.rename(move.pathname, resolve_path.joinAbs(base_path, .auto, rel_path));
+ }
+
+ pub fn copyTo(file: *const OutputFile, base_path: string, rel_path: []u8, dir: FileDescriptorType) !void {
+ var copy = file.value.copy;
+ if (isMac and copy.fd > 0) {
+ // First try using a copy-on-write clonefile()
+ // this will fail if the destination already exists
+ rel_path.ptr[rel_path.len + 1] = 0;
+ var rel_c_path = rel_path.ptr[0..rel_path.len :0];
+ const success = C.fclonefileat(copy.fd, dir, rel_c_path, 0) == 0;
+ if (success) {
+ return;
+ }
+ }
+
+ var dir_obj = std.fs.Dir{ .fd = dir };
+ const file_out = (try dir_obj.createFile(rel_path, .{}));
+
+ const fd_out = file_out.handle;
+ var do_close = false;
+ // TODO: close file_out on error
+ const fd_in = if (copy.fd > 0) copy.fd else (try std.fs.openFileAbsolute(copy.getPathname(), .{ .read = true })).handle;
+
+ if (isNative) {
+ Fs.FileSystem.setMaxFd(fd_out);
+ Fs.FileSystem.setMaxFd(fd_in);
+ do_close = Fs.FileSystem.instance.fs.needToCloseFiles();
+ }
+
+ defer {
+ if (do_close) {
+ std.os.close(fd_out);
+ std.os.close(fd_in);
+ }
+ }
+
+ const os = std.os;
+
+ if (comptime std.Target.current.isDarwin()) {
+ const rc = os.system.fcopyfile(fd_in, fd_out, null, os.system.COPYFILE_DATA);
+ if (os.errno(rc) == 0) {
+ return;
+ }
+ }
+
+ if (std.Target.current.os.tag == .linux) {
+ // Try copy_file_range first as that works at the FS level and is the
+ // most efficient method (if available).
+ var offset: u64 = 0;
+ cfr_loop: while (true) {
+ // The kernel checks the u64 value `offset+count` for overflow, use
+ // a 32 bit value so that the syscall won't return EINVAL except for
+ // impossibly large files (> 2^64-1 - 2^32-1).
+ const amt = try os.copy_file_range(fd_in, offset, fd_out, offset, math.maxInt(u32), 0);
+ // Terminate when no data was copied
+ if (amt == 0) break :cfr_loop;
+ offset += amt;
+ }
+ return;
+ }
+
+ // Sendfile is a zero-copy mechanism iff the OS supports it, otherwise the
+ // fallback code will copy the contents chunk by chunk.
+ const empty_iovec = [0]os.iovec_const{};
+ var offset: u64 = 0;
+ sendfile_loop: while (true) {
+ const amt = try os.sendfile(fd_out, fd_in, offset, 0, &empty_iovec, &empty_iovec, 0);
+ // Terminate when no data was copied
+ if (amt == 0) break :sendfile_loop;
+ offset += amt;
+ }
+ }
};
pub const TransformResult = struct {
@@ -711,6 +899,7 @@ pub const TransformResult = struct {
warnings: []logger.Msg = &([_]logger.Msg{}),
output_files: []OutputFile = &([_]OutputFile{}),
outbase: string,
+ root_dir: ?std.fs.Dir = null,
pub fn init(
outbase: string,
output_files: []OutputFile,
diff --git a/src/string_immutable.zig b/src/string_immutable.zig
index 007d865c3..2e07f04ba 100644
--- a/src/string_immutable.zig
+++ b/src/string_immutable.zig
@@ -211,7 +211,7 @@ pub fn eqlUtf16(comptime self: string, other: JavascriptString) bool {
}
pub fn toUTF8Alloc(allocator: *std.mem.Allocator, js: JavascriptString) !string {
- var temp = std.mem.zeroes([4]u8);
+ var temp: [4]u8 = undefined;
var list = std.ArrayList(u8).initCapacity(allocator, js.len) catch unreachable;
var i: usize = 0;
while (i < js.len) : (i += 1) {
@@ -226,7 +226,7 @@ pub fn toUTF8Alloc(allocator: *std.mem.Allocator, js: JavascriptString) !string
const width = encodeWTF8Rune(&temp, r1);
list.appendSlice(temp[0..width]) catch unreachable;
}
- return list.toOwnedSlice();
+ return list.items;
}
// Check utf16 string equals utf8 string without allocating extra memory
diff --git a/src/string_mutable.zig b/src/string_mutable.zig
index 64ebf7601..825e5b86d 100644
--- a/src/string_mutable.zig
+++ b/src/string_mutable.zig
@@ -102,25 +102,31 @@ pub const MutableString = struct {
}
}
- pub fn growBy(self: *MutableString, amount: usize) callconv(.Inline) !void {
+ pub inline fn growBy(self: *MutableString, amount: usize) !void {
try self.list.ensureUnusedCapacity(self.allocator, amount);
}
- pub fn appendChar(self: *MutableString, char: u8) callconv(.Inline) !void {
+ pub inline fn reset(
+ self: *MutableString,
+ ) void {
+ self.list.shrinkRetainingCapacity(0);
+ }
+
+ pub inline fn appendChar(self: *MutableString, char: u8) !void {
try self.list.append(self.allocator, char);
}
- pub fn appendCharAssumeCapacity(self: *MutableString, char: u8) callconv(.Inline) void {
+ pub inline fn appendCharAssumeCapacity(self: *MutableString, char: u8) void {
self.list.appendAssumeCapacity(char);
}
- pub fn append(self: *MutableString, char: []const u8) callconv(.Inline) !void {
+ pub inline fn append(self: *MutableString, char: []const u8) !void {
try self.list.appendSlice(self.allocator, char);
}
- pub fn appendAssumeCapacity(self: *MutableString, char: []const u8) callconv(.Inline) void {
+ pub inline fn appendAssumeCapacity(self: *MutableString, char: []const u8) void {
self.list.appendSliceAssumeCapacity(
char,
);
}
- pub fn lenI(self: *MutableString) callconv(.Inline) i32 {
+ pub inline fn lenI(self: *MutableString) i32 {
return @intCast(i32, self.list.items.len);
}
diff --git a/src/timer.zig b/src/timer.zig
index cf56cc6ec..39e4a7822 100644
--- a/src/timer.zig
+++ b/src/timer.zig
@@ -16,3 +16,5 @@ pub fn stop(timer: *Timer) void {
pub fn seconds(timer: *const Timer) f64 {
return @intToFloat(f64, timer.elapsed) / std.time.ns_per_s;
}
+
+pub const Group = struct {};