aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGravatar Jarred Sumner <jarred@jarredsumner.com> 2021-10-08 17:25:31 -0700
committerGravatar Jarred Sumner <jarred@jarredsumner.com> 2021-10-08 17:25:31 -0700
commitf10301884246003573a3aff123e5921e927379a5 (patch)
tree25dad338b5b3044e91b6facfae7af6f8922105a7
parentdbedfa3d06d6148d489e43c3c4405763c1b13097 (diff)
downloadbun-f10301884246003573a3aff123e5921e927379a5.tar.gz
bun-f10301884246003573a3aff123e5921e927379a5.tar.zst
bun-f10301884246003573a3aff123e5921e927379a5.zip
Flatten FS namespace to better facilitate testing
-rw-r--r--src/bundler.zig6
-rw-r--r--src/cache.zig82
-rw-r--r--src/env_loader.zig6
-rw-r--r--src/feature_flags.zig2
-rw-r--r--src/fs.zig1507
-rw-r--r--src/http.zig8
-rw-r--r--src/linker.zig2
-rw-r--r--src/node_module_bundle.zig2
-rw-r--r--src/options.zig2
-rw-r--r--src/resolver/dir_info.zig4
-rw-r--r--src/resolver/resolver.zig22
-rw-r--r--src/router.zig4
12 files changed, 803 insertions, 844 deletions
diff --git a/src/bundler.zig b/src/bundler.zig
index 87f37d220..5bb037239 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -233,7 +233,7 @@ pub const Bundler = struct {
switch (this.options.env.behavior) {
.prefix, .load_all => {
// Step 1. Load the project root.
- var dir: *Fs.FileSystem.DirEntry = ((this.resolver.readDirInfo(this.fs.top_level_dir) catch return) orelse return).getEntries() orelse return;
+ var dir: *Fs.DirEntry = ((this.resolver.readDirInfo(this.fs.top_level_dir) catch return) orelse return).getEntries() orelse return;
// Process always has highest priority.
this.env.loadProcess();
@@ -732,7 +732,7 @@ pub const Bundler = struct {
std.hash.Wyhash.hash(@intCast(usize, std.time.milliTimestamp()) % std.math.maxInt(u32), std.mem.span(destination)),
);
- var tmpfile = Fs.FileSystem.RealFS.Tmpfile{};
+ var tmpfile = Fs.RealFS.Tmpfile{};
try tmpfile.create(&bundler.fs.fs, tmpname);
errdefer tmpfile.closeAndDelete(tmpname);
@@ -2971,7 +2971,7 @@ pub const Transformer = struct {
var arena: std.heap.ArenaAllocator = undefined;
const use_arenas = opts.entry_points.len > 8;
- var ulimit: usize = Fs.FileSystem.RealFS.adjustUlimit() catch unreachable;
+ var ulimit: usize = Fs.RealFS.adjustUlimit() catch unreachable;
var care_about_closing_files = !(FeatureFlags.store_file_descriptors and opts.entry_points.len * 2 < ulimit);
var transformer = Transformer{
diff --git a/src/cache.zig b/src/cache.zig
index 8bd1221f7..53f854449 100644
--- a/src/cache.zig
+++ b/src/cache.zig
@@ -18,7 +18,7 @@ pub const FsCacheEntry = struct {
contents: string,
fd: StoredFileDescriptorType = 0,
// Null means its not usable
- mod_key: ?fs.FileSystem.Implementation.ModKey = null,
+ // mod_key: ?fs.FileSystem.Implementation.ModKey = null,
pub fn deinit(entry: *FsCacheEntry, allocator: *std.mem.Allocator) void {
if (entry.contents.len > 0) {
@@ -70,6 +70,7 @@ pub const Fs = struct {
if (_file_handle == null) {
file_handle = try std.fs.openFileAbsoluteZ(path, .{ .read = true });
+ fs.FileSystem.setMaxFd(file_handle.handle);
}
defer {
@@ -78,42 +79,17 @@ pub const Fs = struct {
}
}
- // If the file's modification key hasn't changed since it was cached, assume
- // the contents of the file are also the same and skip reading the file.
- var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKeyWithFile(path, file_handle) catch |err| handler: {
- switch (err) {
- error.FileNotFound, error.AccessDenied => {
- return err;
- },
- else => {
- if (isDebug) {
- Output.printError("modkey error: {s}", .{@errorName(err)});
- }
- break :handler null;
- },
+ const stat = try std.os.fstat(file_handle.handle);
+
+ var file = rfs.readFileWithHandle(path, @intCast(usize, stat.size), file_handle, true, shared) catch |err| {
+ if (isDebug) {
+ Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
}
+ return err;
};
- var file: fs.File = undefined;
- if (mod_key) |modk| {
- file = rfs.readFileWithHandle(path, modk.size, file_handle, true, shared) catch |err| {
- if (isDebug) {
- Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
- }
- return err;
- };
- } else {
- file = rfs.readFileWithHandle(path, null, file_handle, true, shared) catch |err| {
- if (isDebug) {
- Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
- }
- return err;
- };
- }
-
return Entry{
.contents = file.contents,
- .mod_key = mod_key,
.fd = if (FeatureFlags.store_file_descriptors) file_handle.handle else 0,
};
}
@@ -151,47 +127,27 @@ pub const Fs = struct {
}
defer {
+ fs.FileSystem.setMaxFd(file_handle.handle);
+
if (rfs.needToCloseFiles() and _file_handle == null) {
file_handle.close();
}
}
- // If the file's modification key hasn't changed since it was cached, assume
- // the contents of the file are also the same and skip reading the file.
- var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKeyWithFile(path, file_handle) catch |err| handler: {
- switch (err) {
- error.FileNotFound, error.AccessDenied => {
- return err;
- },
- else => {
- if (isDebug) {
- Output.printError("modkey error: {s}", .{@errorName(err)});
- }
- break :handler null;
- },
- }
- };
+ const stat = try std.os.fstat(file_handle.handle);
var file: fs.File = undefined;
- if (mod_key) |modk| {
- file = rfs.readFileWithHandle(path, modk.size, file_handle, use_shared_buffer, &c.shared_buffer) catch |err| {
- if (isDebug) {
- Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
- }
- return err;
- };
- } else {
- file = rfs.readFileWithHandle(path, null, file_handle, use_shared_buffer, &c.shared_buffer) catch |err| {
- if (isDebug) {
- Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
- }
- return err;
- };
- }
+
+ file = rfs.readFileWithHandle(path, @intCast(usize, stat.size), file_handle, use_shared_buffer, &c.shared_buffer) catch |err| {
+ if (isDebug) {
+ Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ }
+ return err;
+ };
return Entry{
.contents = file.contents,
- .mod_key = mod_key,
+ // .mod_key = mod_key,
.fd = if (FeatureFlags.store_file_descriptors) file_handle.handle else 0,
};
}
diff --git a/src/env_loader.zig b/src/env_loader.zig
index 7b1acdc95..6c35157ce 100644
--- a/src/env_loader.zig
+++ b/src/env_loader.zig
@@ -483,8 +483,8 @@ pub const Loader = struct {
// .env goes last
pub fn load(
this: *Loader,
- fs: *Fs.FileSystem.RealFS,
- dir: *Fs.FileSystem.DirEntry,
+ fs: *Fs.RealFS,
+ dir: *Fs.DirEntry,
comptime development: bool,
) !void {
const start = std.time.nanoTimestamp();
@@ -557,7 +557,7 @@ pub const Loader = struct {
Output.flush();
}
- pub fn loadEnvFile(this: *Loader, fs: *Fs.FileSystem.RealFS, dir: std.fs.Dir, comptime base: string, comptime override: bool) !void {
+ pub fn loadEnvFile(this: *Loader, fs: *Fs.RealFS, dir: std.fs.Dir, comptime base: string, comptime override: bool) !void {
if (@field(this, base) != null) {
return;
}
diff --git a/src/feature_flags.zig b/src/feature_flags.zig
index 777753561..a2c7fc059 100644
--- a/src/feature_flags.zig
+++ b/src/feature_flags.zig
@@ -74,6 +74,6 @@ pub const is_macro_enabled = true;
// useful for debugging the macro's JSX transform
pub const force_macro = false;
-pub const include_filename_in_jsx = false;
+pub const include_filename_in_jsx = true;
pub const verbose_analytics = false;
diff --git a/src/fs.zig b/src/fs.zig
index 0b28fa9b7..1740cfa8f 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -36,7 +36,7 @@ pub const BytecodeCacheFetcher = struct {
}
};
- pub fn fetch(this: *BytecodeCacheFetcher, sourcename: string, fs: *FileSystem.RealFS) ?StoredFileDescriptorType {
+ pub fn fetch(this: *BytecodeCacheFetcher, sourcename: string, fs: *RealFS) ?StoredFileDescriptorType {
switch (Available.determine(this.fd)) {
.Available => {
return this.fd.?;
@@ -65,13 +65,257 @@ pub const BytecodeCacheFetcher = struct {
}
};
+pub const EntriesOption = union(Tag) {
+ entries: DirEntry,
+ err: DirEntry.Err,
+
+ pub const Tag = enum {
+ entries,
+ err,
+ };
+
+ // This custom map implementation:
+ // - Preallocates a fixed amount of directory name space
+ // - Doesn't store directory names which don't exist.
+ pub const Map = allocators.BSSMap(EntriesOption, Preallocate.Counts.dir_entry, false, 128, true);
+};
+pub const DirEntry = struct {
+ pub const EntryMap = hash_map.StringHashMap(*Entry);
+ pub const EntryStore = allocators.BSSList(Entry, Preallocate.Counts.files);
+ dir: string,
+ fd: StoredFileDescriptorType = 0,
+ data: EntryMap,
+
+ pub fn removeEntry(dir: *DirEntry, name: string) !void {
+ dir.data.remove(name);
+ }
+
+ pub fn addEntry(dir: *DirEntry, entry: std.fs.Dir.Entry) !void {
+ var _kind: Entry.Kind = undefined;
+ switch (entry.kind) {
+ .Directory => {
+ _kind = Entry.Kind.dir;
+ },
+ .SymLink => {
+ // This might be wrong!
+ _kind = Entry.Kind.file;
+ },
+ .File => {
+ _kind = Entry.Kind.file;
+ },
+ else => {
+ return;
+ },
+ }
+ // entry.name only lives for the duration of the iteration
+
+ const name = if (entry.name.len >= strings.StringOrTinyString.Max)
+ strings.StringOrTinyString.init(try FileSystem.FilenameStore.instance.append(@TypeOf(entry.name), entry.name))
+ else
+ strings.StringOrTinyString.init(entry.name);
+
+ const name_lowercased = if (entry.name.len >= strings.StringOrTinyString.Max)
+ strings.StringOrTinyString.init(try FileSystem.FilenameStore.instance.appendLowerCase(@TypeOf(entry.name), entry.name))
+ else
+ strings.StringOrTinyString.initLowerCase(entry.name);
+
+ var stored = try EntryStore.instance.append(
+ Entry{
+ .base_ = name,
+ .base_lowercase_ = name_lowercased,
+ .dir = dir.dir,
+ .mutex = Mutex.init(),
+ // Call "stat" lazily for performance. The "@material-ui/icons" package
+ // contains a directory with over 11,000 entries in it and running "stat"
+ // for each entry was a big performance issue for that package.
+ .need_stat = entry.kind == .SymLink,
+ .cache = Entry.Cache{
+ .symlink = PathString.empty,
+ .kind = _kind,
+ },
+ },
+ );
+
+ const stored_name = stored.base();
+
+ try dir.data.put(stored.base_lowercase(), stored);
+ if (comptime FeatureFlags.verbose_fs) {
+ if (_kind == .dir) {
+ Output.prettyln(" + {s}/", .{stored_name});
+ } else {
+ Output.prettyln(" + {s}", .{stored_name});
+ }
+ }
+ }
+
+ pub fn updateDir(i: *DirEntry, dir: string) void {
+ var iter = i.data.iterator();
+ i.dir = dir;
+ while (iter.next()) |entry| {
+ entry.value_ptr.dir = dir;
+ }
+ }
+
+ pub fn empty(dir: string, allocator: *std.mem.Allocator) DirEntry {
+ return DirEntry{ .dir = dir, .data = EntryMap.init(allocator) };
+ }
+
+ pub fn init(dir: string, allocator: *std.mem.Allocator) DirEntry {
+ if (comptime FeatureFlags.verbose_fs) {
+ Output.prettyln("\n {s}", .{dir});
+ }
+
+ return DirEntry{ .dir = dir, .data = EntryMap.init(allocator) };
+ }
+
+ pub const Err = struct {
+ original_err: anyerror,
+ canonical_error: anyerror,
+ };
+
+ pub fn deinit(d: *DirEntry) void {
+ d.data.allocator.free(d.dir);
+
+ var iter = d.data.iterator();
+ while (iter.next()) |file_entry| {
+ // EntryStore.instance.at(file_entry.value).?.deinit(d.data.allocator);
+ }
+
+ d.data.deinit();
+ }
+
+ pub fn get(entry: *const DirEntry, _query: string) ?Entry.Lookup {
+ if (_query.len == 0) return null;
+ var scratch_lookup_buffer: [256]u8 = undefined;
+ std.debug.assert(scratch_lookup_buffer.len >= _query.len);
+
+ const query = strings.copyLowercase(_query, &scratch_lookup_buffer);
+ const result = entry.data.get(query) orelse return null;
+ const basename = result.base();
+ if (!strings.eql(basename, _query)) {
+ return Entry.Lookup{ .entry = result, .diff_case = Entry.Lookup.DifferentCase{
+ .dir = entry.dir,
+ .query = _query,
+ .actual = basename,
+ } };
+ }
+
+ return Entry.Lookup{ .entry = result, .diff_case = null };
+ }
+
+ pub fn getComptimeQuery(entry: *const DirEntry, comptime query_str: anytype) ?Entry.Lookup {
+ comptime var query: [query_str.len]u8 = undefined;
+ comptime for (query_str) |c, i| {
+ query[i] = std.ascii.toLower(c);
+ };
+
+ const query_hashed = comptime DirEntry.EntryMap.getHash(&query);
+
+ const result = entry.data.getWithHash(&query, query_hashed) orelse return null;
+ const basename = result.base();
+
+ if (!strings.eqlComptime(basename, comptime query[0..query_str.len])) {
+ return Entry.Lookup{
+ .entry = result,
+ .diff_case = Entry.Lookup.DifferentCase{
+ .dir = entry.dir,
+ .query = &query,
+ .actual = basename,
+ },
+ };
+ }
+
+ return Entry.Lookup{ .entry = result, .diff_case = null };
+ }
+
+ pub fn hasComptimeQuery(entry: *const DirEntry, comptime query_str: anytype) bool {
+ comptime var query: [query_str.len]u8 = undefined;
+ comptime for (query_str) |c, i| {
+ query[i] = std.ascii.toLower(c);
+ };
+
+ const query_hashed = comptime DirEntry.EntryMap.getHash(&query);
+
+ return entry.data.getWithHash(&query, query_hashed) != null;
+ }
+};
+
+pub const Entry = struct {
+ cache: Cache = Cache{},
+ dir: string,
+
+ base_: strings.StringOrTinyString,
+
+ // Necessary because the hash table uses it as a key
+ base_lowercase_: strings.StringOrTinyString,
+
+ mutex: Mutex,
+ need_stat: bool = true,
+
+ abs_path: PathString = PathString.empty,
+
+ pub inline fn base(this: *const Entry) string {
+ return this.base_.slice();
+ }
+
+ pub inline fn base_lowercase(this: *const Entry) string {
+ return this.base_lowercase_.slice();
+ }
+
+ pub const Lookup = struct {
+ entry: *Entry,
+ diff_case: ?DifferentCase,
+
+ pub const DifferentCase = struct {
+ dir: string,
+ query: string,
+ actual: string,
+ };
+ };
+
+ pub fn deinit(e: *Entry, allocator: *std.mem.Allocator) void {
+ e.base_.deinit(allocator);
+
+ allocator.free(e.dir);
+ allocator.free(e.cache.symlink.slice());
+ allocator.destroy(e);
+ }
+
+ pub const Cache = struct {
+ symlink: PathString = PathString.empty,
+ fd: StoredFileDescriptorType = 0,
+ kind: Kind = Kind.file,
+ };
+
+ pub const Kind = enum {
+ dir,
+ file,
+ };
+
+ pub fn kind(entry: *Entry, fs: *FileSystem.Implementation) Kind {
+ if (entry.need_stat) {
+ entry.need_stat = false;
+ entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd) catch unreachable;
+ }
+ return entry.cache.kind;
+ }
+
+ pub fn symlink(entry: *Entry, fs: *FileSystem.Implementation) string {
+ if (entry.need_stat) {
+ entry.need_stat = false;
+ entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd) catch unreachable;
+ }
+ return entry.cache.symlink.slice();
+ }
+};
+
pub const FileSystem = struct {
allocator: *std.mem.Allocator,
top_level_dir: string = "/",
fs: Implementation,
- dirname_store: *DirnameStore,
- filename_store: *FilenameStore,
+ dirname_store: *FileSystem.DirnameStore,
+ filename_store: *FileSystem.FilenameStore,
_tmpdir: ?std.fs.Dir = null,
@@ -139,8 +383,8 @@ pub const FileSystem = struct {
_top_level_dir,
),
// .stats = std.StringHashMap(Stat).init(allocator),
- .dirname_store = DirnameStore.init(allocator),
- .filename_store = FilenameStore.init(allocator),
+ .dirname_store = FileSystem.DirnameStore.init(allocator),
+ .filename_store = FileSystem.FilenameStore.init(allocator),
};
instance_loaded = true;
@@ -151,236 +395,6 @@ pub const FileSystem = struct {
return &instance;
}
- pub const DirEntry = struct {
- pub const EntryMap = hash_map.StringHashMap(*Entry);
- pub const EntryStore = allocators.BSSList(Entry, Preallocate.Counts.files);
- dir: string,
- fd: StoredFileDescriptorType = 0,
- data: EntryMap,
-
- pub fn removeEntry(dir: *DirEntry, name: string) !void {
- dir.data.remove(name);
- }
-
- pub fn addEntry(dir: *DirEntry, entry: std.fs.Dir.Entry) !void {
- var _kind: Entry.Kind = undefined;
- switch (entry.kind) {
- .Directory => {
- _kind = Entry.Kind.dir;
- },
- .SymLink => {
- // This might be wrong!
- _kind = Entry.Kind.file;
- },
- .File => {
- _kind = Entry.Kind.file;
- },
- else => {
- return;
- },
- }
- // entry.name only lives for the duration of the iteration
-
- const name = if (entry.name.len >= strings.StringOrTinyString.Max)
- strings.StringOrTinyString.init(try FileSystem.FilenameStore.instance.append(@TypeOf(entry.name), entry.name))
- else
- strings.StringOrTinyString.init(entry.name);
-
- const name_lowercased = if (entry.name.len >= strings.StringOrTinyString.Max)
- strings.StringOrTinyString.init(try FileSystem.FilenameStore.instance.appendLowerCase(@TypeOf(entry.name), entry.name))
- else
- strings.StringOrTinyString.initLowerCase(entry.name);
-
- var stored = try EntryStore.instance.append(
- Entry{
- .base_ = name,
- .base_lowercase_ = name_lowercased,
- .dir = dir.dir,
- .mutex = Mutex.init(),
- // Call "stat" lazily for performance. The "@material-ui/icons" package
- // contains a directory with over 11,000 entries in it and running "stat"
- // for each entry was a big performance issue for that package.
- .need_stat = entry.kind == .SymLink,
- .cache = Entry.Cache{
- .symlink = PathString.empty,
- .kind = _kind,
- },
- },
- );
-
- const stored_name = stored.base();
-
- try dir.data.put(stored.base_lowercase(), stored);
- if (comptime FeatureFlags.verbose_fs) {
- if (_kind == .dir) {
- Output.prettyln(" + {s}/", .{stored_name});
- } else {
- Output.prettyln(" + {s}", .{stored_name});
- }
- }
- }
-
- pub fn updateDir(i: *DirEntry, dir: string) void {
- var iter = i.data.iterator();
- i.dir = dir;
- while (iter.next()) |entry| {
- entry.value_ptr.dir = dir;
- }
- }
-
- pub fn empty(dir: string, allocator: *std.mem.Allocator) DirEntry {
- return DirEntry{ .dir = dir, .data = EntryMap.init(allocator) };
- }
-
- pub fn init(dir: string, allocator: *std.mem.Allocator) DirEntry {
- if (comptime FeatureFlags.verbose_fs) {
- Output.prettyln("\n {s}", .{dir});
- }
-
- return DirEntry{ .dir = dir, .data = EntryMap.init(allocator) };
- }
-
- pub const Err = struct {
- original_err: anyerror,
- canonical_error: anyerror,
- };
-
- pub fn deinit(d: *DirEntry) void {
- d.data.allocator.free(d.dir);
-
- var iter = d.data.iterator();
- while (iter.next()) |file_entry| {
- // EntryStore.instance.at(file_entry.value).?.deinit(d.data.allocator);
- }
-
- d.data.deinit();
- }
-
- pub fn get(entry: *const DirEntry, _query: string) ?Entry.Lookup {
- if (_query.len == 0) return null;
- var scratch_lookup_buffer: [256]u8 = undefined;
- std.debug.assert(scratch_lookup_buffer.len >= _query.len);
-
- const query = strings.copyLowercase(_query, &scratch_lookup_buffer);
- const result = entry.data.get(query) orelse return null;
- const basename = result.base();
- if (!strings.eql(basename, _query)) {
- return Entry.Lookup{ .entry = result, .diff_case = Entry.Lookup.DifferentCase{
- .dir = entry.dir,
- .query = _query,
- .actual = basename,
- } };
- }
-
- return Entry.Lookup{ .entry = result, .diff_case = null };
- }
-
- pub fn getComptimeQuery(entry: *const DirEntry, comptime query_str: anytype) ?Entry.Lookup {
- comptime var query: [query_str.len]u8 = undefined;
- comptime for (query_str) |c, i| {
- query[i] = std.ascii.toLower(c);
- };
-
- const query_hashed = comptime DirEntry.EntryMap.getHash(&query);
-
- const result = entry.data.getWithHash(&query, query_hashed) orelse return null;
- const basename = result.base();
-
- if (!strings.eqlComptime(basename, comptime query[0..query_str.len])) {
- return Entry.Lookup{
- .entry = result,
- .diff_case = Entry.Lookup.DifferentCase{
- .dir = entry.dir,
- .query = &query,
- .actual = basename,
- },
- };
- }
-
- return Entry.Lookup{ .entry = result, .diff_case = null };
- }
-
- pub fn hasComptimeQuery(entry: *const DirEntry, comptime query_str: anytype) bool {
- comptime var query: [query_str.len]u8 = undefined;
- comptime for (query_str) |c, i| {
- query[i] = std.ascii.toLower(c);
- };
-
- const query_hashed = comptime DirEntry.EntryMap.getHash(&query);
-
- return entry.data.getWithHash(&query, query_hashed) != null;
- }
- };
-
- pub const Entry = struct {
- cache: Cache = Cache{},
- dir: string,
-
- base_: strings.StringOrTinyString,
-
- // Necessary because the hash table uses it as a key
- base_lowercase_: strings.StringOrTinyString,
-
- mutex: Mutex,
- need_stat: bool = true,
-
- abs_path: PathString = PathString.empty,
-
- pub inline fn base(this: *const Entry) string {
- return this.base_.slice();
- }
-
- pub inline fn base_lowercase(this: *const Entry) string {
- return this.base_lowercase_.slice();
- }
-
- pub const Lookup = struct {
- entry: *Entry,
- diff_case: ?DifferentCase,
-
- pub const DifferentCase = struct {
- dir: string,
- query: string,
- actual: string,
- };
- };
-
- pub fn deinit(e: *Entry, allocator: *std.mem.Allocator) void {
- e.base_.deinit(allocator);
-
- allocator.free(e.dir);
- allocator.free(e.cache.symlink.slice());
- allocator.destroy(e);
- }
-
- pub const Cache = struct {
- symlink: PathString = PathString.empty,
- fd: StoredFileDescriptorType = 0,
- kind: Kind = Kind.file,
- };
-
- pub const Kind = enum {
- dir,
- file,
- };
-
- pub fn kind(entry: *Entry, fs: *Implementation) Kind {
- if (entry.need_stat) {
- entry.need_stat = false;
- entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd) catch unreachable;
- }
- return entry.cache.kind;
- }
-
- pub fn symlink(entry: *Entry, fs: *Implementation) string {
- if (entry.need_stat) {
- entry.need_stat = false;
- entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd) catch unreachable;
- }
- return entry.cache.symlink.slice();
- }
- };
-
// pub fn statBatch(fs: *FileSystemEntry, paths: []string) ![]?Stat {
// }
@@ -480,524 +494,6 @@ pub const FileSystem = struct {
return try allocator.dupe(u8, joined);
}
- pub const RealFS = struct {
- entries_mutex: Mutex = Mutex.init(),
- entries: *EntriesOption.Map,
- allocator: *std.mem.Allocator,
- // limiter: *Limiter,
- cwd: string,
- parent_fs: *FileSystem = undefined,
- file_limit: usize = 32,
- file_quota: usize = 32,
-
- pub var tmpdir_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
-
- const PLATFORM_TMP_DIR: string = switch (std.Target.current.os.tag) {
- .windows => "%TMPDIR%",
- .macos => "/private/tmp",
- else => "/tmp",
- };
-
- pub var tmpdir_path: []const u8 = undefined;
- pub fn openTmpDir(fs: *const RealFS) !std.fs.Dir {
- var tmpdir_base = std.os.getenv("TMPDIR") orelse PLATFORM_TMP_DIR;
- tmpdir_path = try std.fs.realpath(tmpdir_base, &tmpdir_buf);
- return try std.fs.openDirAbsolute(tmpdir_path, .{ .access_sub_paths = true, .iterate = true });
- }
-
- pub fn fetchCacheFile(fs: *RealFS, basename: string) !std.fs.File {
- const file = try fs._fetchCacheFile(basename);
- if (comptime FeatureFlags.store_file_descriptors) {
- setMaxFd(file.handle);
- }
- return file;
- }
-
- pub const Tmpfile = struct {
- fd: std.os.fd_t = 0,
- dir_fd: std.os.fd_t = 0,
-
- pub inline fn dir(this: *Tmpfile) std.fs.Dir {
- return std.fs.Dir{
- .fd = this.dir_fd,
- };
- }
-
- pub inline fn file(this: *Tmpfile) std.fs.File {
- return std.fs.File{
- .handle = this.fd,
- };
- }
-
- pub fn close(this: *Tmpfile) void {
- if (this.fd != 0) std.os.close(this.fd);
- }
-
- pub fn create(this: *Tmpfile, rfs: *RealFS, name: [*:0]const u8) !void {
- var tmpdir_ = try rfs.openTmpDir();
-
- const flags = std.os.O_CREAT | std.os.O_RDWR | std.os.O_CLOEXEC;
- this.dir_fd = tmpdir_.fd;
- this.fd = try std.os.openatZ(tmpdir_.fd, name, flags, std.os.S_IRWXO);
- }
-
- pub fn promote(this: *Tmpfile, from_name: [*:0]const u8, destination_fd: std.os.fd_t, name: [*:0]const u8) !void {
- std.debug.assert(this.fd != 0);
- std.debug.assert(this.dir_fd != 0);
-
- try C.moveFileZWithHandle(this.fd, this.dir_fd, from_name, destination_fd, name);
- this.close();
- }
-
- pub fn closeAndDelete(this: *Tmpfile, name: [*:0]const u8) void {
- this.close();
-
- if (comptime !Environment.isLinux) {
- if (this.dir_fd == 0) return;
-
- this.dir().deleteFileZ(name) catch {};
- }
- }
- };
-
- inline fn _fetchCacheFile(fs: *RealFS, basename: string) !std.fs.File {
- var parts = [_]string{ "node_modules", ".cache", basename };
- var path = fs.parent_fs.join(&parts);
- return std.fs.cwd().openFile(path, .{ .write = true, .read = true, .lock = .Shared }) catch |err| {
- path = fs.parent_fs.join(parts[0..2]);
- try std.fs.cwd().makePath(path);
-
- path = fs.parent_fs.join(&parts);
- return try std.fs.cwd().createFile(path, .{ .read = true, .lock = .Shared });
- };
- }
-
- pub fn needToCloseFiles(rfs: *const RealFS) bool {
- // On Windows, we must always close open file handles
- // Windows locks files
- if (comptime !FeatureFlags.store_file_descriptors) {
- return true;
- }
-
- // If we're not near the max amount of open files, don't worry about it.
- return !(rfs.file_limit > 254 and rfs.file_limit > (FileSystem.max_fd + 1) * 2);
- }
-
- pub fn bustEntriesCache(rfs: *RealFS, file_path: string) void {
- rfs.entries.remove(file_path);
- }
-
- // Always try to max out how many files we can keep open
- pub fn adjustUlimit() !usize {
- const LIMITS = [_]std.os.rlimit_resource{ std.os.rlimit_resource.STACK, std.os.rlimit_resource.NOFILE };
- inline for (LIMITS) |limit_type, i| {
- const limit = try std.os.getrlimit(limit_type);
-
- if (limit.cur < limit.max) {
- var new_limit = std.mem.zeroes(std.os.rlimit);
- new_limit.cur = limit.max;
- new_limit.max = limit.max;
-
- try std.os.setrlimit(limit_type, new_limit);
- }
-
- if (i == LIMITS.len - 1) return limit.max;
- }
- }
-
- var _entries_option_map: *EntriesOption.Map = undefined;
- var _entries_option_map_loaded: bool = false;
- pub fn init(
- allocator: *std.mem.Allocator,
- cwd: string,
- ) RealFS {
- const file_limit = adjustUlimit() catch unreachable;
-
- if (!_entries_option_map_loaded) {
- _entries_option_map = EntriesOption.Map.init(allocator);
- _entries_option_map_loaded = true;
- }
-
- return RealFS{
- .entries = _entries_option_map,
- .allocator = allocator,
- .cwd = cwd,
- .file_limit = file_limit,
- .file_quota = file_limit,
- };
- }
-
- pub const ModKeyError = error{
- Unusable,
- };
- pub const ModKey = struct {
- inode: std.fs.File.INode = 0,
- size: u64 = 0,
- mtime: i128 = 0,
- mode: std.fs.File.Mode = 0,
-
- threadlocal var hash_bytes: [32]u8 = undefined;
- threadlocal var hash_name_buf: [1024]u8 = undefined;
-
- pub fn hashName(
- this: *const ModKey,
- basename: string,
- ) !string {
-
- // We shouldn't just read the contents of the ModKey into memory
- // The hash should be deterministic across computers and operating systems.
- // inode is non-deterministic across volumes within the same compuiter
- // so if we're not going to do a full content hash, we should use mtime and size.
- // even mtime is debatable.
- var hash_bytes_remain: []u8 = hash_bytes[0..];
- std.mem.writeIntNative(@TypeOf(this.size), hash_bytes_remain[0..@sizeOf(@TypeOf(this.size))], this.size);
- hash_bytes_remain = hash_bytes_remain[@sizeOf(@TypeOf(this.size))..];
- std.mem.writeIntNative(@TypeOf(this.mtime), hash_bytes_remain[0..@sizeOf(@TypeOf(this.mtime))], this.mtime);
-
- return try std.fmt.bufPrint(
- &hash_name_buf,
- "{s}-{x}",
- .{
- basename,
- @truncate(u32, std.hash.Wyhash.hash(1, &hash_bytes)),
- },
- );
- }
-
- pub fn generate(fs: *RealFS, path: string, file: std.fs.File) anyerror!ModKey {
- const stat = try file.stat();
-
- const seconds = @divTrunc(stat.mtime, @as(@TypeOf(stat.mtime), std.time.ns_per_s));
-
- // We can't detect changes if the file system zeros out the modification time
- if (seconds == 0 and std.time.ns_per_s == 0) {
- return error.Unusable;
- }
-
- // Don't generate a modification key if the file is too new
- const now = std.time.nanoTimestamp();
- const now_seconds = @divTrunc(now, std.time.ns_per_s);
- if (seconds > seconds or (seconds == now_seconds and stat.mtime > now)) {
- return error.Unusable;
- }
-
- return ModKey{
- .inode = stat.inode,
- .size = stat.size,
- .mtime = stat.mtime,
- .mode = stat.mode,
- // .uid = stat.
- };
- }
- pub const SafetyGap = 3;
- };
-
- pub fn modKeyWithFile(fs: *RealFS, path: string, file: anytype) anyerror!ModKey {
- return try ModKey.generate(fs, path, file);
- }
-
- pub fn modKey(fs: *RealFS, path: string) anyerror!ModKey {
- // fs.limiter.before();
- // defer fs.limiter.after();
- var file = try std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true });
- defer {
- if (fs.needToCloseFiles()) {
- file.close();
- }
- }
- return try fs.modKeyWithFile(path, file);
- }
-
- pub const EntriesOption = union(Tag) {
- entries: DirEntry,
- err: DirEntry.Err,
-
- pub const Tag = enum {
- entries,
- err,
- };
-
- // This custom map implementation:
- // - Preallocates a fixed amount of directory name space
- // - Doesn't store directory names which don't exist.
- pub const Map = allocators.BSSMap(EntriesOption, Preallocate.Counts.dir_entry, false, 128, true);
- };
-
- // Limit the number of files open simultaneously to avoid ulimit issues
- pub const Limiter = struct {
- semaphore: Semaphore,
- pub fn init(allocator: *std.mem.Allocator, limit: usize) Limiter {
- return Limiter{
- .semaphore = Semaphore.init(limit),
- // .counter = std.atomic.Int(u8).init(0),
- // .lock = std.Thread.Mutex.init(),
- };
- }
-
- // This will block if the number of open files is already at the limit
- pub fn before(limiter: *Limiter) void {
- limiter.semaphore.wait();
- // var added = limiter.counter.fetchAdd(1);
- }
-
- pub fn after(limiter: *Limiter) void {
- limiter.semaphore.post();
- // limiter.counter.decr();
- // if (limiter.held) |hold| {
- // hold.release();
- // limiter.held = null;
- // }
- }
- };
-
- pub fn openDir(fs: *RealFS, unsafe_dir_string: string) std.fs.File.OpenError!std.fs.Dir {
- return try std.fs.openDirAbsolute(unsafe_dir_string, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true, .no_follow = false });
- }
-
- fn readdir(
- fs: *RealFS,
- _dir: string,
- handle: std.fs.Dir,
- ) !DirEntry {
- // fs.limiter.before();
- // defer fs.limiter.after();
-
- var iter: std.fs.Dir.Iterator = handle.iterate();
- var dir = DirEntry.init(_dir, fs.allocator);
- errdefer dir.deinit();
-
- if (FeatureFlags.store_file_descriptors) {
- FileSystem.setMaxFd(handle.fd);
- dir.fd = handle.fd;
- }
-
- while (try iter.next()) |_entry| {
- try dir.addEntry(_entry);
- }
-
- return dir;
- }
-
- fn readDirectoryError(fs: *RealFS, dir: string, err: anyerror) !*EntriesOption {
- if (comptime FeatureFlags.enable_entry_cache) {
- var get_or_put_result = try fs.entries.getOrPut(dir);
- var opt = try fs.entries.put(&get_or_put_result, EntriesOption{
- .err = DirEntry.Err{ .original_err = err, .canonical_error = err },
- });
-
- return opt;
- }
-
- temp_entries_option = EntriesOption{
- .err = DirEntry.Err{ .original_err = err, .canonical_error = err },
- };
- return &temp_entries_option;
- }
-
- threadlocal var temp_entries_option: EntriesOption = undefined;
-
- pub fn readDirectory(fs: *RealFS, _dir: string, _handle: ?std.fs.Dir) !*EntriesOption {
- var dir = _dir;
- var cache_result: ?allocators.Result = null;
- if (comptime FeatureFlags.enable_entry_cache) {
- fs.entries_mutex.lock();
- }
- defer {
- if (comptime FeatureFlags.enable_entry_cache) {
- fs.entries_mutex.unlock();
- }
- }
-
- if (comptime FeatureFlags.enable_entry_cache) {
- cache_result = try fs.entries.getOrPut(dir);
-
- if (cache_result.?.hasCheckedIfExists()) {
- if (fs.entries.atIndex(cache_result.?.index)) |cached_result| {
- return cached_result;
- }
- }
- }
-
- var handle = _handle orelse try fs.openDir(dir);
-
- defer {
- if (_handle == null and fs.needToCloseFiles()) {
- handle.close();
- }
- }
-
- // if we get this far, it's a real directory, so we can just store the dir name.
- if (_handle == null) {
- dir = try DirnameStore.instance.append(string, _dir);
- }
-
- // Cache miss: read the directory entries
- var entries = fs.readdir(
- dir,
- handle,
- ) catch |err| {
- return fs.readDirectoryError(dir, err) catch unreachable;
- };
-
- if (comptime FeatureFlags.enable_entry_cache) {
- const result = EntriesOption{
- .entries = entries,
- };
-
- var out = try fs.entries.put(&cache_result.?, result);
-
- return out;
- }
-
- temp_entries_option = EntriesOption{ .entries = entries };
-
- return &temp_entries_option;
- }
-
- fn readFileError(fs: *RealFS, path: string, err: anyerror) void {}
-
- pub fn readFileWithHandle(
- fs: *RealFS,
- path: string,
- _size: ?usize,
- file: std.fs.File,
- comptime use_shared_buffer: bool,
- shared_buffer: *MutableString,
- ) !File {
- FileSystem.setMaxFd(file.handle);
-
- if (comptime FeatureFlags.disable_filesystem_cache) {
- _ = std.os.fcntl(file.handle, std.os.F_NOCACHE, 1) catch 0;
- }
-
- // Skip the extra file.stat() call when possible
- var size = _size orelse (file.getEndPos() catch |err| {
- fs.readFileError(path, err);
- return err;
- });
-
- // Skip the pread call for empty files
- // Otherwise will get out of bounds errors
- // plus it's an unnecessary syscall
- if (size == 0) {
- if (comptime use_shared_buffer) {
- shared_buffer.reset();
- return File{ .path = Path.init(path), .contents = shared_buffer.list.items };
- } else {
- return File{ .path = Path.init(path), .contents = "" };
- }
- }
-
- var file_contents: []u8 = undefined;
-
- // When we're serving a JavaScript-like file over HTTP, we do not want to cache the contents in memory
- // This imposes a performance hit because not reading from disk is faster than reading from disk
- // Part of that hit is allocating a temporary buffer to store the file contents in
- // As a mitigation, we can just keep one buffer forever and re-use it for the parsed files
- if (use_shared_buffer) {
- shared_buffer.reset();
- try shared_buffer.growBy(size);
- shared_buffer.list.expandToCapacity();
- // We use pread to ensure if the file handle was open, it doesn't seek from the last position
- var read_count = file.preadAll(shared_buffer.list.items, 0) catch |err| {
- fs.readFileError(path, err);
- return err;
- };
- shared_buffer.list.items = shared_buffer.list.items[0..read_count];
- file_contents = shared_buffer.list.items;
- } else {
- // We use pread to ensure if the file handle was open, it doesn't seek from the last position
- var buf = try fs.allocator.alloc(u8, size);
- var read_count = file.preadAll(buf, 0) catch |err| {
- fs.readFileError(path, err);
- return err;
- };
- file_contents = buf[0..read_count];
- }
-
- return File{ .path = Path.init(path), .contents = file_contents };
- }
-
- pub fn readFile(
- fs: *RealFS,
- path: string,
- _size: ?usize,
- ) !File {
- fs.limiter.before();
- defer fs.limiter.after();
- const file: std.fs.File = std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true, .write = false }) catch |err| {
- fs.readFileError(path, err);
- return err;
- };
- defer {
- if (fs.needToCloseFiles()) {
- file.close();
- }
- }
-
- return try fs.readFileWithHandle(path, _size, file);
- }
-
- pub fn kind(fs: *RealFS, _dir: string, base: string, existing_fd: StoredFileDescriptorType) !Entry.Cache {
- var dir = _dir;
- var combo = [2]string{ dir, base };
- var outpath: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- var entry_path = path_handler.joinAbsStringBuf(fs.cwd, &outpath, &combo, .auto);
-
- outpath[entry_path.len + 1] = 0;
- outpath[entry_path.len] = 0;
-
- const absolute_path_c: [:0]const u8 = outpath[0..entry_path.len :0];
-
- var stat = try C.lstat_absolute(absolute_path_c);
- const is_symlink = stat.kind == std.fs.File.Kind.SymLink;
- var _kind = stat.kind;
- var cache = Entry.Cache{
- .kind = Entry.Kind.file,
- .symlink = PathString.empty,
- };
- var symlink: []const u8 = "";
-
- if (is_symlink) {
- var file = if (existing_fd != 0) std.fs.File{ .handle = existing_fd } else try std.fs.openFileAbsoluteZ(absolute_path_c, .{ .read = true });
- setMaxFd(file.handle);
-
- defer {
- if (fs.needToCloseFiles() and existing_fd == 0) {
- file.close();
- } else if (comptime FeatureFlags.store_file_descriptors) {
- cache.fd = file.handle;
- }
- }
- const _stat = try file.stat();
-
- symlink = try std.os.getFdPath(file.handle, &outpath);
-
- _kind = _stat.kind;
- }
-
- std.debug.assert(_kind != .SymLink);
-
- if (_kind == .Directory) {
- cache.kind = .dir;
- } else {
- cache.kind = .file;
- }
- if (symlink.len > 0) {
- cache.symlink = PathString.init(try FilenameStore.instance.append([]const u8, symlink));
- }
-
- return cache;
- }
-
- // // Stores the file entries for directories we've listed before
- // entries_mutex: std.Mutex
- // entries map[string]entriesOrErr
-
- // // If true, do not use the "entries" cache
- // doNotCacheEntries bool
- };
-
pub const Implementation = switch (build_target) {
.wasi, .native => RealFS,
.wasm => WasmFS,
@@ -1120,7 +616,7 @@ pub const Path = struct {
}
// This duplicates but only when strictly necessary
- // This will skip allocating if it's already in FilenameStore or DirnameStore
+ // This will skip allocating if it's already in FileSystem.FilenameStore or FileSystem.DirnameStore
pub fn dupeAlloc(this: *const Path, allocator: *std.mem.Allocator) !Fs.Path {
if (this.text.ptr == this.pretty.ptr and this.text.len == this.text.len) {
if (FileSystem.FilenameStore.instance.exists(this.text) or FileSystem.DirnameStore.instance.exists(this.text)) {
@@ -1250,6 +746,509 @@ pub const Path = struct {
}
};
+pub const RealFS = struct {
+ entries_mutex: Mutex = Mutex.init(),
+ entries: *EntriesOption.Map,
+ allocator: *std.mem.Allocator,
+ // limiter: *Limiter,
+ cwd: string,
+ parent_fs: *FileSystem = undefined,
+ file_limit: usize = 32,
+ file_quota: usize = 32,
+
+ pub var tmpdir_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+
+ const PLATFORM_TMP_DIR: string = switch (std.Target.current.os.tag) {
+ .windows => "%TMPDIR%",
+ .macos => "/private/tmp",
+ else => "/tmp",
+ };
+
+ pub var tmpdir_path: []const u8 = undefined;
+ pub fn openTmpDir(fs: *const RealFS) !std.fs.Dir {
+ var tmpdir_base = std.os.getenv("TMPDIR") orelse PLATFORM_TMP_DIR;
+ tmpdir_path = try std.fs.realpath(tmpdir_base, &tmpdir_buf);
+ return try std.fs.openDirAbsolute(tmpdir_path, .{ .access_sub_paths = true, .iterate = true });
+ }
+
+ pub fn fetchCacheFile(fs: *RealFS, basename: string) !std.fs.File {
+ const file = try fs._fetchCacheFile(basename);
+ if (comptime FeatureFlags.store_file_descriptors) {
+ FileSystem.setMaxFd(file.handle);
+ }
+ return file;
+ }
+
+ pub const Tmpfile = struct {
+ fd: std.os.fd_t = 0,
+ dir_fd: std.os.fd_t = 0,
+
+ pub inline fn dir(this: *Tmpfile) std.fs.Dir {
+ return std.fs.Dir{
+ .fd = this.dir_fd,
+ };
+ }
+
+ pub inline fn file(this: *Tmpfile) std.fs.File {
+ return std.fs.File{
+ .handle = this.fd,
+ };
+ }
+
+ pub fn close(this: *Tmpfile) void {
+ if (this.fd != 0) std.os.close(this.fd);
+ }
+
+ pub fn create(this: *Tmpfile, rfs: *RealFS, name: [*:0]const u8) !void {
+ var tmpdir_ = try rfs.openTmpDir();
+
+ const flags = std.os.O_CREAT | std.os.O_RDWR | std.os.O_CLOEXEC;
+ this.dir_fd = tmpdir_.fd;
+ this.fd = try std.os.openatZ(tmpdir_.fd, name, flags, std.os.S_IRWXO);
+ }
+
+ pub fn promote(this: *Tmpfile, from_name: [*:0]const u8, destination_fd: std.os.fd_t, name: [*:0]const u8) !void {
+ std.debug.assert(this.fd != 0);
+ std.debug.assert(this.dir_fd != 0);
+
+ try C.moveFileZWithHandle(this.fd, this.dir_fd, from_name, destination_fd, name);
+ this.close();
+ }
+
+ pub fn closeAndDelete(this: *Tmpfile, name: [*:0]const u8) void {
+ this.close();
+
+ if (comptime !Environment.isLinux) {
+ if (this.dir_fd == 0) return;
+
+ this.dir().deleteFileZ(name) catch {};
+ }
+ }
+ };
+
+ inline fn _fetchCacheFile(fs: *RealFS, basename: string) !std.fs.File {
+ var parts = [_]string{ "node_modules", ".cache", basename };
+ var path = fs.parent_fs.join(&parts);
+ return std.fs.cwd().openFile(path, .{ .write = true, .read = true, .lock = .Shared }) catch |err| {
+ path = fs.parent_fs.join(parts[0..2]);
+ try std.fs.cwd().makePath(path);
+
+ path = fs.parent_fs.join(&parts);
+ return try std.fs.cwd().createFile(path, .{ .read = true, .lock = .Shared });
+ };
+ }
+
+ pub fn needToCloseFiles(rfs: *const RealFS) bool {
+ // On Windows, we must always close open file handles
+ // Windows locks files
+ if (comptime !FeatureFlags.store_file_descriptors) {
+ return true;
+ }
+
+ // If we're not near the max amount of open files, don't worry about it.
+ return !(rfs.file_limit > 254 and rfs.file_limit > (FileSystem.max_fd + 1) * 2);
+ }
+
+ pub fn bustEntriesCache(rfs: *RealFS, file_path: string) void {
+ rfs.entries.remove(file_path);
+ }
+
+ // Always try to max out how many files we can keep open
+ pub fn adjustUlimit() !usize {
+ const LIMITS = [_]std.os.rlimit_resource{ std.os.rlimit_resource.STACK, std.os.rlimit_resource.NOFILE };
+ inline for (LIMITS) |limit_type, i| {
+ const limit = try std.os.getrlimit(limit_type);
+
+ if (limit.cur < limit.max) {
+ var new_limit = std.mem.zeroes(std.os.rlimit);
+ new_limit.cur = limit.max;
+ new_limit.max = limit.max;
+
+ try std.os.setrlimit(limit_type, new_limit);
+ }
+
+ if (i == LIMITS.len - 1) return limit.max;
+ }
+ }
+
+ var _entries_option_map: *EntriesOption.Map = undefined;
+ var _entries_option_map_loaded: bool = false;
+ pub fn init(
+ allocator: *std.mem.Allocator,
+ cwd: string,
+ ) RealFS {
+ const file_limit = adjustUlimit() catch unreachable;
+
+ if (!_entries_option_map_loaded) {
+ _entries_option_map = EntriesOption.Map.init(allocator);
+ _entries_option_map_loaded = true;
+ }
+
+ return RealFS{
+ .entries = _entries_option_map,
+ .allocator = allocator,
+ .cwd = cwd,
+ .file_limit = file_limit,
+ .file_quota = file_limit,
+ };
+ }
+
+ pub const ModKeyError = error{
+ Unusable,
+ };
+ pub const ModKey = struct {
+ inode: std.fs.File.INode = 0,
+ size: u64 = 0,
+ mtime: i128 = 0,
+ mode: std.fs.File.Mode = 0,
+
+ threadlocal var hash_bytes: [32]u8 = undefined;
+ threadlocal var hash_name_buf: [1024]u8 = undefined;
+
+ pub fn hashName(
+ this: *const ModKey,
+ basename: string,
+ ) !string {
+
+ // We shouldn't just read the contents of the ModKey into memory
+ // The hash should be deterministic across computers and operating systems.
+ // inode is non-deterministic across volumes within the same computer
+ // so if we're not going to do a full content hash, we should use mtime and size.
+ // even mtime is debatable.
+ var hash_bytes_remain: []u8 = hash_bytes[0..];
+ std.mem.writeIntNative(@TypeOf(this.size), hash_bytes_remain[0..@sizeOf(@TypeOf(this.size))], this.size);
+ hash_bytes_remain = hash_bytes_remain[@sizeOf(@TypeOf(this.size))..];
+ std.mem.writeIntNative(@TypeOf(this.mtime), hash_bytes_remain[0..@sizeOf(@TypeOf(this.mtime))], this.mtime);
+
+ return try std.fmt.bufPrint(
+ &hash_name_buf,
+ "{s}-{x}",
+ .{
+ basename,
+ @truncate(u32, std.hash.Wyhash.hash(1, &hash_bytes)),
+ },
+ );
+ }
+
+ pub fn generate(fs: *RealFS, path: string, file: std.fs.File) anyerror!ModKey {
+ const stat = try file.stat();
+
+ const seconds = @divTrunc(stat.mtime, @as(@TypeOf(stat.mtime), std.time.ns_per_s));
+
+ // We can't detect changes if the file system zeros out the modification time
+ if (seconds == 0 and std.time.ns_per_s == 0) {
+ return error.Unusable;
+ }
+
+ // Don't generate a modification key if the file is too new
+ const now = std.time.nanoTimestamp();
+ const now_seconds = @divTrunc(now, std.time.ns_per_s);
+ if (seconds > seconds or (seconds == now_seconds and stat.mtime > now)) {
+ return error.Unusable;
+ }
+
+ return ModKey{
+ .inode = stat.inode,
+ .size = stat.size,
+ .mtime = stat.mtime,
+ .mode = stat.mode,
+ // .uid = stat.
+ };
+ }
+ pub const SafetyGap = 3;
+ };
+
+ pub fn modKeyWithFile(fs: *RealFS, path: string, file: anytype) anyerror!ModKey {
+ return try ModKey.generate(fs, path, file);
+ }
+
+ pub fn modKey(fs: *RealFS, path: string) anyerror!ModKey {
+ // fs.limiter.before();
+ // defer fs.limiter.after();
+ var file = try std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true });
+ defer {
+ if (fs.needToCloseFiles()) {
+ file.close();
+ }
+ }
+ return try fs.modKeyWithFile(path, file);
+ }
+
+ // Limit the number of files open simultaneously to avoid ulimit issues
+ pub const Limiter = struct {
+ semaphore: Semaphore,
+ pub fn init(allocator: *std.mem.Allocator, limit: usize) Limiter {
+ return Limiter{
+ .semaphore = Semaphore.init(limit),
+ // .counter = std.atomic.Int(u8).init(0),
+ // .lock = std.Thread.Mutex.init(),
+ };
+ }
+
+ // This will block if the number of open files is already at the limit
+ pub fn before(limiter: *Limiter) void {
+ limiter.semaphore.wait();
+ // var added = limiter.counter.fetchAdd(1);
+ }
+
+ pub fn after(limiter: *Limiter) void {
+ limiter.semaphore.post();
+ // limiter.counter.decr();
+ // if (limiter.held) |hold| {
+ // hold.release();
+ // limiter.held = null;
+ // }
+ }
+ };
+
+ pub fn openDir(fs: *RealFS, unsafe_dir_string: string) std.fs.File.OpenError!std.fs.Dir {
+ return try std.fs.openDirAbsolute(unsafe_dir_string, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true, .no_follow = false });
+ }
+
+ fn readdir(
+ fs: *RealFS,
+ _dir: string,
+ handle: std.fs.Dir,
+ ) !DirEntry {
+ // fs.limiter.before();
+ // defer fs.limiter.after();
+
+ var iter: std.fs.Dir.Iterator = handle.iterate();
+ var dir = DirEntry.init(_dir, fs.allocator);
+ errdefer dir.deinit();
+
+ if (FeatureFlags.store_file_descriptors) {
+ FileSystem.setMaxFd(handle.fd);
+ dir.fd = handle.fd;
+ }
+
+ while (try iter.next()) |_entry| {
+ try dir.addEntry(_entry);
+ }
+
+ return dir;
+ }
+
+ fn readDirectoryError(fs: *RealFS, dir: string, err: anyerror) !*EntriesOption {
+ if (comptime FeatureFlags.enable_entry_cache) {
+ var get_or_put_result = try fs.entries.getOrPut(dir);
+ var opt = try fs.entries.put(&get_or_put_result, EntriesOption{
+ .err = DirEntry.Err{ .original_err = err, .canonical_error = err },
+ });
+
+ return opt;
+ }
+
+ temp_entries_option = EntriesOption{
+ .err = DirEntry.Err{ .original_err = err, .canonical_error = err },
+ };
+ return &temp_entries_option;
+ }
+
+ threadlocal var temp_entries_option: EntriesOption = undefined;
+
+ pub fn readDirectory(fs: *RealFS, _dir: string, _handle: ?std.fs.Dir) !*EntriesOption {
+ var dir = _dir;
+ var cache_result: ?allocators.Result = null;
+ if (comptime FeatureFlags.enable_entry_cache) {
+ fs.entries_mutex.lock();
+ }
+ defer {
+ if (comptime FeatureFlags.enable_entry_cache) {
+ fs.entries_mutex.unlock();
+ }
+ }
+
+ if (comptime FeatureFlags.enable_entry_cache) {
+ cache_result = try fs.entries.getOrPut(dir);
+
+ if (cache_result.?.hasCheckedIfExists()) {
+ if (fs.entries.atIndex(cache_result.?.index)) |cached_result| {
+ return cached_result;
+ }
+ }
+ }
+
+ var handle = _handle orelse try fs.openDir(dir);
+
+ defer {
+ if (_handle == null and fs.needToCloseFiles()) {
+ handle.close();
+ }
+ }
+
+ // if we get this far, it's a real directory, so we can just store the dir name.
+ if (_handle == null) {
+ dir = try FileSystem.DirnameStore.instance.append(string, _dir);
+ }
+
+ // Cache miss: read the directory entries
+ var entries = fs.readdir(
+ dir,
+ handle,
+ ) catch |err| {
+ return fs.readDirectoryError(dir, err) catch unreachable;
+ };
+
+ if (comptime FeatureFlags.enable_entry_cache) {
+ const result = EntriesOption{
+ .entries = entries,
+ };
+
+ var out = try fs.entries.put(&cache_result.?, result);
+
+ return out;
+ }
+
+ temp_entries_option = EntriesOption{ .entries = entries };
+
+ return &temp_entries_option;
+ }
+
+ fn readFileError(fs: *RealFS, path: string, err: anyerror) void {}
+
+ pub fn readFileWithHandle(
+ fs: *RealFS,
+ path: string,
+ _size: ?usize,
+ file: std.fs.File,
+ comptime use_shared_buffer: bool,
+ shared_buffer: *MutableString,
+ ) !File {
+ FileSystem.setMaxFd(file.handle);
+
+ if (comptime FeatureFlags.disable_filesystem_cache) {
+ _ = std.os.fcntl(file.handle, std.os.F_NOCACHE, 1) catch 0;
+ }
+
+ // Skip the extra file.stat() call when possible
+ var size = _size orelse (file.getEndPos() catch |err| {
+ fs.readFileError(path, err);
+ return err;
+ });
+
+ // Skip the pread call for empty files
+ // Otherwise will get out of bounds errors
+ // plus it's an unnecessary syscall
+ if (size == 0) {
+ if (comptime use_shared_buffer) {
+ shared_buffer.reset();
+ return File{ .path = Path.init(path), .contents = shared_buffer.list.items };
+ } else {
+ return File{ .path = Path.init(path), .contents = "" };
+ }
+ }
+
+ var file_contents: []u8 = undefined;
+
+ // When we're serving a JavaScript-like file over HTTP, we do not want to cache the contents in memory
+ // This imposes a performance hit because reading from memory is faster than reading from disk
+ // Part of that hit is allocating a temporary buffer to store the file contents in
+ // As a mitigation, we can just keep one buffer forever and re-use it for the parsed files
+ if (use_shared_buffer) {
+ shared_buffer.reset();
+ try shared_buffer.growBy(size);
+ shared_buffer.list.expandToCapacity();
+ // We use pread to ensure if the file handle was open, it doesn't seek from the last position
+ var read_count = file.preadAll(shared_buffer.list.items, 0) catch |err| {
+ fs.readFileError(path, err);
+ return err;
+ };
+ shared_buffer.list.items = shared_buffer.list.items[0..read_count];
+ file_contents = shared_buffer.list.items;
+ } else {
+ // We use pread to ensure if the file handle was open, it doesn't seek from the last position
+ var buf = try fs.allocator.alloc(u8, size);
+ var read_count = file.preadAll(buf, 0) catch |err| {
+ fs.readFileError(path, err);
+ return err;
+ };
+ file_contents = buf[0..read_count];
+ }
+
+ return File{ .path = Path.init(path), .contents = file_contents };
+ }
+
+ pub fn readFile(
+ fs: *RealFS,
+ path: string,
+ _size: ?usize,
+ ) !File {
+ fs.limiter.before();
+ defer fs.limiter.after();
+ const file: std.fs.File = std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true, .write = false }) catch |err| {
+ fs.readFileError(path, err);
+ return err;
+ };
+ defer {
+ if (fs.needToCloseFiles()) {
+ file.close();
+ }
+ }
+
+ return try fs.readFileWithHandle(path, _size, file);
+ }
+
+ pub fn kind(fs: *RealFS, _dir: string, base: string, existing_fd: StoredFileDescriptorType) !Entry.Cache {
+ var dir = _dir;
+ var combo = [2]string{ dir, base };
+ var outpath: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+ var entry_path = path_handler.joinAbsStringBuf(fs.cwd, &outpath, &combo, .auto);
+
+ outpath[entry_path.len + 1] = 0;
+ outpath[entry_path.len] = 0;
+
+ const absolute_path_c: [:0]const u8 = outpath[0..entry_path.len :0];
+
+ var stat = try C.lstat_absolute(absolute_path_c);
+ const is_symlink = stat.kind == std.fs.File.Kind.SymLink;
+ var _kind = stat.kind;
+ var cache = Entry.Cache{
+ .kind = Entry.Kind.file,
+ .symlink = PathString.empty,
+ };
+ var symlink: []const u8 = "";
+
+ if (is_symlink) {
+ var file = if (existing_fd != 0) std.fs.File{ .handle = existing_fd } else try std.fs.openFileAbsoluteZ(absolute_path_c, .{ .read = true });
+ FileSystem.setMaxFd(file.handle);
+
+ defer {
+ if (fs.needToCloseFiles() and existing_fd == 0) {
+ file.close();
+ } else if (comptime FeatureFlags.store_file_descriptors) {
+ cache.fd = file.handle;
+ }
+ }
+ const _stat = try file.stat();
+
+ symlink = try std.os.getFdPath(file.handle, &outpath);
+
+ _kind = _stat.kind;
+ }
+
+ std.debug.assert(_kind != .SymLink);
+
+ if (_kind == .Directory) {
+ cache.kind = .dir;
+ } else {
+ cache.kind = .file;
+ }
+ if (symlink.len > 0) {
+ cache.symlink = PathString.init(try FileSystem.FilenameStore.instance.append([]const u8, symlink));
+ }
+
+ return cache;
+ }
+
+ // // Stores the file entries for directories we've listed before
+ // entries_mutex: std.Mutex
+ // entries map[string]entriesOrErr
+
+ // // If true, do not use the "entries" cache
+ // doNotCacheEntries bool
+};
+
test "PathName.init" {
var file = "/root/directory/file.ext".*;
const res = PathName.init(
diff --git a/src/http.zig b/src/http.zig
index 33c544436..cf3a4d76d 100644
--- a/src/http.zig
+++ b/src/http.zig
@@ -452,6 +452,10 @@ pub const RequestContext = struct {
ctx.appendHeader("Content-Length", content_length_header_buf[0..std.fmt.formatIntBuf(&content_length_header_buf, length, 10, .upper, .{})]);
}
+ if (ctx.header("Cookie")) |cookie| {
+ ctx.appendHeader("Set-Cookie", cookie.value);
+ }
+
try ctx.flushHeaders();
}
@@ -2504,7 +2508,7 @@ pub const Server = struct {
defer ctx.watcher.flushEvictions();
defer Output.flush();
- var rfs: *Fs.FileSystem.RealFS = &ctx.bundler.fs.fs;
+ var rfs: *Fs.RealFS = &ctx.bundler.fs.fs;
// It's important that this function does not do any memory allocations
// If this blocks, it can cause cascading bad things to happen
@@ -2579,7 +2583,7 @@ pub const Server = struct {
}
fn run(server: *Server, comptime features: ConnectionFeatures) !void {
- _ = Fs.FileSystem.RealFS.adjustUlimit() catch {};
+ _ = Fs.RealFS.adjustUlimit() catch {};
RequestContext.WebsocketHandler.open_websockets = @TypeOf(
RequestContext.WebsocketHandler.open_websockets,
).init(server.allocator);
diff --git a/src/linker.zig b/src/linker.zig
index ecc6951d8..176c33ca0 100644
--- a/src/linker.zig
+++ b/src/linker.zig
@@ -96,7 +96,7 @@ pub const Linker = struct {
var file: std.fs.File = if (fd) |_fd| std.fs.File{ .handle = _fd } else try std.fs.openFileAbsolute(file_path.text, .{ .read = true });
Fs.FileSystem.setMaxFd(file.handle);
- var modkey = try Fs.FileSystem.RealFS.ModKey.generate(&this.fs.fs, file_path.text, file);
+ var modkey = try Fs.RealFS.ModKey.generate(&this.fs.fs, file_path.text, file);
const hash_name = try modkey.hashName(file_path.name.base);
if (Bundler.isCacheEnabled) {
diff --git a/src/node_module_bundle.zig b/src/node_module_bundle.zig
index 4fd781829..f5421c810 100644
--- a/src/node_module_bundle.zig
+++ b/src/node_module_bundle.zig
@@ -53,7 +53,7 @@ pub const NodeModuleBundle = struct {
return this.package_name_map.contains("react-refresh");
}
- pub inline fn fetchByteCodeCache(this: *NodeModuleBundle, basename: string, fs: *Fs.FileSystem.RealFS) ?StoredFileDescriptorType {
+ pub inline fn fetchByteCodeCache(this: *NodeModuleBundle, basename: string, fs: *Fs.RealFS) ?StoredFileDescriptorType {
return this.bytecode_cache_fetcher.fetch(basename, fs);
}
diff --git a/src/options.zig b/src/options.zig
index 1babfa194..e57c8f043 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -1219,7 +1219,7 @@ pub const BundleOptions = struct {
if (!static_dir_set) {
chosen_dir = choice: {
if (fs.fs.readDirectory(fs.top_level_dir, null)) |dir_| {
- const dir: *const Fs.FileSystem.RealFS.EntriesOption = dir_;
+ const dir: *const Fs.EntriesOption = dir_;
switch (dir.*) {
.entries => {
if (dir.entries.getComptimeQuery("public")) |q| {
diff --git a/src/resolver/dir_info.zig b/src/resolver/dir_info.zig
index 396413327..335938b4c 100644
--- a/src/resolver/dir_info.zig
+++ b/src/resolver/dir_info.zig
@@ -45,7 +45,7 @@ pub fn getFileDescriptor(dirinfo: *const DirInfo) StoredFileDescriptorType {
}
}
-pub fn getEntries(dirinfo: *const DirInfo) ?*Fs.FileSystem.DirEntry {
+pub fn getEntries(dirinfo: *const DirInfo) ?*Fs.DirEntry {
var entries_ptr = Fs.FileSystem.instance.fs.entries.atIndex(dirinfo.entries) orelse return null;
switch (entries_ptr.*) {
.entries => |entr| {
@@ -57,7 +57,7 @@ pub fn getEntries(dirinfo: *const DirInfo) ?*Fs.FileSystem.DirEntry {
}
}
-pub fn getEntriesConst(dirinfo: *const DirInfo) ?*const Fs.FileSystem.DirEntry {
+pub fn getEntriesConst(dirinfo: *const DirInfo) ?*const Fs.DirEntry {
const entries_ptr = Fs.FileSystem.instance.fs.entries.atIndex(dirinfo.entries) orelse return null;
switch (entries_ptr.*) {
.entries => |entr| {
diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig
index 4b766b81e..fcd1b9fad 100644
--- a/src/resolver/resolver.zig
+++ b/src/resolver/resolver.zig
@@ -84,7 +84,7 @@ pub const Result = struct {
// This is true when the package was loaded from within the node_modules directory.
is_from_node_modules: bool = false,
- diff_case: ?Fs.FileSystem.Entry.Lookup.DifferentCase = null,
+ diff_case: ?Fs.Entry.Lookup.DifferentCase = null,
// If present, any ES6 imports to this file can be considered to have no side
// effects. This means they should be removed if unused.
@@ -309,13 +309,13 @@ pub const MatchResult = struct {
file_fd: StoredFileDescriptorType = 0,
is_node_module: bool = false,
package_json: ?*PackageJSON = null,
- diff_case: ?Fs.FileSystem.Entry.Lookup.DifferentCase = null,
+ diff_case: ?Fs.Entry.Lookup.DifferentCase = null,
dir_info: ?*DirInfo = null,
};
pub const LoadResult = struct {
path: string,
- diff_case: ?Fs.FileSystem.Entry.Lookup.DifferentCase,
+ diff_case: ?Fs.Entry.Lookup.DifferentCase,
dirname_fd: StoredFileDescriptorType = 0,
file_fd: StoredFileDescriptorType = 0,
dir_info: ?*DirInfo = null,
@@ -1424,7 +1424,7 @@ pub const Resolver = struct {
// we cannot just use "/"
// we will write to the buffer past the ptr len so it must be a non-const buffer
path[0..1];
- var rfs: *Fs.FileSystem.RealFS = &r.fs.fs;
+ var rfs: *Fs.RealFS = &r.fs.fs;
rfs.entries_mutex.lock();
defer rfs.entries_mutex.unlock();
@@ -1600,7 +1600,7 @@ pub const Resolver = struct {
var cached_dir_entry_result = rfs.entries.getOrPut(dir_path) catch unreachable;
- var dir_entries_option: *Fs.FileSystem.RealFS.EntriesOption = undefined;
+ var dir_entries_option: *Fs.EntriesOption = undefined;
var needs_iter: bool = true;
if (rfs.entries.atIndex(cached_dir_entry_result.index)) |cached_entry| {
@@ -1612,7 +1612,7 @@ pub const Resolver = struct {
if (needs_iter) {
dir_entries_option = try rfs.entries.put(&cached_dir_entry_result, .{
- .entries = Fs.FileSystem.DirEntry.init(dir_path, r.fs.allocator),
+ .entries = Fs.DirEntry.init(dir_path, r.fs.allocator),
});
if (FeatureFlags.store_file_descriptors) {
@@ -2261,7 +2261,7 @@ pub const Resolver = struct {
}
pub fn loadAsFile(r: *ThisResolver, path: string, extension_order: []const string) ?LoadResult {
- var rfs: *Fs.FileSystem.RealFS = &r.fs.fs;
+ var rfs: *Fs.RealFS = &r.fs.fs;
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Attempting to load \"{s}\" as a file", .{path}) catch {};
@@ -2275,14 +2275,14 @@ pub const Resolver = struct {
const dir_path = Dirname.dirname(path);
- const dir_entry: *Fs.FileSystem.RealFS.EntriesOption = rfs.readDirectory(
+ const dir_entry: *Fs.EntriesOption = rfs.readDirectory(
dir_path,
null,
) catch {
return null;
};
- if (@as(Fs.FileSystem.RealFS.EntriesOption.Tag, dir_entry.*) == .err) {
+ if (@as(Fs.EntriesOption.Tag, dir_entry.*) == .err) {
if (dir_entry.err.original_err != error.ENOENT) {
r.log.addErrorFmt(
null,
@@ -2442,7 +2442,7 @@ pub const Resolver = struct {
r: *ThisResolver,
info: *DirInfo,
path: string,
- _entries: *Fs.FileSystem.RealFS.EntriesOption,
+ _entries: *Fs.EntriesOption,
_result: allocators.Result,
dir_entry_index: allocators.IndexType,
parent: ?*DirInfo,
@@ -2451,7 +2451,7 @@ pub const Resolver = struct {
) anyerror!void {
var result = _result;
- var rfs: *Fs.FileSystem.RealFS = &r.fs.fs;
+ var rfs: *Fs.RealFS = &r.fs.fs;
var entries = _entries.entries;
info.* = DirInfo{
diff --git a/src/router.zig b/src/router.zig
index 16e947248..018de4e30 100644
--- a/src/router.zig
+++ b/src/router.zig
@@ -232,7 +232,7 @@ pub const Route = struct {
hash: u32,
children: Ptr = Ptr{},
parent: u16 = top_level_parent,
- entry: *Fs.FileSystem.Entry,
+ entry: *Fs.Entry,
full_hash: u32,
@@ -241,7 +241,7 @@ pub const Route = struct {
pub const List = std.MultiArrayList(Route);
pub const Ptr = TinyPtr;
- pub fn parse(base: string, dir: string, extname: string, entry: *Fs.FileSystem.Entry) Route {
+ pub fn parse(base: string, dir: string, extname: string, entry: *Fs.Entry) Route {
const ensure_slash = if (dir.len > 0 and dir[dir.len - 1] != '/') "/" else "";
var parts = [3]string{ dir, ensure_slash, base };