commit     e422c849d5ef3d5ba40a02f2fe3c1409075640a0
tree       359e2aec9d3b961f26dfef0c32e20c564370aeec
parent     c6c21eeba749a5ebbc7a3f9dc3a0f7e5a702e0da
author     2023-05-08 21:14:56 -0700
committer  2023-05-08 21:14:56 -0700
Fix bug in Bun.build() where it wouldn't pick up changes to directories on rebuilds (#2824)
Co-authored-by: Jarred Sumner <709451+Jarred-Sumner@users.noreply.github.com>
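For context, the fix is exercised end-to-end by the regression test added in this commit (test/bundler/bundler-reloader-script.ts). The sketch below condenses that script into the failing scenario: the temp-file names are illustrative, and the `outputs?.[0]?.result?.text()` accessor mirrors the test as written in this diff and may differ in other Bun versions.

```ts
// Minimal sketch of the rebuild scenario this commit fixes, adapted from
// test/bundler/bundler-reloader-script.ts in this diff.
import { tmpdir } from "os";
import { join } from "path";

const input = join(tmpdir(), "input.js");
const mutate = join(tmpdir(), "mutate.js");

// First build: mutate.js does not exist yet, so the bundler caches a
// directory listing that lacks it.
await Bun.write(input, "import value from './mutate.js';\nexport default value;\n");
await Bun.build({ entrypoints: [input] });

// Create the missing file and rebuild. Before this fix, the stale cached
// directory entries were reused and the new file was never picked up.
await Bun.write(mutate, "export default 1;\n");
const { outputs } = await Bun.build({ entrypoints: [input] });
// Output accessor follows the regression test; should now contain "= 1".
console.log(await outputs?.[0]?.result?.text());
```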
 src/bun.zig                              |  37
 src/bun_js.zig                           |   2
 src/bundler.zig                          |   2
 src/bundler/bundle_v2.zig                |   6
 src/cli/run_command.zig                  |   7
 src/cli/test_command.zig                 |   5
 src/fs.zig                               | 195
 src/install/install.zig                  |   4
 src/install/lockfile.zig                 |   6
 src/options.zig                          |   6
 src/resolver/dir_info.zig                |   6
 src/resolver/resolver.zig                | 143
 src/router.zig                           |   2
 test/bundler/bundler-reloader-script.ts  |  41
 test/bundler/bundler_reloadable.test.ts  |  17
 15 files changed, 369 insertions(+), 110 deletions(-)
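The core of the change, visible in src/fs.zig and src/resolver/resolver.zig below, is a `bun.Generation` counter (a `u16`) threaded through the resolver and the directory-entry cache: each queued build on the bundle thread bumps the generation, and a cached `DirEntry` stamped with an older generation is re-read from disk (reusing its storage in place) instead of being trusted. A companion `store_fd` flag controls whether file descriptors from directory scans are retained; it is enabled for hot reload, `bun run`, and `bun test`, and disabled otherwise so one-shot builds do not hold descriptors open. The TypeScript sketch below is only a conceptual illustration of the generation check, not the Zig implementation; the names `DirEntryCache`, `CachedDir`, and `rebuild` are invented for the example.

```ts
type Generation = number;

interface CachedDir {
  generation: Generation;
  entries: string[];
}

class DirEntryCache {
  private cache = new Map<string, CachedDir>();

  read(dir: string, generation: Generation, readdir: (d: string) => string[]): CachedDir {
    const cached = this.cache.get(dir);
    // A hit only counts if the entry was produced at this generation or later.
    if (cached && cached.generation >= generation) return cached;

    // Stale (or missing): re-scan the directory and stamp the result with the
    // requested generation, replacing the old entry in place.
    const fresh: CachedDir = { generation, entries: readdir(dir) };
    this.cache.set(dir, fresh);
    return fresh;
  }
}

// Each build bumps the generation (analogous to `instance.generation +|= 1`
// in bundle_v2.zig), so listings cached by earlier builds are refreshed on
// the next lookup while entries from the current build are reused.
const cache = new DirEntryCache();
let generation: Generation = 0;

function rebuild(dirs: string[], readdir: (d: string) => string[]) {
  generation += 1;
  for (const d of dirs) cache.read(d, generation, readdir);
}
```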
diff --git a/src/bun.zig b/src/bun.zig index d3da6126f..dce5d95ad 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -784,6 +784,31 @@ pub const StringHashMapContext = struct { return strings.eqlLong(a, b, true); } }; + + pub const PrehashedCaseInsensitive = struct { + value: u64, + input: []const u8, + pub fn init(allocator: std.mem.Allocator, input: []const u8) PrehashedCaseInsensitive { + var out = allocator.alloc(u8, input.len) catch unreachable; + _ = strings.copyLowercase(input, out); + return PrehashedCaseInsensitive{ + .value = StringHashMapContext.hash(.{}, out), + .input = out, + }; + } + pub fn deinit(this: @This(), allocator: std.mem.Allocator) void { + allocator.free(this.input); + } + pub fn hash(this: @This(), s: []const u8) u64 { + if (s.ptr == this.input.ptr and s.len == this.input.len) + return this.value; + return StringHashMapContext.hash(.{}, s); + } + + pub fn eql(_: @This(), a: []const u8, b: []const u8) bool { + return strings.eqlCaseInsensitiveASCIIICheckLength(a, b); + } + }; }; pub fn StringArrayHashMap(comptime Type: type) type { @@ -1468,3 +1493,15 @@ pub const MaxHeapAllocator = @import("./max_heap_allocator.zig").MaxHeapAllocato pub const tracy = @import("./tracy.zig"); pub const trace = tracy.trace; + +pub fn openFileForPath(path_: [:0]const u8) !std.fs.File { + const O_PATH = if (comptime Environment.isLinux) std.os.O.PATH else std.os.O.RDONLY; + const flags: u32 = std.os.O.CLOEXEC | std.os.O.NOCTTY | O_PATH; + + const fd = try std.os.openZ(path_, flags, 0); + return std.fs.File{ + .handle = fd, + }; +} + +pub const Generation = u16; diff --git a/src/bun_js.zig b/src/bun_js.zig index b2d8ab77c..af1dda3d9 100644 --- a/src/bun_js.zig +++ b/src/bun_js.zig @@ -93,6 +93,8 @@ pub const Run = struct { b.options.macro_remap = macros; } + b.resolver.store_fd = ctx.debug.hot_reload != .none; + b.configureRouter(false) catch { if (Output.enable_ansi_colors_stderr) { vm.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), true) catch {}; diff --git a/src/bundler.zig b/src/bundler.zig index 011634b7a..0f98a9ffb 100644 --- a/src/bundler.zig +++ b/src/bundler.zig @@ -505,7 +505,7 @@ pub const Bundler = struct { switch (this.options.env.behavior) { .prefix, .load_all => { // Step 1. Load the project root. - var dir: *Fs.FileSystem.DirEntry = ((this.resolver.readDirInfo(this.fs.top_level_dir) catch return) orelse return).getEntries() orelse return; + var dir: *Fs.FileSystem.DirEntry = ((this.resolver.readDirInfo(this.fs.top_level_dir) catch return) orelse return).getEntries(this.resolver.generation) orelse return; // Process always has highest priority. 
const was_production = this.options.production; diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 1ef06c94c..9502f5dfe 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -1490,7 +1490,7 @@ pub const BundleV2 = struct { while (true) { while (instance.queue.pop()) |completion| { - generateInNewThread(completion) catch |err| { + generateInNewThread(completion, instance.generation) catch |err| { completion.result = .{ .err = err }; var concurrent_task = bun.default_allocator.create(JSC.ConcurrentTask) catch unreachable; concurrent_task.* = JSC.ConcurrentTask{ @@ -1502,6 +1502,7 @@ pub const BundleV2 = struct { }; any = true; } + instance.generation +|= 1; if (any) { bun.Mimalloc.mi_collect(false); @@ -1513,12 +1514,14 @@ pub const BundleV2 = struct { pub const BundleThread = struct { waker: bun.AsyncIO.Waker, queue: bun.UnboundedQueue(JSBundleCompletionTask, .next) = .{}, + generation: bun.Generation = 0, pub var created = false; pub var instance: *BundleThread = undefined; }; fn generateInNewThread( completion: *JSBundleCompletionTask, + generation: bun.Generation, ) !void { var heap = try ThreadlocalArena.init(); defer heap.deinit(); @@ -1564,6 +1567,7 @@ pub const BundleV2 = struct { bundler.options.minify_identifiers = config.minify.identifiers; bundler.options.inlining = config.minify.syntax; bundler.options.source_map = config.source_map; + bundler.resolver.generation = generation; try bundler.configureDefines(); bundler.configureLinker(); diff --git a/src/cli/run_command.zig b/src/cli/run_command.zig index b2b5cf0e9..0077e8179 100644 --- a/src/cli/run_command.zig +++ b/src/cli/run_command.zig @@ -503,7 +503,7 @@ pub const RunCommand = struct { // This would cause potentially a different .env file to be loaded this_bundler.runEnvLoader() catch {}; - if (root_dir_info.getEntries()) |dir| { + if (root_dir_info.getEntries(0)) |dir| { // Run .env again if it exists in a parent dir if (this_bundler.options.production) { this_bundler.env.load(&this_bundler.fs.fs, dir, false) catch {}; @@ -654,6 +654,7 @@ pub const RunCommand = struct { this_bundler.resolver.care_about_bin_folder = true; this_bundler.resolver.care_about_scripts = true; + this_bundler.resolver.store_fd = true; defer { this_bundler.resolver.care_about_bin_folder = false; this_bundler.resolver.care_about_scripts = false; @@ -699,7 +700,7 @@ pub const RunCommand = struct { var dir_slice: string = ""; while (iter.next()) |entry| { const value = entry.value_ptr.*; - if (value.kind(&this_bundler.fs.fs) == .file) { + if (value.kind(&this_bundler.fs.fs, true) == .file) { if (!has_copied) { bun.copy(u8, &path_buf, value.dir); dir_slice = path_buf[0..value.dir.len]; @@ -736,7 +737,7 @@ pub const RunCommand = struct { !strings.contains(name, ".d.ts") and !strings.contains(name, ".d.mts") and !strings.contains(name, ".d.cts") and - value.kind(&this_bundler.fs.fs) == .file) + value.kind(&this_bundler.fs.fs, true) == .file) { _ = try results.getOrPut(this_bundler.fs.filename_store.append(@TypeOf(name), name) catch continue); } diff --git a/src/cli/test_command.zig b/src/cli/test_command.zig index 07a86cc3a..b5ab42672 100644 --- a/src/cli/test_command.zig +++ b/src/cli/test_command.zig @@ -230,7 +230,7 @@ const Scanner = struct { }; fn readDirWithName(this: *Scanner, name: string, handle: ?std.fs.Dir) !*FileSystem.RealFS.EntriesOption { - return try this.fs.fs.readDirectoryWithIterator(name, handle, *Scanner, this); + return try this.fs.fs.readDirectoryWithIterator(name, handle, 0, true, *Scanner, 
this); } pub fn scan(this: *Scanner, path_literal: string) void { @@ -306,7 +306,7 @@ const Scanner = struct { pub fn next(this: *Scanner, entry: *FileSystem.Entry, fd: bun.StoredFileDescriptorType) void { const name = entry.base_lowercase(); this.has_iterated = true; - switch (entry.kind(&this.fs.fs)) { + switch (entry.kind(&this.fs.fs, true)) { .dir => { if ((name.len > 0 and name[0] == '.') or strings.eqlComptime(name, "node_modules")) { return; @@ -403,6 +403,7 @@ pub const TestCommand = struct { var vm = try JSC.VirtualMachine.init(ctx.allocator, ctx.args, null, ctx.log, env_loader); vm.argv = ctx.passthrough; vm.preload = ctx.preloads; + vm.bundler.resolver.store_fd = true; try vm.bundler.configureDefines(); vm.bundler.options.rewrite_jest_for_tests = true; diff --git a/src/fs.zig b/src/fs.zig index cee7fef72..8d6392751 100644 --- a/src/fs.zig +++ b/src/fs.zig @@ -22,7 +22,7 @@ const allocators = @import("./allocators.zig"); pub const MAX_PATH_BYTES = bun.MAX_PATH_BYTES; pub const PathBuffer = [bun.MAX_PATH_BYTES]u8; -const debug = Output.scoped(.fs, false); +pub const debug = Output.scoped(.fs, false); // pub const FilesystemImplementation = @import("fs_impl.zig"); @@ -193,13 +193,14 @@ pub const FileSystem = struct { pub const EntryStore = allocators.BSSList(Entry, Preallocate.Counts.files); dir: string, fd: StoredFileDescriptorType = 0, + generation: bun.Generation = 0, data: EntryMap, // pub fn removeEntry(dir: *DirEntry, name: string) !void { // // dir.data.remove(name); // } - pub fn addEntry(dir: *DirEntry, entry: std.fs.IterableDir.Entry, allocator: std.mem.Allocator, comptime Iterator: type, iterator: Iterator) !void { + pub fn addEntry(dir: *DirEntry, prev_map: ?*EntryMap, entry: std.fs.IterableDir.Entry, allocator: std.mem.Allocator, comptime Iterator: type, iterator: Iterator) !void { const _kind: Entry.Kind = switch (entry.kind) { .Directory => .dir, // This might be wrong! @@ -207,34 +208,57 @@ pub const FileSystem = struct { .File => .file, else => return, }; - // entry.name only lives for the duration of the iteration - const name = try strings.StringOrTinyString.initAppendIfNeeded( - entry.name, - *FileSystem.FilenameStore, - &FileSystem.FilenameStore.instance, - ); + const stored = try brk: { + if (prev_map) |map| { + var stack_fallback = std.heap.stackFallback(512, allocator); + const stack = stack_fallback.get(); + const prehashed = bun.StringHashMapContext.PrehashedCaseInsensitive.init(stack, entry.name); + defer prehashed.deinit(stack); + if (map.getAdapted(entry.name, prehashed)) |existing| { + existing.mutex.lock(); + defer existing.mutex.unlock(); + existing.dir = dir.dir; + + existing.need_stat = existing.need_stat or existing.cache.kind != _kind; + // TODO: is this right? + if (existing.cache.kind != _kind) { + existing.cache.kind = _kind; + + existing.cache.symlink = PathString.empty; + } + break :brk existing; + } + } - const name_lowercased = try strings.StringOrTinyString.initLowerCaseAppendIfNeeded( - entry.name, - *FileSystem.FilenameStore, - &FileSystem.FilenameStore.instance, - ); + // entry.name only lives for the duration of the iteration + const name = try strings.StringOrTinyString.initAppendIfNeeded( + entry.name, + *FileSystem.FilenameStore, + &FileSystem.FilenameStore.instance, + ); - const stored = try EntryStore.instance.append(.{ - .base_ = name, - .base_lowercase_ = name_lowercased, - .dir = dir.dir, - .mutex = Mutex.init(), - // Call "stat" lazily for performance. 
The "@material-ui/icons" package - // contains a directory with over 11,000 entries in it and running "stat" - // for each entry was a big performance issue for that package. - .need_stat = entry.kind == .SymLink, - .cache = .{ - .symlink = PathString.empty, - .kind = _kind, - }, - }); + const name_lowercased = try strings.StringOrTinyString.initLowerCaseAppendIfNeeded( + entry.name, + *FileSystem.FilenameStore, + &FileSystem.FilenameStore.instance, + ); + + break :brk EntryStore.instance.append(.{ + .base_ = name, + .base_lowercase_ = name_lowercased, + .dir = dir.dir, + .mutex = Mutex.init(), + // Call "stat" lazily for performance. The "@material-ui/icons" package + // contains a directory with over 11,000 entries in it and running "stat" + // for each entry was a big performance issue for that package. + .need_stat = entry.kind == .SymLink, + .cache = .{ + .symlink = PathString.empty, + .kind = _kind, + }, + }); + }; const stored_name = stored.base(); @@ -253,12 +277,16 @@ pub const FileSystem = struct { } } - pub fn init(dir: string) DirEntry { + pub fn init(dir: string, generation: bun.Generation) DirEntry { if (comptime FeatureFlags.verbose_fs) { Output.prettyln("\n {s}", .{dir}); } - return .{ .dir = dir, .data = .{} }; + return .{ + .dir = dir, + .data = .{}, + .generation = generation, + }; } pub const Err = struct { @@ -402,21 +430,21 @@ pub const FileSystem = struct { file, }; - pub fn kind(entry: *Entry, fs: *Implementation) Kind { + pub fn kind(entry: *Entry, fs: *Implementation, store_fd: bool) Kind { if (entry.need_stat) { entry.need_stat = false; // This is technically incorrect, but we are choosing not to handle errors here - entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd) catch return entry.cache.kind; + entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd, store_fd) catch return entry.cache.kind; } return entry.cache.kind; } - pub fn symlink(entry: *Entry, fs: *Implementation) string { + pub fn symlink(entry: *Entry, fs: *Implementation, store_fd: bool) string { if (entry.need_stat) { entry.need_stat = false; // This is technically incorrect, but we are choosing not to handle errors here // This error can happen if the file was deleted between the time the directory was scanned and the time it was read - entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd) catch return ""; + entry.cache = fs.kind(entry.dir, entry.base(), entry.cache.fd, store_fd) catch return ""; } return entry.cache.symlink.slice(); } @@ -561,6 +589,38 @@ pub const FileSystem = struct { })).dir; } + pub fn entriesAt(this: *RealFS, index: allocators.IndexType, generation: bun.Generation) ?*EntriesOption { + var existing = this.entries.atIndex(index) orelse return null; + if (existing.* == .entries) { + if (existing.entries.generation < generation) { + var handle = std.fs.Dir.openIterableDir(std.fs.cwd(), existing.entries.dir, .{}) catch |err| { + existing.entries.data.clearAndFree(bun.fs_allocator); + + return this.readDirectoryError(existing.entries.dir, err) catch unreachable; + }; + defer handle.close(); + + const new_entry = this.readdir( + false, + &existing.entries.data, + existing.entries.dir, + generation, + handle.dir, + + void, + void{}, + ) catch |err| { + existing.entries.data.clearAndFree(bun.fs_allocator); + return this.readDirectoryError(existing.entries.dir, err) catch unreachable; + }; + existing.entries.data.clearAndFree(bun.fs_allocator); + existing.entries.* = new_entry; + } + } + + return existing; + } + pub fn getDefaultTempDir() string { return 
bun.getenvZ("BUN_TMPDIR") orelse bun.getenvZ("TMPDIR") orelse PLATFORM_TMP_DIR; } @@ -813,26 +873,32 @@ pub const FileSystem = struct { fn readdir( fs: *RealFS, + store_fd: bool, + prev_map: ?*DirEntry.EntryMap, _dir: string, + generation: bun.Generation, handle: std.fs.Dir, comptime Iterator: type, iterator: Iterator, ) !DirEntry { _ = fs; + var iter = (std.fs.IterableDir{ .dir = handle }).iterate(); - var dir = DirEntry.init(_dir); + var dir = DirEntry.init(_dir, generation); const allocator = bun.fs_allocator; errdefer dir.deinit(allocator); - if (FeatureFlags.store_file_descriptors) { + if (store_fd) { FileSystem.setMaxFd(handle.fd); dir.fd = handle.fd; } while (try iter.next()) |_entry| { - try dir.addEntry(_entry, allocator, Iterator, iterator); + try dir.addEntry(prev_map, _entry, allocator, Iterator, iterator); } + debug("readdir({d}, {s}) = {d}", .{ handle.fd, _dir, dir.data.count() }); + return dir; } @@ -854,11 +920,17 @@ pub const FileSystem = struct { threadlocal var temp_entries_option: EntriesOption = undefined; - pub fn readDirectory(fs: *RealFS, _dir: string, _handle: ?std.fs.Dir) !*EntriesOption { - return readDirectoryWithIterator(fs, _dir, _handle, void, {}); + pub fn readDirectory( + fs: *RealFS, + _dir: string, + _handle: ?std.fs.Dir, + generation: bun.Generation, + store_fd: bool, + ) !*EntriesOption { + return readDirectoryWithIterator(fs, _dir, _handle, generation, store_fd, void, {}); } - pub fn readDirectoryWithIterator(fs: *RealFS, _dir: string, _handle: ?std.fs.Dir, comptime Iterator: type, iterator: Iterator) !*EntriesOption { + pub fn readDirectoryWithIterator(fs: *RealFS, _dir: string, _handle: ?std.fs.Dir, generation: bun.Generation, store_fd: bool, comptime Iterator: type, iterator: Iterator) !*EntriesOption { var dir = _dir; var cache_result: ?allocators.Result = null; if (comptime FeatureFlags.enable_entry_cache) { @@ -869,13 +941,18 @@ pub const FileSystem = struct { fs.entries_mutex.unlock(); } } + var in_place: ?*DirEntry = null; if (comptime FeatureFlags.enable_entry_cache) { cache_result = try fs.entries.getOrPut(dir); if (cache_result.?.hasCheckedIfExists()) { if (fs.entries.atIndex(cache_result.?.index)) |cached_result| { - return cached_result; + if (cached_result.* != .entries or (cached_result.* == .entries and cached_result.entries.generation >= generation)) { + return cached_result; + } + + in_place = cached_result.entries; } } } @@ -883,28 +960,43 @@ pub const FileSystem = struct { var handle = _handle orelse try fs.openDir(dir); defer { - if (_handle == null and fs.needToCloseFiles()) { + if (_handle == null and (!store_fd or fs.needToCloseFiles())) { handle.close(); } } // if we get this far, it's a real directory, so we can just store the dir name. 
if (_handle == null) { - dir = try DirnameStore.instance.append(string, _dir); + dir = try if (in_place) |existing| + existing.dir + else + DirnameStore.instance.append(string, _dir); } // Cache miss: read the directory entries var entries = fs.readdir( + store_fd, + if (in_place) |existing| &existing.data else null, dir, + generation, handle, + Iterator, iterator, ) catch |err| { + if (in_place) |existing| existing.data.clearAndFree(bun.fs_allocator); + return fs.readDirectoryError(dir, err) catch unreachable; }; if (comptime FeatureFlags.enable_entry_cache) { - var entries_ptr = bun.fs_allocator.create(DirEntry) catch unreachable; + var entries_ptr = in_place orelse bun.fs_allocator.create(DirEntry) catch unreachable; + if (in_place) |original| { + original.data.clearAndFree(bun.fs_allocator); + } + if (store_fd and entries.fd == 0) + entries.fd = handle.fd; + entries_ptr.* = entries; const result = EntriesOption{ .entries = entries_ptr, @@ -1041,7 +1133,13 @@ pub const FileSystem = struct { return File{ .path = Path.init(path), .contents = file_contents }; } - pub fn kind(fs: *RealFS, _dir: string, base: string, existing_fd: StoredFileDescriptorType) !Entry.Cache { + pub fn kind( + fs: *RealFS, + _dir: string, + base: string, + existing_fd: StoredFileDescriptorType, + store_fd: bool, + ) !Entry.Cache { var dir = _dir; var combo = [2]string{ dir, base }; var outpath: [bun.MAX_PATH_BYTES]u8 = undefined; @@ -1062,11 +1160,16 @@ pub const FileSystem = struct { var symlink: []const u8 = ""; if (is_symlink) { - var file = if (existing_fd != 0) std.fs.File{ .handle = existing_fd } else try std.fs.openFileAbsoluteZ(absolute_path_c, .{ .mode = .read_only }); + var file = try if (existing_fd != 0) + std.fs.File{ .handle = existing_fd } + else if (store_fd) + std.fs.openFileAbsoluteZ(absolute_path_c, .{ .mode = .read_only }) + else + bun.openFileForPath(absolute_path_c); setMaxFd(file.handle); defer { - if (fs.needToCloseFiles() and existing_fd == 0) { + if ((!store_fd or fs.needToCloseFiles()) and existing_fd == 0) { file.close(); } else if (comptime FeatureFlags.store_file_descriptors) { cache.fd = file.handle; diff --git a/src/install/install.zig b/src/install/install.zig index 526b540ee..dad1c4f99 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -5019,7 +5019,7 @@ pub const PackageManager = struct { bun.copy(u8, &package_json_cwd_buf, fs.top_level_dir); bun.copy(u8, package_json_cwd_buf[fs.top_level_dir.len..], "package.json"); - var entries_option = try fs.fs.readDirectory(fs.top_level_dir, null); + var entries_option = try fs.fs.readDirectory(fs.top_level_dir, null, 0, true); var options = Options{ .global = cli.global, }; @@ -5128,6 +5128,8 @@ pub const PackageManager = struct { var root_dir = try Fs.FileSystem.instance.fs.readDirectory( Fs.FileSystem.instance.top_level_dir, null, + 0, + true, ); // var progress = Progress{}; // var node = progress.start(name: []const u8, estimated_total_items: usize) diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index f2cab5ed9..61a71c970 100644 --- a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -906,7 +906,7 @@ pub const Printer = struct { var fs = &FileSystem.instance; var options = PackageManager.Options{}; - var entries_option = try fs.fs.readDirectory(fs.top_level_dir, null); + var entries_option = try fs.fs.readDirectory(fs.top_level_dir, null, 0, true); var env_loader: *DotEnv.Loader = brk: { var map = try allocator.create(DotEnv.Map); @@ -2592,6 +2592,8 @@ pub const Package = extern struct { 
const entries_option = FileSystem.instance.fs.readDirectory( dir_prefix, null, + 0, + true, ) catch |err| switch (err) { error.FileNotFound => { log.addWarningFmt( @@ -2624,7 +2626,7 @@ pub const Package = extern struct { if (strings.eqlAnyComptime(name, skipped_names)) continue; var entry: *FileSystem.Entry = entry_iter.value_ptr.*; - if (entry.kind(&Fs.FileSystem.instance.fs) != .dir) continue; + if (entry.kind(&Fs.FileSystem.instance.fs, true) != .dir) continue; var parts = [2]string{ entry.dir, entry.base() }; var entry_path = Path.joinAbsStringBufZ( diff --git a/src/options.zig b/src/options.zig index 6563e5a89..958b36874 100644 --- a/src/options.zig +++ b/src/options.zig @@ -1681,18 +1681,18 @@ pub const BundleOptions = struct { if (!static_dir_set) { chosen_dir = choice: { - if (fs.fs.readDirectory(fs.top_level_dir, null)) |dir_| { + if (fs.fs.readDirectory(fs.top_level_dir, null, 0, false)) |dir_| { const dir: *const Fs.FileSystem.RealFS.EntriesOption = dir_; switch (dir.*) { .entries => { if (dir.entries.getComptimeQuery("public")) |q| { - if (q.entry.kind(&fs.fs) == .dir) { + if (q.entry.kind(&fs.fs, true) == .dir) { break :choice "public"; } } if (dir.entries.getComptimeQuery("static")) |q| { - if (q.entry.kind(&fs.fs) == .dir) { + if (q.entry.kind(&fs.fs, true) == .dir) { break :choice "static"; } } diff --git a/src/resolver/dir_info.zig b/src/resolver/dir_info.zig index 66d589cfa..92d6c2d75 100644 --- a/src/resolver/dir_info.zig +++ b/src/resolver/dir_info.zig @@ -74,15 +74,15 @@ pub fn getFileDescriptor(dirinfo: *const DirInfo) StoredFileDescriptorType { return 0; } - if (dirinfo.getEntries()) |entries| { + if (dirinfo.getEntries(0)) |entries| { return entries.fd; } else { return 0; } } -pub fn getEntries(dirinfo: *const DirInfo) ?*Fs.FileSystem.DirEntry { - var entries_ptr = Fs.FileSystem.instance.fs.entries.atIndex(dirinfo.entries) orelse return null; +pub fn getEntries(dirinfo: *const DirInfo, generation: bun.Generation) ?*Fs.FileSystem.DirEntry { + var entries_ptr = Fs.FileSystem.instance.fs.entriesAt(dirinfo.entries, generation) orelse return null; switch (entries_ptr.*) { .entries => { return entries_ptr.entries; diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig index b170ca350..d08b42943 100644 --- a/src/resolver/resolver.zig +++ b/src/resolver/resolver.zig @@ -478,10 +478,12 @@ pub const Resolver = struct { watcher: ?AnyResolveWatcher = null, caches: CacheSet, + generation: bun.Generation = 0, package_manager: ?*PackageManager = null, onWakePackageManager: PackageManager.WakeHandler = .{}, env_loader: ?*DotEnv.Loader = null, + store_fd: bool = false, // These are sets that represent various conditions for the "exports" field // in package.json. 
@@ -944,9 +946,9 @@ pub const Resolver = struct { module_type = ModuleTypeMap.getWithLength(path.name.ext, 4) orelse .unknown; } - if (dir.getEntries()) |entries| { + if (dir.getEntries(r.generation)) |entries| { if (entries.get(path.name.filename)) |query| { - const symlink_path = query.entry.symlink(&r.fs.fs); + const symlink_path = query.entry.symlink(&r.fs.fs, r.store_fd); if (symlink_path.len > 0) { path.setRealpath(symlink_path); if (result.file_fd == 0) result.file_fd = query.entry.cache.fd; @@ -960,12 +962,17 @@ pub const Resolver = struct { var out = r.fs.absBuf(&parts, &buf); + const store_fd = r.store_fd; + if (query.entry.cache.fd == 0) { buf[out.len] = 0; const span = buf[0..out.len :0]; - var file = try std.fs.openFileAbsoluteZ(span, .{ .mode = .read_only }); + var file = try if (store_fd) + std.fs.openFileAbsoluteZ(span, .{ .mode = .read_only }) + else + bun.openFileForPath(span); - if (comptime !FeatureFlags.store_file_descriptors) { + if (!store_fd) { out = try bun.getFdPath(query.entry.cache.fd, &buf); file.close(); } else { @@ -984,7 +991,7 @@ pub const Resolver = struct { } } - if (comptime FeatureFlags.store_file_descriptors) { + if (store_fd) { out = try bun.getFdPath(query.entry.cache.fd, &buf); } @@ -993,7 +1000,7 @@ pub const Resolver = struct { debug.addNoteFmt("Resolved symlink \"{s}\" to \"{s}\"", .{ symlink, path.text }); } query.entry.cache.symlink = PathString.init(symlink); - if (result.file_fd == 0) result.file_fd = query.entry.cache.fd; + if (result.file_fd == 0 and store_fd) result.file_fd = query.entry.cache.fd; path.setRealpath(symlink); } @@ -1912,6 +1919,7 @@ pub const Resolver = struct { var dir_entries_option: *Fs.FileSystem.RealFS.EntriesOption = undefined; var needs_iter = true; + var in_place: ?*Fs.FileSystem.DirEntry = null; var open_dir = std.fs.cwd().openIterableDir(dir_path, .{}) catch |err| { switch (err) { error.FileNotFound => unreachable, @@ -1925,27 +1933,46 @@ pub const Resolver = struct { if (rfs.entries.atIndex(cached_dir_entry_result.index)) |cached_entry| { if (cached_entry.* == .entries) { - dir_entries_option = cached_entry; - needs_iter = false; + if (cached_entry.entries.generation >= r.generation) { + dir_entries_option = cached_entry; + needs_iter = false; + } else { + in_place = cached_entry.entries; + } } } if (needs_iter) { const allocator = bun.fs_allocator; - var dir_entries_ptr = allocator.create(Fs.FileSystem.DirEntry) catch unreachable; - dir_entries_ptr.* = Fs.FileSystem.DirEntry.init( - Fs.FileSystem.DirnameStore.instance.append(string, dir_path) catch unreachable, + var new_entry = Fs.FileSystem.DirEntry.init( + if (in_place) |existing| existing.dir else Fs.FileSystem.DirnameStore.instance.append(string, dir_path) catch unreachable, + r.generation, ); - if (FeatureFlags.store_file_descriptors) { - Fs.FileSystem.setMaxFd(open_dir.dir.fd); - dir_entries_ptr.fd = open_dir.dir.fd; - } var dir_iterator = open_dir.iterate(); while (dir_iterator.next() catch null) |_value| { - dir_entries_ptr.addEntry(_value, allocator, void, {}) catch unreachable; + new_entry.addEntry( + if (in_place) |existing| &existing.data else null, + _value, + allocator, + void, + {}, + ) catch unreachable; + } + if (in_place) |existing| { + existing.data.clearAndFree(allocator); + } + + var dir_entries_ptr = in_place orelse allocator.create(Fs.FileSystem.DirEntry) catch unreachable; + dir_entries_ptr.* = new_entry; + + if (r.store_fd) { + Fs.FileSystem.setMaxFd(open_dir.dir.fd); + dir_entries_ptr.fd = open_dir.dir.fd; } + bun.fs.debug("readdir({d}, 
{s}) = {d}", .{ open_dir.dir.fd, dir_path, dir_entries_ptr.data.count() }); + dir_entries_option = rfs.entries.put(&cached_dir_entry_result, .{ .entries = dir_entries_ptr, }) catch unreachable; @@ -2102,7 +2129,7 @@ pub const Resolver = struct { esm_resolution.status = .ModuleNotFound; return null; }; - const entries = resolved_dir_info.getEntries() orelse { + const entries = resolved_dir_info.getEntries(r.generation) orelse { esm_resolution.status = .ModuleNotFound; return null; }; @@ -2135,21 +2162,21 @@ pub const Resolver = struct { return null; }; - if (entry_query.entry.kind(&r.fs.fs) == .dir) { + if (entry_query.entry.kind(&r.fs.fs, r.store_fd) == .dir) { const ends_with_star = esm_resolution.status == .ExactEndsWithStar; esm_resolution.status = .UnsupportedDirectoryImport; // Try to have a friendly error message if people forget the "/index.js" suffix if (ends_with_star) { if (r.dirInfoCached(abs_esm_path) catch null) |dir_info| { - if (dir_info.getEntries()) |dir_entries| { + if (dir_info.getEntries(r.generation)) |dir_entries| { const index = "index"; bun.copy(u8, bufs(.load_as_file), index); for (extension_order) |ext| { var file_name = bufs(.load_as_file)[0 .. index.len + ext.len]; bun.copy(u8, file_name[index.len..], ext); const index_query = dir_entries.get(file_name); - if (index_query != null and index_query.?.entry.kind(&r.fs.fs) == .file) { + if (index_query != null and index_query.?.entry.kind(&r.fs.fs, r.store_fd) == .file) { missing_suffix = std.fmt.allocPrint(r.allocator, "/{s}", .{file_name}) catch unreachable; // defer r.allocator.free(missing_suffix); if (r.debug_logs) |*debug| { @@ -2237,6 +2264,8 @@ pub const Resolver = struct { false, null, ); + _ = bun.JSC.Node.Syscall.close(entry.fd); + // The file name needs to be persistent because it can have errors // and if those errors need to print the filename // then it will be undefined memory if we parse another tsconfig.json late @@ -2422,10 +2451,10 @@ pub const Resolver = struct { defer { // Anything - if (open_dir_count > 0 and r.fs.fs.needToCloseFiles()) { + if (open_dir_count > 0 and (!r.store_fd or r.fs.fs.needToCloseFiles())) { var open_dirs: []std.fs.IterableDir = bufs(.open_dirs)[0..open_dir_count]; for (open_dirs) |*open_dir| { - open_dir.dir.close(); + _ = bun.JSC.Node.Syscall.close(open_dir.dir.fd); } } } @@ -2459,12 +2488,14 @@ pub const Resolver = struct { path.ptr[queue_top.unsafe_path.len] = 0; defer path.ptr[queue_top.unsafe_path.len] = prev_char; var sentinel = path.ptr[0..queue_top.unsafe_path.len :0]; + _open_dir = std.fs.openIterableDirAbsoluteZ( sentinel, .{ .no_follow = !follow_symlinks, }, ); + bun.fs.debug("open({s}) = {any}", .{ sentinel, _open_dir }); // } } @@ -2548,30 +2579,46 @@ pub const Resolver = struct { var dir_entries_option: *Fs.FileSystem.RealFS.EntriesOption = undefined; var needs_iter: bool = true; + var in_place: ?*Fs.FileSystem.DirEntry = null; if (rfs.entries.atIndex(cached_dir_entry_result.index)) |cached_entry| { - if (cached_entry.* == .entries) { + if (cached_entry.entries.generation >= r.generation) { dir_entries_option = cached_entry; needs_iter = false; + } else { + in_place = cached_entry.entries; } } if (needs_iter) { const allocator = bun.fs_allocator; - var entries_ptr = allocator.create(Fs.FileSystem.DirEntry) catch unreachable; - entries_ptr.* = Fs.FileSystem.DirEntry.init(dir_path); - if (FeatureFlags.store_file_descriptors) { - Fs.FileSystem.setMaxFd(open_dir.dir.fd); - entries_ptr.fd = open_dir.dir.fd; - } + var new_entry = Fs.FileSystem.DirEntry.init( + if 
(in_place) |existing| existing.dir else Fs.FileSystem.DirnameStore.instance.append(string, dir_path) catch unreachable, + r.generation, + ); + var dir_iterator = open_dir.iterate(); - while (try dir_iterator.next()) |_value| { - entries_ptr.addEntry(_value, allocator, void, {}) catch unreachable; + while (dir_iterator.next() catch null) |_value| { + new_entry.addEntry( + if (in_place) |existing| &existing.data else null, + _value, + allocator, + void, + {}, + ) catch unreachable; } - + if (in_place) |existing| { + existing.data.clearAndFree(allocator); + } + if (!r.store_fd) { + new_entry.fd = 0; + } + var dir_entries_ptr = in_place orelse allocator.create(Fs.FileSystem.DirEntry) catch unreachable; + dir_entries_ptr.* = new_entry; dir_entries_option = try rfs.entries.put(&cached_dir_entry_result, .{ - .entries = entries_ptr, + .entries = dir_entries_ptr, }); + bun.fs.debug("readdir({d}, {s}) = {d}", .{ open_dir.dir.fd, dir_path, dir_entries_ptr.data.count() }); } // We must initialize it as empty so that the result index is correct. @@ -2597,7 +2644,7 @@ pub const Resolver = struct { } else if (queue_slice.len == 1) { // const next_in_queue = queue_slice[0]; // const next_basename = std.fs.path.basename(next_in_queue.unsafe_path); - // if (dir_info_ptr.getEntries()) |entries| { + // if (dir_info_ptr.getEntries(r.generation)) |entries| { // if (entries.get(next_basename) != null) { // return null; // } @@ -3011,9 +3058,9 @@ pub const Resolver = struct { base[0.."index".len].* = "index".*; bun.copy(u8, base["index".len..], ext); - if (dir_info.getEntries()) |entries| { + if (dir_info.getEntries(r.generation)) |entries| { if (entries.get(base)) |lookup| { - if (lookup.entry.kind(rfs) == .file) { + if (lookup.entry.kind(rfs, r.store_fd) == .file) { const out_buf = brk: { if (lookup.entry.abs_path.isEmpty()) { const parts = [_]string{ dir_info.abs_path, base }; @@ -3281,6 +3328,8 @@ pub const Resolver = struct { const dir_entry: *Fs.FileSystem.RealFS.EntriesOption = rfs.readDirectory( dir_path, null, + r.generation, + r.store_fd, ) catch { return null; }; @@ -3311,7 +3360,7 @@ pub const Resolver = struct { } if (entries.get(base)) |query| { - if (query.entry.kind(rfs) == .file) { + if (query.entry.kind(rfs, r.store_fd) == .file) { if (r.debug_logs) |*debug| { debug.addNoteFmt("Found file \"{s}\" ", .{base}); } @@ -3346,7 +3395,7 @@ pub const Resolver = struct { } if (entries.get(file_name)) |query| { - if (query.entry.kind(rfs) == .file) { + if (query.entry.kind(rfs, r.store_fd) == .file) { if (r.debug_logs) |*debug| { debug.addNoteFmt("Found file \"{s}\" ", .{buffer}); } @@ -3398,7 +3447,7 @@ pub const Resolver = struct { buffer[segment.len..buffer.len][0..ext_to_replace.len].* = ext_to_replace.*; if (entries.get(buffer)) |query| { - if (query.entry.kind(rfs) == .file) { + if (query.entry.kind(rfs, r.store_fd) == .file) { if (r.debug_logs) |*debug| { debug.addNoteFmt("Rewrote to \"{s}\" ", .{buffer}); } @@ -3480,7 +3529,7 @@ pub const Resolver = struct { // if (entries != null) { if (!info.isNodeModules()) { if (entries.getComptimeQuery("node_modules")) |entry| { - info.flags.setPresent(.has_node_modules, (entry.entry.kind(rfs)) == .dir); + info.flags.setPresent(.has_node_modules, (entry.entry.kind(rfs, r.store_fd)) == .dir); } } @@ -3512,7 +3561,7 @@ pub const Resolver = struct { if (info.isNodeModules()) { if (entries.getComptimeQuery(".bin")) |q| { - if (q.entry.kind(rfs) == .dir) { + if (q.entry.kind(rfs, r.store_fd) == .dir) { if (!bin_folders_loaded) { bin_folders_loaded = true; 
bin_folders = BinFolderArray.init(0) catch unreachable; @@ -3562,12 +3611,12 @@ pub const Resolver = struct { // Make sure "absRealPath" is the real path of the directory (resolving any symlinks) if (!r.opts.preserve_symlinks) { - if (parent.?.getEntries()) |parent_entries| { + if (parent.?.getEntries(r.generation)) |parent_entries| { if (parent_entries.get(base)) |lookup| { - if (entries.fd != 0 and lookup.entry.cache.fd == 0) lookup.entry.cache.fd = entries.fd; + if (entries.fd != 0 and lookup.entry.cache.fd == 0 and r.store_fd) lookup.entry.cache.fd = entries.fd; const entry = lookup.entry; - var symlink = entry.symlink(rfs); + var symlink = entry.symlink(rfs, r.store_fd); if (symlink.len > 0) { if (r.debug_logs) |*logs| { logs.addNote(std.fmt.allocPrint(r.allocator, "Resolved symlink \"{s}\" to \"{s}\"", .{ path, symlink }) catch unreachable); @@ -3592,7 +3641,7 @@ pub const Resolver = struct { // Record if this directory has a package.json file if (entries.getComptimeQuery("package.json")) |lookup| { const entry = lookup.entry; - if (entry.kind(rfs) == .file) { + if (entry.kind(rfs, r.store_fd) == .file) { info.package_json = if (r.usePackageManager() and !info.hasNodeModules() and !info.isNodeModules()) r.parsePackageJSON(path, if (FeatureFlags.store_file_descriptors) fd else 0, package_id, true) catch null else @@ -3625,7 +3674,7 @@ pub const Resolver = struct { if (r.opts.tsconfig_override == null) { if (entries.getComptimeQuery("tsconfig.json")) |lookup| { const entry = lookup.entry; - if (entry.kind(rfs) == .file) { + if (entry.kind(rfs, r.store_fd) == .file) { const parts = [_]string{ path, "tsconfig.json" }; tsconfig_path = r.fs.absBuf(&parts, bufs(.dir_info_uncached_filename)); @@ -3634,7 +3683,7 @@ pub const Resolver = struct { if (tsconfig_path == null) { if (entries.getComptimeQuery("jsconfig.json")) |lookup| { const entry = lookup.entry; - if (entry.kind(rfs) == .file) { + if (entry.kind(rfs, r.store_fd) == .file) { const parts = [_]string{ path, "jsconfig.json" }; tsconfig_path = r.fs.absBuf(&parts, bufs(.dir_info_uncached_filename)); } diff --git a/src/router.zig b/src/router.zig index eab4b5351..3e0dc0dba 100644 --- a/src/router.zig +++ b/src/router.zig @@ -431,7 +431,7 @@ const RouteLoader = struct { continue :outer; } - switch (entry.kind(&fs.fs)) { + switch (entry.kind(&fs.fs, false)) { .dir => { inline for (banned_dirs) |banned_dir| { if (strings.eqlComptime(entry.base(), comptime banned_dir)) { diff --git a/test/bundler/bundler-reloader-script.ts b/test/bundler/bundler-reloader-script.ts new file mode 100644 index 000000000..4a04ab448 --- /dev/null +++ b/test/bundler/bundler-reloader-script.ts @@ -0,0 +1,41 @@ +// This test serves two purposes: +// 1. If previously seen files are rebuilt, the second time it is rebuilt, we +// read the directory entries from the filesystem again. +// +// That way, if the developer changes a file, we will see the change. +// +// 2. Checks the file descriptor count to make sure we're not leaking any files between re-builds. 
+import { tmpdir } from "os";
+import { realpathSync, unlinkSync } from "fs";
+import { join } from "path";
+import { openSync, closeSync } from "fs";
+const tmp = realpathSync(tmpdir());
+const input = join(tmp, "input.js");
+const mutate = join(tmp, "mutate.js");
+try {
+  unlinkSync(mutate);
+} catch (e) {}
+await Bun.write(input, "import value from './mutate.js';\n" + `export default value;` + "\n");
+
+await Bun.build({
+  entrypoints: [input],
+});
+await Bun.write(mutate, "export default 1;\n");
+
+const maxfd = openSync(process.execPath, 0);
+closeSync(maxfd);
+const { outputs: second } = await Bun.build({
+  entrypoints: [input],
+});
+const text = await second?.[0]?.result?.text();
+
+if (!text?.includes?.(" = 1")) {
+  throw new Error("Expected text to include ' = 1', but received\n\n" + text);
+}
+
+const newMax = openSync(process.execPath, 0);
+if (newMax !== maxfd) {
+  throw new Error("File descriptors leaked! Expected " + maxfd + " but got " + newMax + "");
+}
+
+process.exit(0);
diff --git a/test/bundler/bundler_reloadable.test.ts b/test/bundler/bundler_reloadable.test.ts
new file mode 100644
index 000000000..e05e76666
--- /dev/null
+++ b/test/bundler/bundler_reloadable.test.ts
@@ -0,0 +1,17 @@
+import { test, expect } from "bun:test";
+import { bunEnv, bunExe } from "harness";
+import { join } from "path";
+
+test("rebuilding busts the directory entries cache", () => {
+  const { exitCode, stderr } = Bun.spawnSync({
+    cmd: [bunExe(), join(import.meta.dir, "bundler-reloader-script.ts")],
+    env: bunEnv,
+    stderr: "pipe",
+    stdout: "inherit",
+  });
+  if (stderr.byteLength > 0) {
+    throw new Error(stderr.toString());
+  }
+
+  expect(exitCode).toBe(0);
+});
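bundler_reloadable.test.ts drives bundler-reloader-script.ts in a subprocess via Bun.spawnSync, treating any stderr output or a non-zero exit code as a failure; from a repository checkout it would typically be invoked as `bun test test/bundler/bundler_reloadable.test.ts`.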