about summary refs log tree commit diff
diff options
context:
space:
mode:
authorGravatar Jarred Sumner <jarred@jarredsumner.com> 2021-08-21 22:53:25 -0700
committerGravatar Jarred Sumner <jarred@jarredsumner.com> 2021-08-21 22:53:25 -0700
commite012efa1243d09fb1de282ac0a1fa6c8b07538a5 (patch)
tree46a3d71fbfd8abccc650554bbb54dcf1415a9a1c
parent468c22de0e8deff28b4b7f780c640ffe3529343a (diff)
downloadbun-e012efa1243d09fb1de282ac0a1fa6c8b07538a5.tar.gz
bun-e012efa1243d09fb1de282ac0a1fa6c8b07538a5.tar.zst
bun-e012efa1243d09fb1de282ac0a1fa6c8b07538a5.zip
Fix watcher when you move files/dirs around. It'll bust the cache and recreate it (and leak memory)
Former-commit-id: 8faf6127547411c1fdcee9e4e7440825f21ecd99
-rw-r--r--src/allocators.zig81
-rw-r--r--src/bundler.zig20
-rw-r--r--src/feature_flags.zig2
-rw-r--r--src/fs.zig52
-rw-r--r--src/http.zig67
-rw-r--r--src/resolver/package_json.zig2
-rw-r--r--src/resolver/resolver.zig36
-rw-r--r--src/router.zig34
-rw-r--r--src/string_immutable.zig26
-rw-r--r--src/watcher.zig146
10 files changed, 297 insertions, 169 deletions
diff --git a/src/allocators.zig b/src/allocators.zig
index 9a18049dc..353392b20 100644
--- a/src/allocators.zig
+++ b/src/allocators.zig
@@ -124,8 +124,9 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
pub var backing_buf_used: u16 = 0;
const Allocator = std.mem.Allocator;
const Self = @This();
+ const OverflowListType = std.ArrayListUnmanaged(ValueType);
- overflow_list: std.ArrayListUnmanaged(ValueType),
+ overflow_list: OverflowListType,
allocator: *Allocator,
pub var instance: Self = undefined;
@@ -133,7 +134,7 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
pub fn init(allocator: *std.mem.Allocator) *Self {
instance = Self{
.allocator = allocator,
- .overflow_list = std.ArrayListUnmanaged(ValueType){},
+ .overflow_list = OverflowListType{},
};
return &instance;
@@ -533,32 +534,30 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
}
}
- pub fn remove(self: *Self, key: string) IndexType {
+ pub fn remove(self: *Self, key: []const u8) void {
const _key = Wyhash.hash(Seed, key);
- const index = self.index.get(_key) orelse return;
- switch (index) {
- Unassigned.index => {
- self.index.remove(_key);
- },
- NotFound.index => {
- self.index.remove(_key);
- },
- 0...max_index => {
- if (hasDeinit(ValueType)) {
- backing_buf[index].deinit();
- }
- backing_buf[index] = undefined;
- },
- else => {
- const i = index - count;
- if (hasDeinit(ValueType)) {
- self.overflow_list.items[i].deinit();
- }
- self.overflow_list.items[index - count] = undefined;
- },
- }
+ _ = self.index.remove(_key);
+ // const index = self.index.get(_key) orelse return;
+ // switch (index) {
+ // Unassigned.index, NotFound.index => {
+ // self.index.remove(_key);
+ // },
+ // 0...max_index => {
+ // if (comptime hasDeinit(ValueType)) {
+ // backing_buf[index].deinit();
+ // }
+
+ // backing_buf[index] = undefined;
+ // },
+ // else => {
+ // const i = index - count;
+ // if (hasDeinit(ValueType)) {
+ // self.overflow_list.items[i].deinit();
+ // }
+ // self.overflow_list.items[index - count] = undefined;
+ // },
+ // }
- return index;
}
};
if (!store_keys) {
@@ -573,11 +572,14 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
var key_list_buffer_used: usize = 0;
var key_list_slices: [count][]u8 = undefined;
var key_list_overflow: std.ArrayListUnmanaged([]u8) = undefined;
-
+ var instance_loaded = false;
pub fn init(allocator: *std.mem.Allocator) *Self {
- instance = Self{
- .map = BSSMapType.init(allocator),
- };
+ if (!instance_loaded) {
+ instance = Self{
+ .map = BSSMapType.init(allocator),
+ };
+ instance_loaded = true;
+ }
return &instance;
}
@@ -660,7 +662,7 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
}
// For now, don't free the keys.
- pub fn remove(self: *Self, key: string) IndexType {
+ pub fn remove(self: *Self, key: []const u8) void {
return self.map.remove(key);
}
};
@@ -770,20 +772,17 @@ pub fn TBSSMap(comptime ValueType: type, comptime count: anytype, store_keys: bo
}
}
- pub fn remove(self: *Self, key: string) IndexType {
+ pub fn remove(self: *Self, key: []const u8) IndexType {
const _key = Wyhash.hash(Seed, key);
const index = self.index.get(_key) orelse return;
+ defer _ = self.index.remove(_key);
+
switch (index) {
- Unassigned.index => {
- self.index.remove(_key);
- },
- NotFound.index => {
- self.index.remove(_key);
- },
+ NotFound.index, Unassigned.index => {},
0...max_index => {
- if (hasDeinit(ValueType)) {
- backing_buf[index].deinit();
- }
+ // if (hasDeinit(ValueType)) {
+ // backing_buf[index].deinit();
+ // }
backing_buf[index] = undefined;
},
else => {
diff --git a/src/bundler.zig b/src/bundler.zig
index 410db7cc3..ad95f1fe4 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -296,6 +296,26 @@ pub fn NewBundler(cache_files: bool) type {
pub const isCacheEnabled = cache_files;
+ pub fn clone(this: *ThisBundler, allocator: *std.mem.Allocator, to: *ThisBundler) !void {
+ to.* = this.*;
+ to.setAllocator(allocator);
+ to.log = try allocator.create(logger.Log);
+ to.log.* = logger.Log.init(allocator);
+ to.setLog(to.log);
+ }
+
+ pub fn setLog(this: *ThisBundler, log: *logger.Log) void {
+ this.log = log;
+ this.linker.log = log;
+ this.resolver.log = log;
+ }
+
+ pub fn setAllocator(this: *ThisBundler, allocator: *std.mem.Allocator) void {
+ this.allocator = allocator;
+ this.linker.allocator = allocator;
+ this.resolver.allocator = allocator;
+ }
+
// to_bundle:
// thread_pool: *ThreadPool,
diff --git a/src/feature_flags.zig b/src/feature_flags.zig
index 44d901f14..a773213ef 100644
--- a/src/feature_flags.zig
+++ b/src/feature_flags.zig
@@ -34,7 +34,7 @@ pub const css_supports_fence = true;
pub const enable_entry_cache = true;
pub const enable_bytecode_caching = false;
-pub const watch_directories = false;
+pub const watch_directories = true;
// This feature flag exists so when you have defines inside package.json, you can use single quotes in nested strings.
pub const allow_json_single_quotes = true;
diff --git a/src/fs.zig b/src/fs.zig
index 3ec572fc7..e35b537c1 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -160,6 +160,10 @@ pub const FileSystem = struct {
fd: StoredFileDescriptorType = 0,
data: EntryMap,
+ pub fn removeEntry(dir: *DirEntry, name: string) !void {
+ dir.data.remove(name);
+ }
+
pub fn addEntry(dir: *DirEntry, entry: std.fs.Dir.Entry) !void {
var _kind: Entry.Kind = undefined;
switch (entry.kind) {
@@ -179,10 +183,13 @@ pub const FileSystem = struct {
}
// entry.name only lives for the duration of the iteration
- const name = try FileSystem.FilenameStore.instance.appendLowerCase(@TypeOf(entry.name), entry.name);
+ const name = if (entry.name.len >= strings.StringOrTinyString.Max)
+ strings.StringOrTinyString.init(try FileSystem.FilenameStore.instance.appendLowerCase(@TypeOf(entry.name), entry.name))
+ else
+ strings.StringOrTinyString.initLowerCase(entry.name);
- const index = try EntryStore.instance.append(Entry{
- .base = name,
+ const result = Entry{
+ .base_ = name,
.dir = dir.dir,
.mutex = Mutex.init(),
// Call "stat" lazily for performance. The "@material-ui/icons" package
@@ -193,9 +200,10 @@ pub const FileSystem = struct {
.symlink = "",
.kind = _kind,
},
- });
+ };
+ const index = try EntryStore.instance.append(result);
- try dir.data.put(name, index);
+ try dir.data.put(EntryStore.instance.at(index).?.base(), index);
}
pub fn updateDir(i: *DirEntry, dir: string) void {
@@ -242,11 +250,11 @@ pub const FileSystem = struct {
const query = scratch_lookup_buffer[0 .. end + 1];
const result_index = entry.data.get(query) orelse return null;
const result = EntryStore.instance.at(result_index) orelse return null;
- if (!strings.eql(result.base, query)) {
+ if (!strings.eql(result.base(), query)) {
return Entry.Lookup{ .entry = result, .diff_case = Entry.Lookup.DifferentCase{
.dir = entry.dir,
.query = _query,
- .actual = result.base,
+ .actual = result.base(),
} };
}
@@ -263,11 +271,11 @@ pub const FileSystem = struct {
const result_index = entry.data.getWithHash(&query, query_hashed) orelse return null;
const result = EntryStore.instance.at(result_index) orelse return null;
- if (!strings.eqlComptime(result.base, query)) {
+ if (!strings.eqlComptime(result.base(), query)) {
return Entry.Lookup{ .entry = result, .diff_case = Entry.Lookup.DifferentCase{
.dir = entry.dir,
.query = &query,
- .actual = result.base,
+ .actual = result.base(),
} };
}
@@ -290,10 +298,14 @@ pub const FileSystem = struct {
pub const Entry = struct {
cache: Cache = Cache{},
dir: string,
- base: string,
+ base_: strings.StringOrTinyString,
mutex: Mutex,
need_stat: bool = true,
+ pub inline fn base(this: *const Entry) string {
+ return this.base_.slice();
+ }
+
pub const Lookup = struct {
entry: *Entry,
diff_case: ?DifferentCase,
@@ -306,7 +318,8 @@ pub const FileSystem = struct {
};
pub fn deinit(e: *Entry, allocator: *std.mem.Allocator) void {
- allocator.free(e.base);
+ e.base_.deinit(allocator);
+
allocator.free(e.dir);
allocator.free(e.cache.symlink);
allocator.destroy(e);
@@ -325,7 +338,7 @@ pub const FileSystem = struct {
pub fn kind(entry: *Entry, fs: *Implementation) Kind {
if (entry.need_stat) {
entry.need_stat = false;
- entry.cache = fs.kind(entry.dir, entry.base) catch unreachable;
+ entry.cache = fs.kind(entry.dir, entry.base()) catch unreachable;
}
return entry.cache.kind;
}
@@ -333,7 +346,7 @@ pub const FileSystem = struct {
pub fn symlink(entry: *Entry, fs: *Implementation) string {
if (entry.need_stat) {
entry.need_stat = false;
- entry.cache = fs.kind(entry.dir, entry.base) catch unreachable;
+ entry.cache = fs.kind(entry.dir, entry.base()) catch unreachable;
}
return entry.cache.symlink;
}
@@ -503,6 +516,10 @@ pub const FileSystem = struct {
return !(rfs.file_limit > 254 and rfs.file_limit > (FileSystem.max_fd + 1) * 2);
}
+ pub fn bustEntriesCache(rfs: *RealFS, file_path: string) void {
+ rfs.entries.remove(file_path);
+ }
+
// Always try to max out how many files we can keep open
pub fn adjustUlimit() usize {
var limit = std.os.getrlimit(.NOFILE) catch return 32;
@@ -634,7 +651,7 @@ pub const FileSystem = struct {
// This custom map implementation:
// - Preallocates a fixed amount of directory name space
// - Doesn't store directory names which don't exist.
- pub const Map = allocators.TBSSMap(EntriesOption, Preallocate.Counts.dir_entry, false, 128);
+ pub const Map = allocators.BSSMap(EntriesOption, Preallocate.Counts.dir_entry, false, 128);
};
// Limit the number of files open simultaneously to avoid ulimit issues
@@ -693,7 +710,7 @@ pub const FileSystem = struct {
}
fn readDirectoryError(fs: *RealFS, dir: string, err: anyerror) !*EntriesOption {
- if (FeatureFlags.disable_entry_cache) {
+ if (FeatureFlags.enable_entry_cache) {
fs.entries_mutex.lock();
defer fs.entries_mutex.unlock();
var get_or_put_result = try fs.entries.getOrPut(dir);
@@ -716,7 +733,7 @@ pub const FileSystem = struct {
var dir = _dir;
var cache_result: ?allocators.Result = null;
- if (FeatureFlags.disable_entry_cache) {
+ if (FeatureFlags.enable_entry_cache) {
fs.entries_mutex.lock();
defer fs.entries_mutex.unlock();
@@ -750,7 +767,7 @@ pub const FileSystem = struct {
return fs.readDirectoryError(dir, err) catch unreachable;
};
- if (FeatureFlags.disable_entry_cache) {
+ if (FeatureFlags.enable_entry_cache) {
fs.entries_mutex.lock();
defer fs.entries_mutex.unlock();
const result = EntriesOption{
@@ -1081,4 +1098,3 @@ test "PathName.init" {
}
test {}
-
diff --git a/src/http.zig b/src/http.zig
index fa4665ff2..9f19d930f 100644
--- a/src/http.zig
+++ b/src/http.zig
@@ -1740,7 +1740,13 @@ pub const Server = struct {
const file_paths = slice.items(.file_path);
var counts = slice.items(.count);
const kinds = slice.items(.kind);
+ const hashes = slice.items(.hash);
+ const parent_hashes = slice.items(.parent_hash);
+ const fds = slice.items(.fd);
var header = fbs.getWritten();
+ defer ctx.watcher.flushEvictions();
+ defer Output.flush();
+
for (events) |event| {
const file_path = file_paths[event.index];
const update_count = counts[event.index] + 1;
@@ -1760,29 +1766,46 @@ pub const Server = struct {
}
}
- defer Output.flush();
-
switch (kind) {
.file => {
- const change_message = Api.WebsocketMessageFileChangeNotification{
- .id = id,
- .loader = (ctx.bundler.options.loaders.get(path.ext) orelse .file).toAPI(),
- };
+ if (event.op.delete or event.op.rename) {
+ var rfs: *Fs.FileSystem.RealFS = &ctx.bundler.fs.fs;
+ ctx.watcher.removeAtIndex(
+ event.index,
+ 0,
+ &.{},
+ .file,
+ );
- var content_writer = ByteApiWriter.init(&content_fbs);
- change_message.encode(&content_writer) catch unreachable;
- const change_buf = content_fbs.getWritten();
- const written_buf = filechange_buf[0 .. header.len + change_buf.len];
- RequestContext.WebsocketHandler.broadcast(written_buf) catch |err| {
- Output.prettyln("Error writing change notification: {s}", .{@errorName(err)});
- };
+ if (comptime FeatureFlags.verbose_watcher) {
+ Output.prettyln("<r><d>File changed: {s}<r>", .{ctx.bundler.fs.relativeTo(file_path)});
+ }
+ } else {
+ const change_message = Api.WebsocketMessageFileChangeNotification{
+ .id = id,
+ .loader = (ctx.bundler.options.loaders.get(path.ext) orelse .file).toAPI(),
+ };
- Output.prettyln("<r><d>Detected file change: {s}", .{ctx.bundler.fs.relativeTo(file_path)});
+ var content_writer = ByteApiWriter.init(&content_fbs);
+ change_message.encode(&content_writer) catch unreachable;
+ const change_buf = content_fbs.getWritten();
+ const written_buf = filechange_buf[0 .. header.len + change_buf.len];
+ RequestContext.WebsocketHandler.broadcast(written_buf) catch |err| {
+ Output.prettyln("Error writing change notification: {s}<r>", .{@errorName(err)});
+ };
+ Output.prettyln("<r><d>Detected edit: {s}<r>", .{ctx.bundler.fs.relativeTo(file_path)});
+ }
},
.directory => {
var rfs: *Fs.FileSystem.RealFS = &ctx.bundler.fs.fs;
rfs.bustEntriesCache(file_path);
- Output.prettyln("<r><d>Detected folder change: {s}", .{ctx.bundler.fs.relativeTo(file_path)});
+ ctx.bundler.resolver.dir_cache.remove(file_path);
+
+ if (event.op.delete or event.op.rename) {
+ ctx.watcher.removeAtIndex(event.index, hashes[event.index], parent_hashes, .directory);
+ }
+
+ Output.prettyln("<r><d>Folder change: {s}<r>", .{ctx.bundler.fs.relativeTo(file_path)});
},
}
}
@@ -2051,6 +2074,20 @@ pub const Server = struct {
pub fn initWatcher(server: *Server) !void {
server.watcher = try Watcher.init(server, server.bundler.fs, server.allocator);
+
+ if (comptime FeatureFlags.watch_directories) {
+ server.bundler.resolver.onStartWatchingDirectoryCtx = server.watcher;
+ server.bundler.resolver.onStartWatchingDirectory = onMaybeWatchDirectory;
+ }
+ }
+
+ pub fn onMaybeWatchDirectory(watch: *Watcher, file_path: string, dir_fd: StoredFileDescriptorType) void {
+ // We don't want to watch:
+ // - Directories outside the root directory
+ // - Directories inside node_modules
+ if (std.mem.indexOf(u8, file_path, "node_modules") == null and std.mem.indexOf(u8, file_path, watch.fs.top_level_dir) != null) {
+ watch.addDirectory(dir_fd, file_path, Watcher.getHash(file_path), false) catch {};
+ }
}
pub fn start(allocator: *std.mem.Allocator, options: Api.TransformOptions) !void {
diff --git a/src/resolver/package_json.zig b/src/resolver/package_json.zig
index fea7fef8a..4b1b9d530 100644
--- a/src/resolver/package_json.zig
+++ b/src/resolver/package_json.zig
@@ -368,7 +368,7 @@ pub const PackageJSON = struct {
// TODO: remove this extra copy
const parts = [_]string{ input_path, "package.json" };
const package_json_path_ = r.fs.abs(&parts);
- const package_json_path = r.fs.filename_store.append(@TypeOf(package_json_path_), package_json_path_) catch unreachable;
+ const package_json_path = r.fs.dirname_store.append(@TypeOf(package_json_path_), package_json_path_) catch unreachable;
const entry = r.caches.fs.readFile(
r.fs,
diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig
index b48b42da4..8dfcd939a 100644
--- a/src/resolver/resolver.zig
+++ b/src/resolver/resolver.zig
@@ -11,6 +11,7 @@ const PackageJSON = @import("./package_json.zig").PackageJSON;
usingnamespace @import("./data_url.zig");
pub const DirInfo = @import("./dir_info.zig");
const Expr = @import("../js_ast.zig").Expr;
+const HTTPWatcher = @import("../http.zig").Watcher;
const Wyhash = std.hash.Wyhash;
const hash_map_v2 = @import("../hash_map_v2.zig");
@@ -277,6 +278,9 @@ pub fn NewResolver(cache_files: bool) type {
debug_logs: ?DebugLogs = null,
elapsed: i128 = 0, // tracing
+ onStartWatchingDirectory: ?fn (*HTTPWatcher, dir_path: string, dir_fd: StoredFileDescriptorType) void = null,
+ onStartWatchingDirectoryCtx: ?*HTTPWatcher = null,
+
caches: CacheSet,
// These are sets that represent various conditions for the "exports" field
@@ -586,7 +590,7 @@ pub fn NewResolver(cache_files: bool) type {
}
} else if (dir.abs_real_path.len > 0) {
path.non_symlink = path.text;
- var parts = [_]string{ dir.abs_real_path, query.entry.base };
+ var parts = [_]string{ dir.abs_real_path, query.entry.base() };
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var out = r.fs.absBuf(&parts, &buf);
const symlink = try Fs.FileSystem.FilenameStore.instance.append(@TypeOf(out), out);
@@ -688,7 +692,7 @@ pub fn NewResolver(cache_files: bool) type {
}
return Result{
- .path_pair = .{ .primary = Path.init(r.fs.filename_store.append(@TypeOf(abs_path), abs_path) catch unreachable) },
+ .path_pair = .{ .primary = Path.init(r.fs.dirname_store.append(@TypeOf(abs_path), abs_path) catch unreachable) },
.is_external = true,
};
}
@@ -703,7 +707,7 @@ pub fn NewResolver(cache_files: bool) type {
if (r.checkBrowserMap(pkg, rel_path)) |remap| {
// Is the path disabled?
if (remap.len == 0) {
- var _path = Path.init(r.fs.filename_store.append(string, abs_path) catch unreachable);
+ var _path = Path.init(r.fs.dirname_store.append(string, abs_path) catch unreachable);
_path.is_disabled = true;
return Result{
.path_pair = PathPair{
@@ -996,14 +1000,14 @@ pub fn NewResolver(cache_files: bool) type {
// this might leak
if (!std.fs.path.isAbsolute(result.base_url)) {
const paths = [_]string{ file_dir, result.base_url };
- result.base_url = r.fs.filename_store.append(string, r.fs.absBuf(&paths, &tsconfig_base_url_buf)) catch unreachable;
+ result.base_url = r.fs.dirname_store.append(string, r.fs.absBuf(&paths, &tsconfig_base_url_buf)) catch unreachable;
}
}
if (result.paths.count() > 0 and (result.base_url_for_paths.len == 0 or !std.fs.path.isAbsolute(result.base_url_for_paths))) {
// this might leak
const paths = [_]string{ file_dir, result.base_url };
- result.base_url_for_paths = r.fs.filename_store.append(string, r.fs.absBuf(&paths, &tsconfig_base_url_buf)) catch unreachable;
+ result.base_url_for_paths = r.fs.dirname_store.append(string, r.fs.absBuf(&paths, &tsconfig_base_url_buf)) catch unreachable;
}
return result;
@@ -1544,7 +1548,7 @@ pub fn NewResolver(cache_files: bool) type {
if (lookup.entry.kind(rfs) == .file) {
const parts = [_]string{ path, base };
const out_buf_ = r.fs.absBuf(&parts, &index_buf);
- const out_buf = r.fs.filename_store.append(@TypeOf(out_buf_), out_buf_) catch unreachable;
+ const out_buf = r.fs.dirname_store.append(@TypeOf(out_buf_), out_buf_) catch unreachable;
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Found file: \"{s}\"", .{out_buf}) catch unreachable;
}
@@ -1657,6 +1661,7 @@ pub fn NewResolver(cache_files: bool) type {
debug.addNoteFmt("Attempting to load \"{s}\" as a directory", .{path}) catch {};
debug.increaseIndent() catch {};
}
+
defer {
if (r.debug_logs) |*debug| {
debug.decreaseIndent() catch {};
@@ -1813,8 +1818,8 @@ pub fn NewResolver(cache_files: bool) type {
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Found file \"{s}\" ", .{base}) catch {};
}
- const abs_path_parts = [_]string{ query.entry.dir, query.entry.base };
- const abs_path = r.fs.filename_store.append(string, r.fs.absBuf(&abs_path_parts, &TemporaryBuffer.ExtensionPathBuf)) catch unreachable;
+ const abs_path_parts = [_]string{ query.entry.dir, query.entry.base() };
+ const abs_path = r.fs.dirname_store.append(string, r.fs.absBuf(&abs_path_parts, &TemporaryBuffer.ExtensionPathBuf)) catch unreachable;
return LoadResult{
.path = abs_path,
@@ -1843,7 +1848,7 @@ pub fn NewResolver(cache_files: bool) type {
// now that we've found it, we allocate it.
return LoadResult{
- .path = r.fs.filename_store.append(@TypeOf(buffer), buffer) catch unreachable,
+ .path = r.fs.dirname_store.append(@TypeOf(buffer), buffer) catch unreachable,
.diff_case = query.diff_case,
.dirname_fd = entries.fd,
};
@@ -1885,7 +1890,7 @@ pub fn NewResolver(cache_files: bool) type {
}
return LoadResult{
- .path = r.fs.filename_store.append(@TypeOf(buffer), buffer) catch unreachable,
+ .path = r.fs.dirname_store.append(@TypeOf(buffer), buffer) catch unreachable,
.diff_case = query.diff_case,
.dirname_fd = entries.fd,
};
@@ -1901,6 +1906,15 @@ pub fn NewResolver(cache_files: bool) type {
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Failed to find \"{s}\" ", .{path}) catch {};
}
+
+ if (comptime FeatureFlags.watch_directories) {
+ // For existent directories which don't find a match
+ // Start watching it automatically,
+ // onStartWatchingDirectory fn decides whether to actually watch.
+ if (r.onStartWatchingDirectoryCtx) |ctx| {
+ r.onStartWatchingDirectory.?(ctx, entries.dir, entries.fd);
+ }
+ }
return null;
}
@@ -1957,7 +1971,7 @@ pub fn NewResolver(cache_files: bool) type {
} else if (parent.?.abs_real_path.len > 0) {
// this might leak a little i'm not sure
const parts = [_]string{ parent.?.abs_real_path, base };
- symlink = r.fs.filename_store.append(string, r.fs.joinBuf(&parts, &dir_info_uncached_filename_buf)) catch unreachable;
+ symlink = r.fs.dirname_store.append(string, r.fs.joinBuf(&parts, &dir_info_uncached_filename_buf)) catch unreachable;
if (r.debug_logs) |*logs| {
try logs.addNote(std.fmt.allocPrint(r.allocator, "Resolved symlink \"{s}\" to \"{s}\"", .{ path, symlink }) catch unreachable);
diff --git a/src/router.zig b/src/router.zig
index fb561fcf3..a1f1f826b 100644
--- a/src/router.zig
+++ b/src/router.zig
@@ -62,7 +62,7 @@ pub fn getEntryPointsWithBuffer(this: *const Router, allocator: *std.mem.Allocat
);
if (children.len == 0) {
if (Fs.FileSystem.DirEntry.EntryStore.instance.at(this.routes.routes.items(.entry_index)[i])) |entry| {
- str_len += entry.base.len + entry.dir.len;
+ str_len += entry.base().len + entry.dir.len;
}
}
}
@@ -78,10 +78,10 @@ pub fn getEntryPointsWithBuffer(this: *const Router, allocator: *std.mem.Allocat
if (children.len == 0) {
if (Fs.FileSystem.DirEntry.EntryStore.instance.at(this.routes.routes.items(.entry_index)[i])) |entry| {
if (comptime absolute) {
- var parts = [_]string{ entry.dir, entry.base };
+ var parts = [_]string{ entry.dir, entry.base() };
entry_points[entry_point_i] = this.fs.absBuf(&parts, remain);
} else {
- var parts = [_]string{ "/", this.config.asset_prefix_path, this.fs.relativeTo(entry.dir), entry.base };
+ var parts = [_]string{ "/", this.config.asset_prefix_path, this.fs.relativeTo(entry.dir), entry.base() };
entry_points[entry_point_i] = this.fs.joinBuf(&parts, remain);
}
@@ -118,24 +118,24 @@ pub fn loadRoutes(
var iter = entries.data.iterator();
outer: while (iter.next()) |entry_ptr| {
const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(entry_ptr.value) orelse continue;
- if (entry.base[0] == '.') {
+ if (entry.base()[0] == '.') {
continue :outer;
}
switch (entry.kind(fs)) {
.dir => {
inline for (banned_dirs) |banned_dir| {
- if (strings.eqlComptime(entry.base, comptime banned_dir)) {
+ if (strings.eqlComptime(entry.base(), comptime banned_dir)) {
continue :outer;
}
}
- var abs_parts = [_]string{ entry.dir, entry.base };
+ var abs_parts = [_]string{ entry.dir, entry.base() };
if (resolver.readDirInfoIgnoreError(this.fs.abs(&abs_parts))) |_dir_info| {
const dir_info: *const DirInfo = _dir_info;
var route: Route = Route.parse(
- entry.base,
+ entry.base(),
Fs.PathName.init(entry.dir[this.config.dir.len..]).dirWithTrailingSlash(),
"",
entry_ptr.value,
@@ -160,14 +160,14 @@ pub fn loadRoutes(
},
.file => {
- const extname = std.fs.path.extension(entry.base);
+ const extname = std.fs.path.extension(entry.base());
// exclude "." or ""
if (extname.len < 2) continue;
for (this.config.extensions) |_extname| {
if (strings.eql(extname[1..], _extname)) {
var route = Route.parse(
- entry.base,
+ entry.base(),
// we extend the pointer length by one to get it's slash
entry.dir.ptr[this.config.dir.len..entry.dir.len],
extname,
@@ -411,7 +411,7 @@ pub const RouteMap = struct {
return null;
} else {
if (Fs.FileSystem.DirEntry.EntryStore.instance.at(head.entry_index)) |entry| {
- var parts = [_]string{ entry.dir, entry.base };
+ var parts = [_]string{ entry.dir, entry.base() };
const file_path = Fs.FileSystem.instance.absBuf(&parts, this.matched_route_buf);
match_result = Match{
@@ -421,7 +421,7 @@ pub const RouteMap = struct {
.hash = head.full_hash,
.query_string = this.url_path.query_string,
.pathname = this.url_path.pathname,
- .basename = entry.base,
+ .basename = entry.base(),
.file_path = file_path,
};
@@ -500,14 +500,14 @@ pub const RouteMap = struct {
if (path.len == 0) {
if (this.index) |index| {
const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(routes_slice.items(.entry_index)[index]).?;
- const parts = [_]string{ entry.dir, entry.base };
+ const parts = [_]string{ entry.dir, entry.base() };
return Match{
.params = params,
.name = routes_slice.items(.name)[index],
.path = routes_slice.items(.path)[index],
.pathname = url_path.pathname,
- .basename = entry.base,
+ .basename = entry.base(),
.hash = index_route_hash,
.file_path = Fs.FileSystem.instance.absBuf(&parts, file_path_buf),
.query_string = url_path.query_string,
@@ -531,14 +531,14 @@ pub const RouteMap = struct {
for (children) |child_hash, i| {
if (child_hash == index_route_hash) {
const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(routes_slice.items(.entry_index)[i + route.children.offset]).?;
- const parts = [_]string{ entry.dir, entry.base };
+ const parts = [_]string{ entry.dir, entry.base() };
return Match{
.params = params,
.name = routes_slice.items(.name)[i],
.path = routes_slice.items(.path)[i],
.pathname = url_path.pathname,
- .basename = entry.base,
+ .basename = entry.base(),
.hash = child_hash,
.file_path = Fs.FileSystem.instance.absBuf(&parts, file_path_buf),
.query_string = url_path.query_string,
@@ -550,14 +550,14 @@ pub const RouteMap = struct {
// /foo/bar => /foo/bar.js
} else {
const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(route.entry_index).?;
- const parts = [_]string{ entry.dir, entry.base };
+ const parts = [_]string{ entry.dir, entry.base() };
return Match{
.params = params,
.name = route.name,
.path = route.path,
.redirect_path = if (redirect) path else null,
.hash = full_hash,
- .basename = entry.base,
+ .basename = entry.base(),
.pathname = url_path.pathname,
.query_string = url_path.query_string,
.file_path = Fs.FileSystem.instance.absBuf(&parts, file_path_buf),
diff --git a/src/string_immutable.zig b/src/string_immutable.zig
index 7ae8bed31..84b72343e 100644
--- a/src/string_immutable.zig
+++ b/src/string_immutable.zig
@@ -38,21 +38,25 @@ pub fn cat(allocator: *std.mem.Allocator, first: string, second: string) !string
// 30 character string or a slice
pub const StringOrTinyString = struct {
- const Buffer = [30]u8;
+ pub const Max = 30;
+ const Buffer = [Max]u8;
+
remainder_buf: Buffer = undefined,
remainder_len: u7 = 0,
is_tiny_string: u1 = 0,
pub inline fn slice(this: *const StringOrTinyString) []const u8 {
- switch (this.is_tiny_string) {
- 1 => {
- return this.remainder_buf[0..this.remainder_len];
- },
- // TODO: maybe inline the readIntNative call?
- 0 => {
- const ptr = @intToPtr([*]const u8, std.mem.readIntNative(usize, this.remainder_buf[0..@sizeOf(usize)]));
- return ptr[0..std.mem.readIntNative(usize, this.remainder_buf[@sizeOf(usize) .. @sizeOf(usize) * 2])];
- },
- }
+ // This is a switch expression instead of a statement to make sure it uses the faster assembly
+ return switch (this.is_tiny_string) {
+ 1 => this.remainder_buf[0..this.remainder_len],
+ 0 => @intToPtr([*]const u8, std.mem.readIntNative(usize, this.remainder_buf[0..@sizeOf(usize)]))[0..std.mem.readIntNative(usize, this.remainder_buf[@sizeOf(usize) .. @sizeOf(usize) * 2])],
+ };
+ }
+
+ pub fn deinit(this: *StringOrTinyString, allocator: *std.mem.Allocator) void {
+ if (this.is_tiny_string == 1) return;
+
+ // var slice_ = this.slice();
+ // allocator.free(slice_);
}
pub fn init(stringy: string) StringOrTinyString {
diff --git a/src/watcher.zig b/src/watcher.zig
index 395bfabc5..b3cc4f27a 100644
--- a/src/watcher.zig
+++ b/src/watcher.zig
@@ -8,7 +8,8 @@ const os = std.os;
const KEvent = std.os.Kevent;
const Mutex = @import("./lock.zig").Lock;
-const ParentWatchItemIndex = u31;
+const WatchItemIndex = u16;
+const NoWatchItem: WatchItemIndex = std.math.maxInt(WatchItemIndex);
pub const WatchItem = struct {
file_path: string,
// filepath hash for quick comparison
@@ -17,14 +18,14 @@ pub const WatchItem = struct {
loader: options.Loader,
fd: StoredFileDescriptorType,
count: u32,
- parent_watch_item: ?ParentWatchItemIndex,
+ parent_hash: u32,
kind: Kind,
pub const Kind = enum { file, directory };
};
pub const WatchEvent = struct {
- index: u32,
+ index: WatchItemIndex,
op: Op,
pub fn fromKEvent(this: *WatchEvent, kevent: *const KEvent) void {
@@ -32,7 +33,7 @@ pub const WatchEvent = struct {
this.op.metadata = (kevent.fflags & std.os.NOTE_ATTRIB) > 0;
this.op.rename = (kevent.fflags & std.os.NOTE_RENAME) > 0;
this.op.write = (kevent.fflags & std.os.NOTE_WRITE) > 0;
- this.index = @truncate(u32, kevent.udata);
+ this.index = @truncate(WatchItemIndex, kevent.udata);
}
pub const Op = packed struct {
@@ -54,6 +55,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
const Watcher = @This();
const KEventArrayList = std.ArrayList(KEvent);
+ const WATCHER_MAX_LIST = 8096;
watchlist: Watchlist,
watched_count: usize = 0,
@@ -66,7 +68,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
watch_events: [128]WatchEvent = undefined,
// Everything being watched
- eventlist: [8096]KEvent = undefined,
+ eventlist: [WATCHER_MAX_LIST]KEvent = undefined,
eventlist_used: usize = 0,
fs: *Fs.FileSystem,
@@ -79,6 +81,8 @@ pub fn NewWatcher(comptime ContextType: type) type {
pub const HashType = u32;
+ var evict_list: [WATCHER_MAX_LIST]WatchItemIndex = undefined;
+
pub fn getHash(filepath: string) HashType {
return @truncate(HashType, std.hash.Wyhash.hash(0, filepath));
}
@@ -137,11 +141,63 @@ pub fn NewWatcher(comptime ContextType: type) type {
};
}
+ var evict_list_i: WatchItemIndex = 0;
+ pub fn removeAtIndex(this: *Watcher, index: WatchItemIndex, hash: HashType, parents: []HashType, comptime kind: WatchItem.Kind) void {
+ std.debug.assert(index != NoWatchItem);
+
+ evict_list[evict_list_i] = index;
+ evict_list_i += 1;
+
+ if (comptime kind == .directory) {
+ for (parents) |parent, i| {
+ if (parent == hash) {
+ evict_list[evict_list_i] = @truncate(WatchItemIndex, parent);
+ evict_list_i += 1;
+ }
+ }
+ }
+ }
+
+ pub fn flushEvictions(this: *Watcher) void {
+ if (evict_list_i == 0) return;
+ this.mutex.lock();
+ defer this.mutex.unlock();
+ defer evict_list_i = 0;
+
+ // swapRemove messes up the order
+ // But, it only messes up the order if any elements in the list appear after the item being removed
+ // So if we just sort the list by the biggest index first, that should be fine
+ std.sort.sort(
+ WatchItemIndex,
+ evict_list[0..evict_list_i],
+ {},
+ comptime std.sort.desc(WatchItemIndex),
+ );
+
+ var slice = this.watchlist.slice();
+ var fds = slice.items(.fd);
+ var last_item = NoWatchItem;
+
+ for (evict_list[0..evict_list_i]) |item, i| {
+ // catch duplicates, since the list is sorted, duplicates will appear right after each other
+ if (item == last_item) continue;
+ // close the file descriptors here. this should automatically remove it from being watched too.
+ std.os.close(fds[item]);
+ last_item = item;
+ }
+
+ last_item = NoWatchItem;
+ // This is split into two passes because reading the slice while modified is potentially unsafe.
+ for (evict_list[0..evict_list_i]) |item, i| {
+ if (item == last_item) continue;
+ this.watchlist.swapRemove(item);
+ last_item = item;
+ }
+ }
+
fn _watchLoop(this: *Watcher) !void {
const time = std.time;
- // poll at 1 second intervals if it hasn't received any events.
- // var timeout_spec = null;
std.debug.assert(this.fd > 0);
var changelist_array: [1]KEvent = std.mem.zeroes([1]KEvent);
@@ -167,7 +223,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
}
}
- pub fn indexOf(this: *Watcher, hash: u32) ?usize {
+ pub fn indexOf(this: *Watcher, hash: HashType) ?usize {
for (this.watchlist.items(.hash)) |other, i| {
if (hash == other) {
return i;
@@ -180,7 +236,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
this: *Watcher,
fd: StoredFileDescriptorType,
file_path: string,
- hash: u32,
+ hash: HashType,
loader: options.Loader,
dir_fd: StoredFileDescriptorType,
comptime copy_file_path: bool,
@@ -196,9 +252,9 @@ pub fn NewWatcher(comptime ContextType: type) type {
this: *Watcher,
fd: StoredFileDescriptorType,
file_path: string,
- hash: u32,
+ hash: HashType,
loader: options.Loader,
- parent_watch_item: ?ParentWatchItemIndex,
+ parent_hash: HashType,
comptime copy_file_path: bool,
) !void {
// https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html
@@ -214,7 +270,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
// we should monitor:
// - Delete
- event.fflags = std.os.NOTE_WRITE | std.os.NOTE_RENAME;
+ event.fflags = std.os.NOTE_WRITE | std.os.NOTE_RENAME | std.os.NOTE_DELETE;
// id
event.ident = @intCast(usize, fd);
@@ -246,7 +302,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
.count = 0,
.eventlist_index = @truncate(u32, index),
.loader = loader,
- .parent_watch_item = parent_watch_item,
+ .parent_hash = parent_hash,
.kind = .file,
});
}
@@ -255,9 +311,9 @@ pub fn NewWatcher(comptime ContextType: type) type {
this: *Watcher,
fd_: StoredFileDescriptorType,
file_path: string,
- hash: u32,
+ hash: HashType,
comptime copy_file_path: bool,
- ) !ParentWatchItemIndex {
+ ) !WatchItemIndex {
const fd = brk: {
if (fd_ > 0) break :brk fd_;
@@ -265,15 +321,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
break :brk @truncate(StoredFileDescriptorType, dir.fd);
};
- // It's not a big deal if we can't watch the parent directory
- // For now at least.
- const parent_watch_item: ?ParentWatchItemIndex = brk: {
- if (!this.isEligibleDirectory(file_path)) break :brk null;
-
- const parent_dir = Fs.PathName.init(file_path).dirWithTrailingSlash();
- const hashes = this.watchlist.items(.hash);
- break :brk @truncate(ParentWatchItemIndex, std.mem.indexOfScalar(HashType, hashes, Watcher.getHash(parent_dir)) orelse break :brk null);
- };
+ const parent_hash = Watcher.getHash(Fs.PathName.init(file_path).dirWithTrailingSlash());
// https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html
var event = std.mem.zeroes(KEvent);
@@ -288,7 +336,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
// we should monitor:
// - Delete
- event.fflags = std.os.NOTE_WRITE | std.os.NOTE_RENAME;
+ event.fflags = std.os.NOTE_WRITE | std.os.NOTE_RENAME | std.os.NOTE_DELETE;
// id
event.ident = @intCast(usize, fd);
@@ -320,21 +368,21 @@ pub fn NewWatcher(comptime ContextType: type) type {
.count = 0,
.eventlist_index = @truncate(u32, index),
.loader = options.Loader.file,
- .parent_watch_item = parent_watch_item,
+ .parent_hash = parent_hash,
.kind = .directory,
});
- return @truncate(ParentWatchItemIndex, this.watchlist.len - 1);
+ return @truncate(WatchItemIndex, this.watchlist.len - 1);
}
- pub fn isEligibleDirectory(this: *Watcher, dir: string) bool {
- return strings.indexOf(this.fs.top_level_dir, dir) != null;
+ pub inline fn isEligibleDirectory(this: *Watcher, dir: string) bool {
+ return strings.indexOf(dir, this.fs.top_level_dir) != null and strings.indexOf(dir, "node_modules") == null;
}
pub fn addDirectory(
this: *Watcher,
fd: StoredFileDescriptorType,
file_path: string,
- hash: u32,
+ hash: HashType,
comptime copy_file_path: bool,
) !void {
if (this.indexOf(hash) != null) {
@@ -353,7 +401,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
this: *Watcher,
fd: StoredFileDescriptorType,
file_path: string,
- hash: u32,
+ hash: HashType,
loader: options.Loader,
dir_fd: StoredFileDescriptorType,
comptime copy_file_path: bool,
@@ -364,31 +412,31 @@ pub fn NewWatcher(comptime ContextType: type) type {
const pathname = Fs.PathName.init(file_path);
const parent_dir = pathname.dirWithTrailingSlash();
- var parent_dir_hash: ?u32 = undefined;
- var watchlist_slice = this.watchlist.slice();
+ var parent_dir_hash: HashType = Watcher.getHash(parent_dir);
- var parent_watch_item: ?ParentWatchItemIndex = null;
+ var parent_watch_item: ?WatchItemIndex = null;
const autowatch_parent_dir = (comptime FeatureFlags.watch_directories) and this.isEligibleDirectory(parent_dir);
if (autowatch_parent_dir) {
+ var watchlist_slice = this.watchlist.slice();
+
if (dir_fd > 0) {
var fds = watchlist_slice.items(.fd);
if (std.mem.indexOfScalar(StoredFileDescriptorType, fds, dir_fd)) |i| {
- parent_watch_item = @truncate(ParentWatchItemIndex, i);
+ parent_watch_item = @truncate(WatchItemIndex, i);
}
}
if (parent_watch_item == null) {
const hashes = watchlist_slice.items(.hash);
- parent_dir_hash = Watcher.getHash(parent_dir);
- if (std.mem.indexOfScalar(HashType, hashes, parent_dir_hash.?)) |i| {
- parent_watch_item = @truncate(ParentWatchItemIndex, i);
+ if (std.mem.indexOfScalar(HashType, hashes, parent_dir_hash)) |i| {
+ parent_watch_item = @truncate(WatchItemIndex, i);
}
}
}
try this.watchlist.ensureUnusedCapacity(this.allocator, 1 + @intCast(usize, @boolToInt(parent_watch_item == null)));
if (autowatch_parent_dir) {
- parent_watch_item = parent_watch_item orelse try this.appendDirectoryAssumeCapacity(dir_fd, parent_dir, parent_dir_hash orelse Watcher.getHash(parent_dir), copy_file_path);
+ parent_watch_item = parent_watch_item orelse try this.appendDirectoryAssumeCapacity(dir_fd, parent_dir, parent_dir_hash, copy_file_path);
}
try this.appendFileAssumeCapacity(
@@ -396,25 +444,15 @@ pub fn NewWatcher(comptime ContextType: type) type {
file_path,
hash,
loader,
- parent_watch_item,
+ parent_dir_hash,
copy_file_path,
);
- if (FeatureFlags.verbose_watcher) {
- if (!autowatch_parent_dir or parent_watch_item == null) {
- if (strings.indexOf(file_path, this.cwd)) |i| {
- Output.prettyln("<r><d>Added <b>./{s}<r><d> to watch list.<r>", .{file_path[i + this.cwd.len ..]});
- } else {
- Output.prettyln("<r><d>Added <b>{s}<r><d> to watch list.<r>", .{file_path});
- }
+ if (comptime FeatureFlags.verbose_watcher) {
+ if (strings.indexOf(file_path, this.cwd)) |i| {
+ Output.prettyln("<r><d>Added <b>./{s}<r><d> to watch list.<r>", .{file_path[i + this.cwd.len ..]});
} else {
- if (strings.indexOf(file_path, this.cwd)) |i| {
- Output.prettyln("<r><d>Added <b>./{s}<r><d> to watch list (and parent dir).<r>", .{
- file_path[i + this.cwd.len ..],
- });
- } else {
- Output.prettyln("<r><d>Added <b>{s}<r><d> to watch list (and parent dir).<r>", .{file_path});
- }
+ Output.prettyln("<r><d>Added <b>{s}<r><d> to watch list.<r>", .{file_path});
}
}
}