author Jarred Sumner <jarred@jarredsumner.com> 2021-09-20 17:39:39 -0700
committer Jarred Sumner <jarred@jarredsumner.com> 2021-09-20 17:39:39 -0700
commit 9f1e0660dd5409c178dfea38a3e40848eb8ff974 (patch)
tree 8e74bc88e820183d403bcf23af6b361d6f7187c4
parent 7c05bb7d1d868d89608dc88bdcfaa9e4ccd4c1e2 (diff)
Fix handling when the file metadata store exceeds the statically allocated count (at time of writing, 16k)
-rw-r--r--  src/allocators.zig  149
-rw-r--r--  src/fs.zig           19
-rw-r--r--  src/router.zig       78
3 files changed, 86 insertions, 160 deletions
diff --git a/src/allocators.zig b/src/allocators.zig
index b29435a2d..2025c4961 100644
--- a/src/allocators.zig
+++ b/src/allocators.zig
@@ -132,29 +132,46 @@ const hasDeinit = std.meta.trait.hasFn("deinit")(ValueType);
pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
const count = _count * 2;
const max_index = count - 1;
- var list_type: type = undefined;
- var list_count = count;
const overflow_init_count = std.math.max(count / 8, 32);
return struct {
pub var backing_buf: [count]ValueType = undefined;
- pub var backing_buf_used: u16 = 0;
+ const ChunkSize = 256;
+ const OverflowBlock = struct {
+ used: std.atomic.Atomic(u16) = std.atomic.Atomic(u16).init(0),
+ data: [ChunkSize]ValueType = undefined,
+ prev: ?*OverflowBlock = null,
+
+ pub fn append(this: *OverflowBlock, item: ValueType) !*ValueType {
+ const index = this.used.fetchAdd(1, .AcqRel);
+ if (index >= ChunkSize) return error.OutOfMemory;
+ this.data[index] = item;
+ return &this.data[index];
+ }
+ };
+
+ pub var used: u32 = 0;
const Allocator = std.mem.Allocator;
const Self = @This();
- const OverflowListType = std.ArrayListUnmanaged(ValueType);
- overflow_list: OverflowListType,
allocator: *Allocator,
mutex: Mutex = Mutex.init(),
+ head: *OverflowBlock = undefined,
+ tail: OverflowBlock = OverflowBlock{},
pub var instance: Self = undefined;
pub var loaded = false;
+ pub inline fn blockIndex(index: u31) usize {
+ return index / ChunkSize;
+ }
+
pub fn init(allocator: *std.mem.Allocator) *Self {
if (!loaded) {
instance = Self{
.allocator = allocator,
- .overflow_list = OverflowListType{},
+ .tail = OverflowBlock{},
};
+ instance.head = &instance.tail;
loaded = true;
}
@@ -162,121 +179,37 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
}
pub fn isOverflowing() bool {
- return backing_buf_used >= @as(u16, count);
- }
-
- pub fn at(self: *const Self, index: IndexType) ?*ValueType {
- if (index.index == NotFound.index or index.index == Unassigned.index) return null;
-
- if (index.is_overflow) {
- return &self.overflow_list.items[index.index];
- } else {
- return &backing_buf[index.index];
- }
+ return used >= @as(u16, count);
}
pub fn exists(self: *Self, value: ValueType) bool {
return isSliceInBuffer(value, backing_buf);
}
- pub fn append(self: *Self, value: ValueType) !IndexType {
- self.mutex.lock();
- defer self.mutex.unlock();
- var result = IndexType{ .index = std.math.maxInt(u31), .is_overflow = backing_buf_used > max_index };
- if (result.is_overflow) {
- result.index = @intCast(u31, self.overflow_list.items.len);
- try self.overflow_list.append(self.allocator, value);
- } else {
- result.index = backing_buf_used;
- backing_buf[result.index] = value;
- backing_buf_used += 1;
- if (backing_buf_used >= max_index) {
- self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, overflow_init_count);
- }
- }
-
- return result;
- }
- pub const Pair = struct { index: IndexType, value: *ValueType };
- pub fn appendGet(self: *Self, value: ValueType) !Pair {
- self.mutex.lock();
- defer self.mutex.unlock();
-
- var result = IndexType{ .index = std.math.maxInt(u31), .is_overflow = backing_buf_used > max_index };
- if (result.is_overflow) {
- result.index = @intCast(u31, self.overflow_list.items.len);
- try self.overflow_list.append(self.allocator, value);
- return Pair{ .index = result, .value = &self.overflow_list.items[result.index] };
- } else {
- result.index = backing_buf_used;
- backing_buf[result.index] = value;
- backing_buf_used += 1;
- if (backing_buf_used >= max_index) {
- self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, overflow_init_count);
- }
- return Pair{ .index = result, .value = &backing_buf[result.index] };
- }
+ fn appendOverflow(self: *Self, value: ValueType) !*ValueType {
+ used += 1;
+ return self.head.append(value) catch brk: {
+ var new_block = try self.allocator.create(OverflowBlock);
+ new_block.* = OverflowBlock{};
+ new_block.prev = self.head;
+ self.head = new_block;
+ break :brk self.head.append(value);
+ };
}
- pub fn update(self: *Self, result: *IndexType, value: ValueType) !*ValueType {
+ pub fn append(self: *Self, value: ValueType) !*ValueType {
self.mutex.lock();
defer self.mutex.unlock();
-
- if (result.index.index == NotFound.index or result.index.index == Unassigned.index) {
- result.index.is_overflow = backing_buf_used > max_index;
- if (result.index.is_overflow) {
- result.index.index = @intCast(u31, self.overflow_list.items.len);
- } else {
- result.index.index = backing_buf_used;
- backing_buf_used += 1;
- if (backing_buf_used >= max_index) {
- self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, overflow_init_count);
- }
- }
- }
-
- if (result.index.is_overflow) {
- if (self.overflow_list.items.len == result.index.index) {
- const real_index = self.overflow_list.items.len;
- try self.overflow_list.append(self.allocator, value);
- } else {
- self.overflow_list.items[result.index.index] = value;
- }
-
- return &self.overflow_list.items[result.index.index];
+ if (used > max_index) {
+ return self.appendOverflow(value);
} else {
- backing_buf[result.index.index] = value;
-
- return &backing_buf[result.index.index];
+ const index = used;
+ backing_buf[index] = value;
+ used += 1;
+ return &backing_buf[index];
}
}
-
- pub fn remove(self: *Self, index: IndexType) void {
- @compileError("Not implemented yet.");
- // switch (index) {
- // Unassigned.index => {
- // self.index.remove(_key);
- // },
- // NotFound.index => {
- // self.index.remove(_key);
- // },
- // 0...max_index => {
- // if (hasDeinit(ValueType)) {
- // backing_buf[index].deinit();
- // }
- // backing_buf[index] = undefined;
- // },
- // else => {
- // const i = index - count;
- // if (hasDeinit(ValueType)) {
- // self.overflow_list.items[i].deinit();
- // }
- // self.overflow_list.items[index - count] = undefined;
- // },
- // }
-
- // return index;
- }
+ pub const Pair = struct { index: IndexType, value: *ValueType };
};
}
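
The core of this change: instead of spilling into a single std.ArrayListUnmanaged (whose backing memory can move when it grows), entries past the static backing_buf now go into a chain of fixed-size OverflowBlocks that are never resized or relocated, so append can hand out *ValueType pointers that stay valid for the life of the store. Below is a minimal sketch of that pattern, not the actual BSSList; the names Store, Block, and block_size are illustrative, and the real code keeps backing_buf in a global so pointers into it are stable.

const std = @import("std");

fn Store(comptime T: type, comptime fixed_count: usize) type {
    return struct {
        const block_size = 256;
        const Block = struct {
            used: usize = 0,
            data: [block_size]T = undefined,
            prev: ?*Block = null,
        };

        // In the real BSSList the fixed buffer is a global (`pub var backing_buf`),
        // which is what makes pointers into it stable; a struct field is used here
        // only to keep the sketch self-contained.
        backing: [fixed_count]T = undefined,
        used: usize = 0,
        head: ?*Block = null,
        allocator: *std.mem.Allocator,

        const Self = @This();

        pub fn append(self: *Self, value: T) !*T {
            // Fast path: the statically allocated buffer still has room.
            if (self.used < fixed_count) {
                self.backing[self.used] = value;
                self.used += 1;
                return &self.backing[self.used - 1];
            }
            // Overflow path: when the newest block is full (or none exists yet),
            // allocate another one and chain it to the earlier blocks via `prev`.
            // Blocks are never moved, so the returned pointer stays valid.
            if (self.head == null or self.head.?.used == block_size) {
                var new_block = try self.allocator.create(Block);
                new_block.* = Block{ .prev = self.head };
                self.head = new_block;
            }
            var block = self.head.?;
            block.data[block.used] = value;
            block.used += 1;
            return &block.data[block.used - 1];
        }
    };
}

Because append now returns a stable pointer, callers no longer need an IndexType to look entries up again later, which is why the fs.zig and router.zig changes below replace allocators.IndexType with *Entry throughout.
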
diff --git a/src/fs.zig b/src/fs.zig
index 4676211d0..517dcdbd5 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -152,7 +152,7 @@ pub const FileSystem = struct {
}
pub const DirEntry = struct {
- pub const EntryMap = hash_map.StringHashMap(allocators.IndexType);
+ pub const EntryMap = hash_map.StringHashMap(*Entry);
pub const EntryStore = allocators.BSSList(Entry, Preallocate.Counts.files);
dir: string,
fd: StoredFileDescriptorType = 0,
@@ -191,7 +191,7 @@ pub const FileSystem = struct {
else
strings.StringOrTinyString.initLowerCase(entry.name);
- const stored = try EntryStore.instance.appendGet(
+ var stored = try EntryStore.instance.append(
Entry{
.base_ = name,
.base_lowercase_ = name_lowercased,
@@ -208,9 +208,9 @@ pub const FileSystem = struct {
},
);
- const stored_name = stored.value.base();
+ const stored_name = stored.base();
- try dir.data.put(stored.value.base_lowercase(), stored.index);
+ try dir.data.put(stored.base_lowercase(), stored);
if (comptime FeatureFlags.verbose_fs) {
if (_kind == .dir) {
Output.prettyln(" + {s}/", .{stored_name});
@@ -250,7 +250,7 @@ pub const FileSystem = struct {
var iter = d.data.iterator();
while (iter.next()) |file_entry| {
- EntryStore.instance.at(file_entry.value).?.deinit(d.data.allocator);
+ // EntryStore.instance.at(file_entry.value).?.deinit(d.data.allocator);
}
d.data.deinit();
@@ -262,8 +262,7 @@ pub const FileSystem = struct {
std.debug.assert(scratch_lookup_buffer.len >= _query.len);
const query = strings.copyLowercase(_query, &scratch_lookup_buffer);
- const result_index = entry.data.get(query) orelse return null;
- const result = EntryStore.instance.at(result_index) orelse return null;
+ const result = entry.data.get(query) orelse return null;
const basename = result.base();
if (!strings.eql(basename, _query)) {
return Entry.Lookup{ .entry = result, .diff_case = Entry.Lookup.DifferentCase{
@@ -284,8 +283,7 @@ pub const FileSystem = struct {
const query_hashed = comptime DirEntry.EntryMap.getHash(&query);
- const result_index = entry.data.getWithHash(&query, query_hashed) orelse return null;
- const result = EntryStore.instance.at(result_index) orelse return null;
+ const result = entry.data.getWithHash(&query, query_hashed) orelse return null;
const basename = result.base();
if (!strings.eqlComptime(basename, comptime query[0..query_str.len])) {
@@ -310,8 +308,7 @@ pub const FileSystem = struct {
const query_hashed = comptime DirEntry.EntryMap.getHash(&query);
- const result_index = entry.data.getWithHash(&query, query_hashed) orelse return false;
- return result_index.index != allocators.NotFound.index and result_index.index != allocators.Unassigned.index;
+ return entry.data.getWithHash(&query, query_hashed) != null;
}
};
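
With DirEntry.EntryMap now mapping lowercased names straight to *Entry, the two-step lookup (map → IndexType → EntryStore.instance.at) collapses into a single get, and the NotFound/Unassigned sentinel checks disappear. Storing *Entry directly is only safe because of the allocator change above: pointers returned by EntryStore.instance.append never move. A small sketch of the hazard that the old ArrayList-backed overflow would have posed for such pointers (2021-era std APIs assumed; the test name and values are illustrative):

const std = @import("std");

test "ArrayList growth can move elements; chained blocks cannot" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var list = std.ArrayList(u32).init(&gpa.allocator);
    defer list.deinit();

    try list.append(42);
    const first: *u32 = &list.items[0];

    // Appending past the current capacity reallocates the backing memory,
    // so `first` may now dangle. This is why *Entry could not simply be
    // stored in the map while overflow entries lived in an ArrayList; the
    // chained OverflowBlocks introduced above are never moved, so the
    // pointers they hand out stay valid.
    var i: u32 = 0;
    while (i < 4096) : (i += 1) {
        try list.append(i);
    }
    _ = first; // dereferencing `first` here would be undefined behavior

    try std.testing.expect(list.items.len == 4097);
}
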
diff --git a/src/router.zig b/src/router.zig
index 148c1ed32..40767a644 100644
--- a/src/router.zig
+++ b/src/router.zig
@@ -61,9 +61,8 @@ pub fn getEntryPointsWithBuffer(this: *const Router, allocator: *std.mem.Allocat
@boolToInt(children.len == 0),
);
if (children.len == 0) {
- if (Fs.FileSystem.DirEntry.EntryStore.instance.at(this.routes.routes.items(.entry_index)[i])) |entry| {
- str_len += entry.base().len + entry.dir.len;
- }
+ const entry = this.routes.routes.items(.entry)[i];
+ str_len += entry.base().len + entry.dir.len;
}
}
@@ -76,18 +75,17 @@ pub fn getEntryPointsWithBuffer(this: *const Router, allocator: *std.mem.Allocat
while (i < route_count) : (i += 1) {
const children = this.routes.routes.items(.children)[i];
if (children.len == 0) {
- if (Fs.FileSystem.DirEntry.EntryStore.instance.at(this.routes.routes.items(.entry_index)[i])) |entry| {
- if (comptime absolute) {
- var parts = [_]string{ entry.dir, entry.base() };
- entry_points[entry_point_i] = this.fs.absBuf(&parts, remain);
- } else {
- var parts = [_]string{ "/", this.config.asset_prefix_path, this.fs.relativeTo(entry.dir), entry.base() };
- entry_points[entry_point_i] = this.fs.joinBuf(&parts, remain);
- }
-
- remain = remain[entry_points[entry_point_i].len..];
- entry_point_i += 1;
+ const entry = this.routes.routes.items(.entry)[i];
+ if (comptime absolute) {
+ var parts = [_]string{ entry.dir, entry.base() };
+ entry_points[entry_point_i] = this.fs.absBuf(&parts, remain);
+ } else {
+ var parts = [_]string{ "/", this.config.asset_prefix_path, this.fs.relativeTo(entry.dir), entry.base() };
+ entry_points[entry_point_i] = this.fs.joinBuf(&parts, remain);
}
+
+ remain = remain[entry_points[entry_point_i].len..];
+ entry_point_i += 1;
}
}
@@ -117,8 +115,7 @@ pub fn loadRoutes(
if (root_dir_info.getEntriesConst()) |entries| {
var iter = entries.data.iterator();
outer: while (iter.next()) |entry_ptr| {
- const entry_index = entry_ptr.value;
- const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(entry_index) orelse continue;
+ const entry = entry_ptr.value;
if (entry.base()[0] == '.') {
continue :outer;
}
@@ -139,7 +136,7 @@ pub fn loadRoutes(
entry.base(),
Fs.PathName.init(entry.dir[this.config.dir.len..]).dirWithTrailingSlash(),
"",
- entry_index,
+ entry,
);
route.parent = parent;
@@ -172,7 +169,7 @@ pub fn loadRoutes(
// we extend the pointer length by one to get it's slash
entry.dir.ptr[this.config.dir.len..entry.dir.len],
extname,
- entry_index,
+ entry,
);
route.parent = parent;
@@ -235,7 +232,7 @@ pub const Route = struct {
hash: u32,
children: Ptr = Ptr{},
parent: u16 = top_level_parent,
- entry_index: allocators.IndexType,
+ entry: *Fs.FileSystem.Entry,
full_hash: u32,
@@ -244,7 +241,7 @@ pub const Route = struct {
pub const List = std.MultiArrayList(Route);
pub const Ptr = TinyPtr;
- pub fn parse(base: string, dir: string, extname: string, entry_index: allocators.IndexType) Route {
+ pub fn parse(base: string, dir: string, extname: string, entry: *Fs.FileSystem.Entry) Route {
const ensure_slash = if (dir.len > 0 and dir[dir.len - 1] != '/') "/" else "";
var parts = [3]string{ dir, ensure_slash, base };
@@ -257,7 +254,7 @@ pub const Route = struct {
return Route{
.name = name,
.path = base,
- .entry_index = entry_index,
+ .entry = entry,
.hash = @truncate(
u32,
std.hash.Wyhash.hash(
@@ -411,23 +408,22 @@ pub const RouteMap = struct {
this.params.shrinkRetainingCapacity(start_len);
return null;
} else {
- if (Fs.FileSystem.DirEntry.EntryStore.instance.at(head.entry_index)) |entry| {
- var parts = [_]string{ entry.dir, entry.base() };
- const file_path = Fs.FileSystem.instance.absBuf(&parts, this.matched_route_buf);
-
- match_result = Match{
- .path = head.path,
- .name = Match.nameWithBasename(file_path, this.map.config.dir),
- .params = this.params,
- .hash = head.full_hash,
- .query_string = this.url_path.query_string,
- .pathname = this.url_path.pathname,
- .basename = entry.base(),
- .file_path = file_path,
- };
-
- this.matched_route_buf[match_result.file_path.len] = 0;
- }
+ const entry = head.entry;
+ var parts = [_]string{ entry.dir, entry.base() };
+ const file_path = Fs.FileSystem.instance.absBuf(&parts, this.matched_route_buf);
+
+ match_result = Match{
+ .path = head.path,
+ .name = Match.nameWithBasename(file_path, this.map.config.dir),
+ .params = this.params,
+ .hash = head.full_hash,
+ .query_string = this.url_path.query_string,
+ .pathname = this.url_path.pathname,
+ .basename = entry.base(),
+ .file_path = file_path,
+ };
+
+ this.matched_route_buf[match_result.file_path.len] = 0;
}
// Now that we know for sure the route will match, we append the param
@@ -500,7 +496,7 @@ pub const RouteMap = struct {
if (path.len == 0) {
if (this.index) |index| {
- const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(routes_slice.items(.entry_index)[index]).?;
+ const entry = routes_slice.items(.entry)[index];
const parts = [_]string{ entry.dir, entry.base() };
return Match{
@@ -531,7 +527,7 @@ pub const RouteMap = struct {
const children = routes_slice.items(.hash)[route.children.offset .. route.children.offset + route.children.len];
for (children) |child_hash, i| {
if (child_hash == index_route_hash) {
- const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(routes_slice.items(.entry_index)[i + route.children.offset]).?;
+ const entry = routes_slice.items(.entry)[i + route.children.offset];
const parts = [_]string{ entry.dir, entry.base() };
const file_path = Fs.FileSystem.instance.absBuf(&parts, file_path_buf);
return Match{
@@ -550,7 +546,7 @@ pub const RouteMap = struct {
// It's an exact route, there are no params
// /foo/bar => /foo/bar.js
} else {
- const entry = Fs.FileSystem.DirEntry.EntryStore.instance.at(route.entry_index).?;
+ const entry = route.entry;
const parts = [_]string{ entry.dir, entry.base() };
const file_path = Fs.FileSystem.instance.absBuf(&parts, file_path_buf);
return Match{