Diffstat (limited to 'src')
-rw-r--r--  src/StaticHashMap.zig                          756
-rw-r--r--  src/bun.js/api/bun/socket.zig                    4
-rw-r--r--  src/bun.js/api/bun/spawn.zig                    10
-rw-r--r--  src/bun.js/base.zig                             42
-rw-r--r--  src/bun.js/event_loop.zig                        5
-rw-r--r--  src/bun.js/test/jest.zig                         2
-rw-r--r--  src/bundler/bundle_v2.zig                        2
-rw-r--r--  src/cli/run_command.zig                        349
-rw-r--r--  src/deps/uws.zig                                17
-rw-r--r--  src/env_loader.zig                               2
-rw-r--r--  src/install/default-trusted-dependencies.txt   500
-rw-r--r--  src/install/install-scripts-allowlist.txt        4
-rw-r--r--  src/install/install.zig                         89
-rw-r--r--  src/install/lockfile.zig                        66
-rw-r--r--  src/io/io_darwin.zig                             2
-rw-r--r--  src/io/io_linux.zig                              2
-rw-r--r--  src/network_thread.zig                           2
17 files changed, 1796 insertions, 58 deletions
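For orientation before the diff body, here is a minimal usage sketch of the fixed-capacity hash map this commit adds in src/StaticHashMap.zig. It mirrors the API exercised by the tests in the diff below; the import path and the chosen key/value types are illustrative, not part of the commit.

```zig
const std = @import("std");
// Hypothetical import path; adjust to wherever StaticHashMap.zig lives.
const static_hash_map = @import("StaticHashMap.zig");

pub fn main() !void {
    // Capacity must be a power of two; storage is inline, so no allocator is needed.
    var map: static_hash_map.AutoStaticHashMap(usize, usize, 512) = .{};

    map.putAssumeCapacity(42, 1);
    std.debug.assert(map.get(42).? == 1);

    // delete() returns the removed value, or null if the key was absent.
    std.debug.assert(map.delete(42).? == 1);
    std.debug.assert(map.get(42) == null);
}
```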
diff --git a/src/StaticHashMap.zig b/src/StaticHashMap.zig new file mode 100644 index 000000000..e0cbb7cc6 --- /dev/null +++ b/src/StaticHashMap.zig @@ -0,0 +1,756 @@ +// https://github.com/lithdew/rheia/blob/162293d0f0e8d6572a8954c0add83f13f76b3cc6/hash_map.zig +// Apache License 2.0 +const std = @import("std"); + +const mem = std.mem; +const math = std.math; +const testing = std.testing; + +const assert = std.debug.assert; + +pub fn AutoHashMap(comptime K: type, comptime V: type, comptime max_load_percentage: comptime_int) type { + return HashMap(K, V, std.hash_map.AutoContext(K), max_load_percentage); +} + +pub fn AutoStaticHashMap(comptime K: type, comptime V: type, comptime capacity: comptime_int) type { + return StaticHashMap(K, V, std.hash_map.AutoContext(K), capacity); +} + +pub fn StaticHashMap(comptime K: type, comptime V: type, comptime Context: type, comptime capacity: usize) type { + assert(math.isPowerOfTwo(capacity)); + + const shift = 63 - math.log2_int(u64, capacity) + 1; + const overflow = capacity / 10 + (63 - @as(u64, shift) + 1) << 1; + + return struct { + const empty_hash = math.maxInt(u64); + + pub const Entry = struct { + hash: u64 = empty_hash, + key: K = std.mem.zeroes(K), + value: V = std.mem.zeroes(V), + + pub fn isEmpty(self: Entry) bool { + return self.hash == empty_hash; + } + + pub fn format(self: Entry, comptime layout: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = layout; + _ = options; + try std.fmt.format(writer, "(hash: {}, key: {}, value: {})", .{ self.hash, self.key, self.value }); + } + }; + + pub const GetOrPutResult = struct { + value_ptr: *V, + found_existing: bool, + }; + + const Self = @This(); + + entries: [capacity + overflow]Entry = [_]Entry{.{}} ** (capacity + overflow), + len: usize = 0, + shift: u6 = shift, + + // put_probe_count: usize = 0, + // get_probe_count: usize = 0, + // del_probe_count: usize = 0, + + pub usingnamespace HashMapMixin(Self, K, V, Context); + }; +} + +pub fn HashMap(comptime K: type, comptime V: type, comptime Context: type, comptime max_load_percentage: comptime_int) type { + return struct { + const empty_hash = math.maxInt(u64); + + pub const Entry = struct { + hash: u64 = empty_hash, + key: K = undefined, + value: V = undefined, + + pub fn isEmpty(self: Entry) bool { + return self.hash == empty_hash; + } + + pub fn format(self: Entry, comptime layout: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = layout; + _ = options; + try std.fmt.format(writer, "(hash: {}, key: {}, value: {})", .{ self.hash, self.key, self.value }); + } + }; + + pub const GetOrPutResult = struct { + value_ptr: *V, + found_existing: bool, + }; + + const Self = @This(); + + entries: [*]Entry, + len: usize = 0, + shift: u6, + + // put_probe_count: usize = 0, + // get_probe_count: usize = 0, + // del_probe_count: usize = 0, + + pub usingnamespace HashMapMixin(Self, K, V, Context); + + pub fn initCapacity(gpa: mem.Allocator, capacity: u64) !Self { + assert(math.isPowerOfTwo(capacity)); + + const shift = 63 - math.log2_int(u64, capacity) + 1; + const overflow = capacity / 10 + (63 - @as(u64, shift) + 1) << 1; + + const entries = try gpa.alloc(Entry, @as(usize, @intCast(capacity + overflow))); + @memset(entries, .{}); + + return Self{ + .entries = entries.ptr, + .shift = shift, + }; + } + + pub fn deinit(self: *Self, gpa: mem.Allocator) void { + gpa.free(self.slice()); + } + + pub fn ensureUnusedCapacity(self: *Self, gpa: mem.Allocator, count: usize) !void { + try self.ensureTotalCapacity(gpa, 
self.len + count); + } + + pub fn ensureTotalCapacity(self: *Self, gpa: mem.Allocator, count: usize) !void { + while (true) { + const capacity = @as(u64, 1) << (63 - self.shift + 1); + if (count <= capacity * max_load_percentage / 100) { + break; + } + try self.grow(gpa); + } + } + + fn grow(self: *Self, gpa: mem.Allocator) !void { + const capacity = @as(u64, 1) << (63 - self.shift + 1); + const overflow = capacity / 10 + (63 - @as(usize, self.shift) + 1) << 1; + const end = self.entries + @as(usize, @intCast(capacity + overflow)); + + var map = try Self.initCapacity(gpa, @as(usize, @intCast(capacity * 2))); + var src = self.entries; + var dst = map.entries; + + while (src != end) { + const entry = src[0]; + + const i = if (!entry.isEmpty()) entry.hash >> map.shift else 0; + const p = map.entries + i; + + dst = if (@intFromPtr(p) >= @intFromPtr(dst)) p else dst; + dst[0] = entry; + + src += 1; + dst += 1; + } + + self.deinit(gpa); + self.entries = map.entries; + self.shift = map.shift; + } + + pub fn put(self: *Self, gpa: mem.Allocator, key: K, value: V) !void { + try self.putContext(gpa, key, value, undefined); + } + + pub fn putContext(self: *Self, gpa: mem.Allocator, key: K, value: V, ctx: Context) !void { + try self.ensureUnusedCapacity(gpa, 1); + self.putAssumeCapacityContext(key, value, ctx); + } + + pub fn getOrPut(self: *Self, gpa: mem.Allocator, key: K) !GetOrPutResult { + return try self.getOrPutContext(gpa, key, undefined); + } + + pub fn getOrPutContext(self: *Self, gpa: mem.Allocator, key: K, ctx: Context) !GetOrPutResult { + try self.ensureUnusedCapacity(gpa, 1); + return self.getOrPutAssumeCapacityContext(key, ctx); + } + }; +} + +fn HashMapMixin( + comptime Self: type, + comptime K: type, + comptime V: type, + comptime Context: type, +) type { + return struct { + pub fn clearRetainingCapacity(self: *Self) void { + @memset(self.slice(), .{}); + self.len = 0; + } + + pub fn slice(self: *Self) []Self.Entry { + const capacity = @as(u64, 1) << (63 - self.shift + 1); + const overflow = capacity / 10 + (63 - @as(usize, self.shift) + 1) << 1; + return self.entries[0..@as(usize, @intCast(capacity + overflow))]; + } + + pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { + self.putAssumeCapacityContext(key, value, undefined); + } + + pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void { + const result = self.getOrPutAssumeCapacityContext(key, ctx); + if (!result.found_existing) result.value_ptr.* = value; + } + + pub fn getOrPutAssumeCapacity(self: *Self, key: K) Self.GetOrPutResult { + return self.getOrPutAssumeCapacityContext(key, undefined); + } + + pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) Self.GetOrPutResult { + var it: Self.Entry = .{ .hash = ctx.hash(key), .key = key, .value = undefined }; + var i = it.hash >> self.shift; + + assert(it.hash != Self.empty_hash); + + var inserted_at: ?usize = null; + while (true) : (i += 1) { + const entry = self.entries[i]; + if (entry.hash >= it.hash) { + if (ctx.eql(entry.key, key)) { + return .{ .found_existing = true, .value_ptr = &self.entries[i].value }; + } + self.entries[i] = it; + if (entry.isEmpty()) { + self.len += 1; + return .{ .found_existing = false, .value_ptr = &self.entries[inserted_at orelse i].value }; + } + if (inserted_at == null) { + inserted_at = i; + } + it = entry; + } + // self.put_probe_count += 1; + } + } + + pub fn get(self: *Self, key: K) ?V { + return self.getContext(key, undefined); + } + + pub fn getContext(self: *Self, key: K, ctx: Context) 
?V { + const hash = ctx.hash(key); + assert(hash != Self.empty_hash); + + var i = hash >> self.shift; + while (true) : (i += 1) { + const entry = self.entries[i]; + if (entry.hash >= hash) { + if (!ctx.eql(entry.key, key)) { + return null; + } + return entry.value; + } + // self.get_probe_count += 1; + } + } + + pub fn has(self: *Self, key: K) bool { + return self.hasContext(key, undefined); + } + + pub fn hasContext(self: *Self, key: K, ctx: Context) bool { + const hash = ctx.hash(key); + assert(hash != Self.empty_hash); + + var i = hash >> self.shift; + while (true) : (i += 1) { + const entry = self.entries[i]; + if (entry.hash >= hash) { + if (!ctx.eql(entry.key, key)) { + return false; + } + return true; + } + // self.get_probe_count += 1; + } + } + + pub fn delete(self: *Self, key: K) ?V { + return self.deleteContext(key, undefined); + } + + pub fn deleteContext(self: *Self, key: K, ctx: Context) ?V { + const hash = ctx.hash(key); + assert(hash != Self.empty_hash); + + var i = hash >> self.shift; + while (true) : (i += 1) { + const entry = self.entries[i]; + if (entry.hash >= hash) { + if (!ctx.eql(entry.key, key)) { + return null; + } + break; + } + // self.del_probe_count += 1; + } + + const value = self.entries[i].value; + + while (true) : (i += 1) { + const j = self.entries[i + 1].hash >> self.shift; + if (i < j or self.entries[i + 1].isEmpty()) { + break; + } + self.entries[i] = self.entries[i + 1]; + // self.del_probe_count += 1; + } + self.entries[i] = .{}; + self.len -= 1; + + return value; + } + }; +} + +pub fn SortedHashMap(comptime V: type, comptime max_load_percentage: comptime_int) type { + return struct { + const empty_hash: [32]u8 = [_]u8{0xFF} ** 32; + + pub const Entry = struct { + hash: [32]u8 = empty_hash, + value: V = undefined, + + pub fn isEmpty(self: Entry) bool { + return cmp(self.hash, empty_hash) == .eq; + } + + pub fn format(self: Entry, comptime layout: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = layout; + _ = options; + try std.fmt.format(writer, "(hash: {}, value: {})", .{ std.fmt.fmtSliceHexLower(mem.asBytes(&self.hash)), self.value }); + } + }; + + const Self = @This(); + + entries: [*]Entry, + len: usize = 0, + shift: u6, + + // put_probe_count: usize = 0, + // get_probe_count: usize = 0, + // del_probe_count: usize = 0, + + pub fn init(gpa: mem.Allocator) !Self { + return Self.initCapacity(gpa, 16); + } + + pub fn initCapacity(gpa: mem.Allocator, capacity: u64) !Self { + assert(math.isPowerOfTwo(capacity)); + + const shift = 63 - math.log2_int(u64, capacity) + 1; + const overflow = capacity / 10 + (63 - @as(u64, shift) + 1) << 1; + + const entries = try gpa.alloc(Entry, @as(usize, @intCast(capacity + overflow))); + @memset(entries, Entry{}); + + return Self{ + .entries = entries.ptr, + .shift = shift, + }; + } + + pub fn deinit(self: *Self, gpa: mem.Allocator) void { + gpa.free(self.slice()); + } + + /// The following routine has its branches optimized against inputs that are cryptographic hashes by + /// assuming that if the first 64 bits of 'a' and 'b' are equivalent, then 'a' and 'b' are most likely + /// equivalent. 
+ fn cmp(a: [32]u8, b: [32]u8) math.Order { + const msa = @as(u64, @bitCast(a[0..8].*)); + const msb = @as(u64, @bitCast(b[0..8].*)); + if (msa != msb) { + return if (mem.bigToNative(u64, msa) < mem.bigToNative(u64, msb)) .lt else .gt; + } else if (@reduce(.And, @as(@Vector(32, u8), a) == @as(@Vector(32, u8), b))) { + return .eq; + } else { + switch (math.order(mem.readIntBig(u64, a[8..16]), mem.readIntBig(u64, b[8..16]))) { + .eq => {}, + .lt => return .lt, + .gt => return .gt, + } + switch (math.order(mem.readIntBig(u64, a[16..24]), mem.readIntBig(u64, b[16..24]))) { + .eq => {}, + .lt => return .lt, + .gt => return .gt, + } + return math.order(mem.readIntBig(u64, a[24..32]), mem.readIntBig(u64, b[24..32])); + } + } + + /// In release-fast mode, LLVM will optimize this routine to utilize 109 cycles. This routine scatters + /// hash values across a table into buckets which are lexicographically ordered from one another in + /// ascending order. + fn idx(a: [32]u8, shift: u6) usize { + return @as(usize, @intCast(mem.readIntBig(u64, a[0..8]) >> shift)); + } + + pub fn clearRetainingCapacity(self: *Self) void { + @memset(self.slice(), Entry{}); + self.len = 0; + } + + pub fn slice(self: *Self) []Entry { + const capacity = @as(u64, 1) << (63 - self.shift + 1); + const overflow = capacity / 10 + (63 - @as(usize, self.shift) + 1) << 1; + return self.entries[0..@as(usize, @intCast(capacity + overflow))]; + } + + pub fn ensureUnusedCapacity(self: *Self, gpa: mem.Allocator, count: usize) !void { + try self.ensureTotalCapacity(gpa, self.len + count); + } + + pub fn ensureTotalCapacity(self: *Self, gpa: mem.Allocator, count: usize) !void { + while (true) { + const capacity = @as(u64, 1) << (63 - self.shift + 1); + if (count <= capacity * max_load_percentage / 100) { + break; + } + try self.grow(gpa); + } + } + + fn grow(self: *Self, gpa: mem.Allocator) !void { + const capacity = @as(u64, 1) << (63 - self.shift + 1); + const overflow = capacity / 10 + (63 - @as(usize, self.shift) + 1) << 1; + const end = self.entries + @as(usize, @intCast(capacity + overflow)); + + var map = try Self.initCapacity(gpa, @as(usize, @intCast(capacity * 2))); + var src = self.entries; + var dst = map.entries; + + while (src != end) { + const entry = src[0]; + + const i = if (!entry.isEmpty()) idx(entry.hash, map.shift) else 0; + const p = map.entries + i; + + dst = if (@intFromPtr(p) >= @intFromPtr(dst)) p else dst; + dst[0] = entry; + + src += 1; + dst += 1; + } + + self.deinit(gpa); + self.entries = map.entries; + self.shift = map.shift; + } + + pub fn put(self: *Self, gpa: mem.Allocator, key: [32]u8, value: V) !void { + try self.ensureUnusedCapacity(gpa, 1); + self.putAssumeCapacity(key, value); + } + + pub fn putAssumeCapacity(self: *Self, key: [32]u8, value: V) void { + const result = self.getOrPutAssumeCapacity(key); + if (!result.found_existing) result.value_ptr.* = value; + } + + pub const GetOrPutResult = struct { + value_ptr: *V, + found_existing: bool, + }; + + pub fn getOrPut(self: *Self, gpa: mem.Allocator, key: [32]u8) !GetOrPutResult { + try self.ensureUnusedCapacity(gpa, 1); + return self.getOrPutAssumeCapacity(key); + } + + pub fn getOrPutAssumeCapacity(self: *Self, key: [32]u8) GetOrPutResult { + assert(self.len < (@as(u64, 1) << (63 - self.shift + 1))); + assert(cmp(key, empty_hash) != .eq); + + var it: Entry = .{ .hash = key, .value = undefined }; + var i = idx(key, self.shift); + + var inserted_at: ?usize = null; + while (true) : (i += 1) { + const entry = self.entries[i]; + if (cmp(entry.hash, 
it.hash).compare(.gte)) { + if (cmp(entry.hash, key) == .eq) { + return .{ .found_existing = true, .value_ptr = &self.entries[i].value }; + } + self.entries[i] = it; + if (entry.isEmpty()) { + self.len += 1; + return .{ .found_existing = false, .value_ptr = &self.entries[inserted_at orelse i].value }; + } + if (inserted_at == null) { + inserted_at = i; + } + it = entry; + } + self.put_probe_count += 1; + } + } + + pub fn get(self: *Self, key: [32]u8) ?V { + assert(cmp(key, empty_hash) != .eq); + + var i = idx(key, self.shift); + while (true) : (i += 1) { + const entry = self.entries[i]; + if (cmp(entry.hash, key).compare(.gte)) { + if (cmp(entry.hash, key) != .eq) { + return null; + } + return entry.value; + } + // self.get_probe_count += 1; + } + } + + pub fn delete(self: *Self, key: [32]u8) ?V { + assert(cmp(key, empty_hash) != .eq); + + var i = idx(key, self.shift); + while (true) : (i += 1) { + const entry = self.entries[i]; + if (cmp(entry.hash, key).compare(.gte)) { + if (cmp(entry.hash, key) != .eq) { + return null; + } + break; + } + self.del_probe_count += 1; + } + + const value = self.entries[i].value; + + while (true) : (i += 1) { + const j = idx(self.entries[i + 1].hash, self.shift); + if (i < j or self.entries[i + 1].isEmpty()) { + break; + } + self.entries[i] = self.entries[i + 1]; + self.del_probe_count += 1; + } + self.entries[i] = .{}; + self.len -= 1; + + return value; + } + }; +} + +test "StaticHashMap: put, get, delete, grow" { + var map: AutoStaticHashMap(usize, usize, 512) = .{}; + + var seed: usize = 0; + while (seed < 128) : (seed += 1) { + var rng = std.rand.DefaultPrng.init(seed); + + const keys = try testing.allocator.alloc(usize, 512); + defer testing.allocator.free(keys); + + for (keys) |*key| key.* = @as(usize, rng.next()); + + try testing.expectEqual(@as(u6, 55), map.shift); + + for (keys, 0..) |key, i| map.putAssumeCapacity(key, i); + try testing.expectEqual(keys.len, map.len); + + var it: usize = 0; + for (map.slice()) |entry| { + if (!entry.isEmpty()) { + if (it > entry.hash) { + return error.Unsorted; + } + it = entry.hash; + } + } + + for (keys, 0..) |key, i| try testing.expectEqual(i, map.get(key).?); + for (keys, 0..) |key, i| try testing.expectEqual(i, map.delete(key).?); + } +} + +test "HashMap: put, get, delete, grow" { + var seed: usize = 0; + while (seed < 128) : (seed += 1) { + var rng = std.rand.DefaultPrng.init(seed); + + const keys = try testing.allocator.alloc(usize, 512); + defer testing.allocator.free(keys); + + for (keys) |*key| key.* = rng.next(); + + var map = try AutoHashMap(usize, usize, 50).initCapacity(testing.allocator, 16); + defer map.deinit(testing.allocator); + + try testing.expectEqual(@as(u6, 60), map.shift); + + for (keys, 0..) |key, i| try map.put(testing.allocator, key, i); + + try testing.expectEqual(@as(u6, 54), map.shift); + try testing.expectEqual(keys.len, map.len); + + var it: usize = 0; + for (map.slice()) |entry| { + if (!entry.isEmpty()) { + if (it > entry.hash) { + return error.Unsorted; + } + it = entry.hash; + } + } + + for (keys, 0..) |key, i| try testing.expectEqual(i, map.get(key).?); + for (keys, 0..) 
|key, i| try testing.expectEqual(i, map.delete(key).?); + } +} + +test "SortedHashMap: cmp" { + const prefix = [_]u8{'0'} ** 8 ++ [_]u8{'1'} ** 23; + const a = prefix ++ [_]u8{0}; + const b = prefix ++ [_]u8{1}; + + try testing.expect(SortedHashMap(void, 100).cmp(a, b) == .lt); + try testing.expect(SortedHashMap(void, 100).cmp(b, a) == .gt); + try testing.expect(SortedHashMap(void, 100).cmp(a, a) == .eq); + try testing.expect(SortedHashMap(void, 100).cmp(b, b) == .eq); + try testing.expect(SortedHashMap(void, 100).cmp([_]u8{'i'} ++ [_]u8{'0'} ** 31, [_]u8{'o'} ++ [_]u8{'0'} ** 31) == .lt); + try testing.expect(SortedHashMap(void, 100).cmp([_]u8{ 'h', 'i' } ++ [_]u8{'0'} ** 30, [_]u8{ 'h', 'o' } ++ [_]u8{'0'} ** 30) == .lt); +} + +test "SortedHashMap: put, get, delete, grow" { + var seed: usize = 0; + while (seed < 128) : (seed += 1) { + var rng = std.rand.DefaultPrng.init(seed); + + const keys = try testing.allocator.alloc([32]u8, 512); + defer testing.allocator.free(keys); + + for (keys) |*key| rng.fill(key); + + var map = try SortedHashMap(usize, 50).initCapacity(testing.allocator, 16); + defer map.deinit(testing.allocator); + + try testing.expectEqual(@as(u6, 60), map.shift); + + for (keys, 0..) |key, i| try map.put(testing.allocator, key, i); + + try testing.expectEqual(@as(u6, 54), map.shift); + try testing.expectEqual(keys.len, map.len); + + var it = [_]u8{0} ** 32; + for (map.slice()) |entry| { + if (!entry.isEmpty()) { + if (!mem.order(u8, &it, &entry.hash).compare(.lte)) { + return error.Unsorted; + } + it = entry.hash; + } + } + + for (keys, 0..) |key, i| try testing.expectEqual(i, map.get(key).?); + for (keys, 0..) |key, i| try testing.expectEqual(i, map.delete(key).?); + } +} + +test "SortedHashMap: collision test" { + const prefix = [_]u8{22} ** 8 ++ [_]u8{1} ** 23; + + var map = try SortedHashMap(usize, 100).initCapacity(testing.allocator, 4); + defer map.deinit(testing.allocator); + + try map.put(testing.allocator, prefix ++ [_]u8{0}, 0); + try map.put(testing.allocator, prefix ++ [_]u8{1}, 1); + try map.put(testing.allocator, prefix ++ [_]u8{2}, 2); + try map.put(testing.allocator, prefix ++ [_]u8{3}, 3); + + var it = [_]u8{0} ** 32; + for (map.slice()) |entry| { + if (!entry.isEmpty()) { + if (!mem.order(u8, &it, &entry.hash).compare(.lte)) { + return error.Unsorted; + } + it = entry.hash; + } + } + + try testing.expectEqual(@as(usize, 0), map.get(prefix ++ [_]u8{0}).?); + try testing.expectEqual(@as(usize, 1), map.get(prefix ++ [_]u8{1}).?); + try testing.expectEqual(@as(usize, 2), map.get(prefix ++ [_]u8{2}).?); + try testing.expectEqual(@as(usize, 3), map.get(prefix ++ [_]u8{3}).?); + + try testing.expectEqual(@as(usize, 2), map.delete(prefix ++ [_]u8{2}).?); + try testing.expectEqual(@as(usize, 0), map.delete(prefix ++ [_]u8{0}).?); + try testing.expectEqual(@as(usize, 1), map.delete(prefix ++ [_]u8{1}).?); + try testing.expectEqual(@as(usize, 3), map.delete(prefix ++ [_]u8{3}).?); + + try map.put(testing.allocator, prefix ++ [_]u8{0}, 0); + try map.put(testing.allocator, prefix ++ [_]u8{2}, 2); + try map.put(testing.allocator, prefix ++ [_]u8{3}, 3); + try map.put(testing.allocator, prefix ++ [_]u8{1}, 1); + + it = [_]u8{0} ** 32; + for (map.slice()) |entry| { + if (!entry.isEmpty()) { + if (!mem.order(u8, &it, &entry.hash).compare(.lte)) { + return error.Unsorted; + } + it = entry.hash; + } + } + + try testing.expectEqual(@as(usize, 0), map.delete(prefix ++ [_]u8{0}).?); + try testing.expectEqual(@as(usize, 1), map.delete(prefix ++ [_]u8{1}).?); + try 
testing.expectEqual(@as(usize, 2), map.delete(prefix ++ [_]u8{2}).?); + try testing.expectEqual(@as(usize, 3), map.delete(prefix ++ [_]u8{3}).?); + + try map.put(testing.allocator, prefix ++ [_]u8{0}, 0); + try map.put(testing.allocator, prefix ++ [_]u8{2}, 2); + try map.put(testing.allocator, prefix ++ [_]u8{1}, 1); + try map.put(testing.allocator, prefix ++ [_]u8{3}, 3); + + it = [_]u8{0} ** 32; + for (map.slice()) |entry| { + if (!entry.isEmpty()) { + if (!mem.order(u8, &it, &entry.hash).compare(.lte)) { + return error.Unsorted; + } + it = entry.hash; + } + } + + try testing.expectEqual(@as(usize, 3), map.delete(prefix ++ [_]u8{3}).?); + try testing.expectEqual(@as(usize, 2), map.delete(prefix ++ [_]u8{2}).?); + try testing.expectEqual(@as(usize, 1), map.delete(prefix ++ [_]u8{1}).?); + try testing.expectEqual(@as(usize, 0), map.delete(prefix ++ [_]u8{0}).?); + + try map.put(testing.allocator, prefix ++ [_]u8{3}, 3); + try map.put(testing.allocator, prefix ++ [_]u8{0}, 0); + try map.put(testing.allocator, prefix ++ [_]u8{1}, 1); + try map.put(testing.allocator, prefix ++ [_]u8{2}, 2); + + it = [_]u8{0} ** 32; + for (map.slice()) |entry| { + if (!entry.isEmpty()) { + if (!mem.order(u8, &it, &entry.hash).compare(.lte)) { + return error.Unsorted; + } + it = entry.hash; + } + } + + try testing.expectEqual(@as(usize, 3), map.delete(prefix ++ [_]u8{3}).?); + try testing.expectEqual(@as(usize, 0), map.delete(prefix ++ [_]u8{0}).?); + try testing.expectEqual(@as(usize, 1), map.delete(prefix ++ [_]u8{1}).?); + try testing.expectEqual(@as(usize, 2), map.delete(prefix ++ [_]u8{2}).?); +} diff --git a/src/bun.js/api/bun/socket.zig b/src/bun.js/api/bun/socket.zig index 24e5db5c6..e89ee5aa1 100644 --- a/src/bun.js/api/bun/socket.zig +++ b/src/bun.js/api/bun/socket.zig @@ -582,7 +582,7 @@ pub const Listener = struct { var socket_context = uws.us_create_bun_socket_context( @intFromBool(ssl_enabled), - uws.Loop.get().?, + uws.Loop.get(), @sizeOf(usize), ctx_opts, ) orelse { @@ -919,7 +919,7 @@ pub const Listener = struct { globalObject.bunVM().eventLoop().ensureWaker(); - var socket_context = uws.us_create_bun_socket_context(@intFromBool(ssl_enabled), uws.Loop.get().?, @sizeOf(usize), ctx_opts).?; + var socket_context = uws.us_create_bun_socket_context(@intFromBool(ssl_enabled), uws.Loop.get(), @sizeOf(usize), ctx_opts).?; var connection: Listener.UnixOrHost = if (port) |port_| .{ .host = .{ .host = (hostname_or_unix.cloneIfNeeded(bun.default_allocator) catch unreachable).slice(), .port = port_ }, } else .{ diff --git a/src/bun.js/api/bun/spawn.zig b/src/bun.js/api/bun/spawn.zig index 8a8ec3fff..3d26bbbfb 100644 --- a/src/bun.js/api/bun/spawn.zig +++ b/src/bun.js/api/bun/spawn.zig @@ -30,13 +30,13 @@ const errno = std.os.errno; const mode_t = std.os.mode_t; const unexpectedErrno = std.os.unexpectedErrno; -pub const WaitPidResult = struct { - pid: pid_t, - status: u32, -}; - // mostly taken from zig's posix_spawn.zig pub const PosixSpawn = struct { + pub const WaitPidResult = struct { + pid: pid_t, + status: u32, + }; + pub const Attr = struct { attr: system.posix_spawnattr_t, diff --git a/src/bun.js/base.zig b/src/bun.js/base.zig index 6fcfb8a36..70e5351cc 100644 --- a/src/bun.js/base.zig +++ b/src/bun.js/base.zig @@ -23,6 +23,7 @@ const uws = @import("root").bun.uws; const Body = WebCore.Body; const TaggedPointerTypes = @import("../tagged_pointer.zig"); const TaggedPointerUnion = TaggedPointerTypes.TaggedPointerUnion; +const PackageManager = @import("../install/install.zig").PackageManager; pub const 
ExceptionValueRef = [*c]js.JSValueRef; pub const JSValueRef = js.JSValueRef; @@ -1719,6 +1720,10 @@ pub const FilePoll = struct { pub var owner: Owner = Owner.init(@as(*Deactivated, @ptrFromInt(@as(usize, 0xDEADBEEF)))); }; + const RunCommand = @import("../../src/cli/run_command.zig").RunCommand; + const PostinstallSubprocess = RunCommand.PostinstallSubprocess; + const PostinstallSubprocessPid = RunCommand.PostinstallSubprocess.PidPollData; + pub const Owner = bun.TaggedPointerUnion(.{ FileReader, FileSink, @@ -1728,6 +1733,8 @@ pub const FilePoll = struct { Deactivated, DNSResolver, GetAddrInfoRequest, + PostinstallSubprocess, + PostinstallSubprocessPid, }); fn updateFlags(poll: *FilePoll, updated: Flags.Set) void { @@ -1833,7 +1840,6 @@ pub const FilePoll = struct { @field(Owner.Tag, "Subprocess") => { log("onUpdate " ++ kqueue_or_epoll ++ " (fd: {d}) Subprocess", .{poll.fd}); var loader = ptr.as(JSC.Subprocess); - loader.onExitNotification(); }, @field(Owner.Tag, "FileSink") => { @@ -1841,18 +1847,26 @@ pub const FilePoll = struct { var loader = ptr.as(JSC.WebCore.FileSink); loader.onPoll(size_or_offset, 0); }, - @field(Owner.Tag, "DNSResolver") => { log("onUpdate " ++ kqueue_or_epoll ++ " (fd: {d}) DNSResolver", .{poll.fd}); var loader: *DNSResolver = ptr.as(DNSResolver); loader.onDNSPoll(poll); }, - @field(Owner.Tag, "GetAddrInfoRequest") => { log("onUpdate " ++ kqueue_or_epoll ++ " (fd: {d}) GetAddrInfoRequest", .{poll.fd}); var loader: *GetAddrInfoRequest = ptr.as(GetAddrInfoRequest); loader.onMachportChange(); }, + @field(Owner.Tag, "PostinstallSubprocess") => { + log("onUpdate " ++ kqueue_or_epoll ++ " (fd: {d}) PostinstallSubprocess Output", .{poll.fd}); + var loader: *PostinstallSubprocess = ptr.as(PostinstallSubprocess); + loader.onOutputUpdate(size_or_offset, poll.fileDescriptor()); + }, + @field(Owner.Tag, "PidPollData") => { + log("onUpdate " ++ kqueue_or_epoll ++ " (fd: {d}) PostinstallSubprocess Pid", .{poll.fd}); + var loader: *PostinstallSubprocess = ptr.as(PostinstallSubprocess); + loader.onProcessUpdate(size_or_offset); + }, else => { log("onUpdate " ++ kqueue_or_epoll ++ " (fd: {d}) disconnected?", .{poll.fd}); @@ -1860,6 +1874,10 @@ pub const FilePoll = struct { } } + pub inline fn fileDescriptor(this: *FilePoll) bun.FileDescriptor { + return @intCast(this.fd); + } + pub const Flags = enum { // What are we asking the event loop about? 
@@ -2088,6 +2106,24 @@ pub const FilePoll = struct { return poll; } + pub fn initWithPackageManager(m: *PackageManager, fd: bun.FileDescriptor, flags: Flags.Struct, owner: anytype) *FilePoll { + return initWithPackageManagerWithOwner(m, fd, flags, Owner.init(owner)); + } + + pub fn initWithPackageManagerWithOwner(manager: *PackageManager, fd: bun.FileDescriptor, flags: Flags.Struct, owner: Owner) *FilePoll { + var poll = manager.file_poll_store.get(); + poll.fd = @intCast(fd); + poll.flags = Flags.Set.init(flags); + poll.owner = owner; + poll.next_to_free = null; + + if (KQueueGenerationNumber != u0) { + max_generation_number +%= 1; + poll.generation_number = max_generation_number; + } + return poll; + } + pub inline fn canRef(this: *const FilePoll) bool { if (this.flags.contains(.disable)) return false; diff --git a/src/bun.js/event_loop.zig b/src/bun.js/event_loop.zig index c41b05e15..bdee2cefb 100644 --- a/src/bun.js/event_loop.zig +++ b/src/bun.js/event_loop.zig @@ -1166,8 +1166,7 @@ pub const EventLoop = struct { pub fn ensureWaker(this: *EventLoop) void { JSC.markBinding(@src()); if (this.virtual_machine.event_loop_handle == null) { - var actual = uws.Loop.get().?; - this.virtual_machine.event_loop_handle = actual; + this.virtual_machine.event_loop_handle = uws.Loop.get(); this.virtual_machine.gc_controller.init(this.virtual_machine); // _ = actual.addPostHandler(*JSC.EventLoop, this, JSC.EventLoop.afterUSocketsTick); // _ = actual.addPreHandler(*JSC.VM, this.virtual_machine.jsc, JSC.VM.drainMicrotasks); @@ -1208,7 +1207,7 @@ pub const MiniEventLoop = struct { return .{ .tasks = Queue.init(allocator), .allocator = allocator, - .loop = uws.Loop.get().?, + .loop = uws.Loop.get(), }; } diff --git a/src/bun.js/test/jest.zig b/src/bun.js/test/jest.zig index 5490b3472..a95897414 100644 --- a/src/bun.js/test/jest.zig +++ b/src/bun.js/test/jest.zig @@ -127,7 +127,7 @@ pub const TestRunner = struct { if (milliseconds > 0) { if (this.test_timeout_timer == null) { - this.test_timeout_timer = bun.uws.Timer.createFallthrough(bun.uws.Loop.get().?, this); + this.test_timeout_timer = bun.uws.Timer.createFallthrough(bun.uws.Loop.get(), this); } if (this.last_test_timeout_timer_duration != milliseconds) { diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index e5b73814c..f2595c16d 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -1163,7 +1163,7 @@ pub const BundleV2 = struct { thread.detach(); } else { BundleThread.instance.queue.push(completion); - BundleThread.instance.waker.wake() catch {}; + BundleThread.instance.waker.wake(); } completion.poll_ref.ref(globalThis.bunVM()); diff --git a/src/cli/run_command.zig b/src/cli/run_command.zig index 19046e292..1c5f3e1ec 100644 --- a/src/cli/run_command.zig +++ b/src/cli/run_command.zig @@ -9,6 +9,8 @@ const stringZ = bun.stringZ; const default_allocator = bun.default_allocator; const C = bun.C; const std = @import("std"); +const uws = @import("../deps/uws.zig"); +const JSC = bun.JSC; const lex = bun.js_lexer; const logger = @import("root").bun.logger; @@ -41,6 +43,9 @@ const NpmArgs = struct { const yarn_commands: []u64 = @import("./list-of-yarn-commands.zig").all_yarn_commands; const ShellCompletions = @import("./shell_completions.zig"); +const PosixSpawn = @import("../bun.js/api/bun/spawn.zig").PosixSpawn; + +const PackageManager = @import("../install/install.zig").PackageManager; pub const RunCommand = struct { const shells_to_search = &[_]string{ @@ -49,7 +54,7 @@ pub const RunCommand = struct { "zsh", }; - pub 
fn findShell(PATH: string, cwd: string) ?string { + pub fn findShell(PATH: string, cwd: string) ?stringZ { if (comptime Environment.isWindows) { return "C:\\Windows\\System32\\cmd.exe"; } @@ -225,7 +230,338 @@ pub const RunCommand = struct { const log = Output.scoped(.RUN, false); - pub fn runPackageScript( + pub const PostinstallSubprocess = struct { + script_name: []const u8, + package_name: []const u8, + + finished_fds: u8 = 0, + + output_buffer: bun.ByteList, + pid_poll: *JSC.FilePoll, + waitpid_result: ?PosixSpawn.WaitPidResult, + stdout_poll: *JSC.FilePoll, + stderr_poll: *JSC.FilePoll, + package_manager: *PackageManager, + + /// A "nothing" struct that lets us reuse the same pointer + /// but with a different tag for the file poll + pub const PidPollData = struct { process: PostinstallSubprocess }; + + pub fn init( + manager: *PackageManager, + script_name: []const u8, + package_name: []const u8, + stdout_fd: bun.FileDescriptor, + stderr_fd: bun.FileDescriptor, + pid_fd: bun.FileDescriptor, + ) !?*PostinstallSubprocess { + // TODO: this doesnt handle some cleanup edge cases on error + var this = try manager.allocator.create(PostinstallSubprocess); + errdefer this.deinit(manager.allocator); + + this.* = .{ + .package_name = package_name, + .script_name = script_name, + .package_manager = manager, + .waitpid_result = null, + .output_buffer = .{}, + .pid_poll = JSC.FilePoll.initWithPackageManager( + manager, + pid_fd, + .{}, + @as(*PidPollData, @ptrCast(this)), + ), + .stdout_poll = JSC.FilePoll.initWithPackageManager(manager, stdout_fd, .{}, this), + .stderr_poll = JSC.FilePoll.initWithPackageManager(manager, stderr_fd, .{}, this), + }; + + try this.stdout_poll.register(manager.uws_event_loop, .readable, false).throw(); + try this.stderr_poll.register(manager.uws_event_loop, .readable, false).throw(); + + switch (this.pid_poll.register( + manager.uws_event_loop, + .process, + true, + )) { + .result => {}, + .err => |err| { + // Sometimes the pid poll can fail to register if the process exits + // between posix_spawn() and pid_poll.register(), but it is unlikely. + // Any other error is unexpected here. + if (err.getErrno() != .SRCH) { + @panic("This shouldn't happen. 
Could not register pid poll"); + } + + this.package_manager.pending_tasks -= 1; + this.onProcessUpdate(0); + return null; + }, + } + + return this; + } + + pub fn onOutputUpdate(this: *PostinstallSubprocess, size: i64, fd: bun.FileDescriptor) void { + var needed_capacity = this.output_buffer.len + @as(u32, @intCast(size)); + _ = needed_capacity; + this.output_buffer.ensureUnusedCapacity(this.package_manager.allocator, @intCast(size)) catch @panic("Failed to allocate memory for output buffer"); + + if (size == 0) { + this.finished_fds += 1; + if (this.waitpid_result) |result| { + if (this.finished_fds == 2) { + this.onResult(result); + } + } + return; + } + + var remaining = size; + while (remaining > 0) { + const n: u32 = @truncate(std.os.read( + fd, + this.output_buffer.ptr[this.output_buffer.len..this.output_buffer.cap], + ) catch return); + this.output_buffer.len += n; + remaining -|= n; + } + } + + pub fn printOutput(this: *PostinstallSubprocess) void { + Output.errorWriter().writeAll(this.output_buffer.slice()) catch {}; + } + + pub fn onProcessUpdate(this: *PostinstallSubprocess, _: i64) void { + switch (PosixSpawn.waitpid(this.pid_poll.fileDescriptor(), std.os.W.NOHANG)) { + .err => |err| { + Output.prettyErrorln("<r><red>error<r>: Failed to run <b>{s}<r> script from \"<b>{s}<r>\" due to error <b>{d} {s}<r>", .{ this.script_name, this.package_name, err.errno, @tagName(err.getErrno()) }); + Output.flush(); + this.package_manager.pending_tasks -= 1; + }, + .result => |result| this.onResult(result), + } + } + + pub fn onResult(this: *PostinstallSubprocess, result: PosixSpawn.WaitPidResult) void { + if (result.pid == 0) { + Output.prettyErrorln("<r><red>error<r>: Failed to run <b>{s}<r> script from \"<b>{s}<r>\" due to error <b>{d} {s}<r>", .{ this.script_name, this.package_name, 0, "Unknown" }); + Output.flush(); + + this.package_manager.pending_tasks -= 1; + return; + } + if (std.os.W.IFEXITED(result.status)) { + defer this.deinit(this.package_manager.allocator); + + const code = std.os.W.EXITSTATUS(result.status); + if (code > 0) { + if (this.finished_fds < 2) { + this.waitpid_result = result; + return; + } + this.printOutput(); + Output.prettyErrorln("<r><red>error<r><d>:<r> <b>{s}<r> script from \"<b>{s}<r>\" exited with {any}<r>", .{ this.script_name, this.package_name, bun.SignalCode.from(code) }); + Output.flush(); + Global.exit(code); + } + + this.package_manager.pending_tasks -= 1; + return; + } + if (std.os.W.IFSIGNALED(result.status)) { + const signal = std.os.W.TERMSIG(result.status); + + if (this.finished_fds < 2) { + this.waitpid_result = result; + return; + } + this.printOutput(); + + Output.prettyErrorln("<r><red>error<r><d>:<r> <b>{s}<r> script from \"<b>{s}<r>\" exited with {any}<r>", .{ this.script_name, this.package_name, bun.SignalCode.from(signal) }); + Output.flush(); + Global.exit(1); + } + if (std.os.W.IFSTOPPED(result.status)) { + const signal = std.os.W.STOPSIG(result.status); + + if (this.finished_fds < 2) { + this.waitpid_result = result; + return; + } + this.printOutput(); + + Output.prettyErrorln("<r><red>error<r><d>:<r> <b>{s}<r> script from \"<b>{s}<r>\" was stopped by signal {any}<r>", .{ this.script_name, this.package_name, bun.SignalCode.from(signal) }); + Output.flush(); + Global.exit(1); + } + } + + pub fn deinit(this: *PostinstallSubprocess, alloc: std.mem.Allocator) void { + _ = this.stdout_poll.unregister(this.package_manager.uws_event_loop, false); + _ = this.stderr_poll.unregister(this.package_manager.uws_event_loop, false); + _ = 
this.pid_poll.unregister(this.package_manager.uws_event_loop, false); + + _ = bun.sys.close(this.stdout_poll.fileDescriptor()); + _ = bun.sys.close(this.stderr_poll.fileDescriptor()); + _ = bun.sys.close(this.pid_poll.fileDescriptor()); + + alloc.destroy(this); + } + }; + + inline fn spawnScript( + ctx: *PackageManager, + name: string, + package_name: string, + cwd: string, + env: *DotEnv.Loader, + argv: [*:null]?[*:0]const u8, + ) !?*PostinstallSubprocess { + var flags: i32 = bun.C.POSIX_SPAWN_SETSIGDEF | bun.C.POSIX_SPAWN_SETSIGMASK; + if (comptime Environment.isMac) { + flags |= bun.C.POSIX_SPAWN_CLOEXEC_DEFAULT; + } + + var attr = try PosixSpawn.Attr.init(); + defer attr.deinit(); + try attr.set(@intCast(flags)); + try attr.resetSignals(); + + var actions = try PosixSpawn.Actions.init(); + defer actions.deinit(); + try actions.openZ(bun.STDIN_FD, "/dev/null", std.os.O.RDONLY, 0o664); + + // Have both stdout and stderr write to the same buffer + const fdsOut = try std.os.pipe2(0); + try actions.dup2(fdsOut[1], bun.STDOUT_FD); + + const fdsErr = try std.os.pipe2(0); + try actions.dup2(fdsErr[1], bun.STDERR_FD); + + try actions.chdir(cwd); + + var arena = bun.ArenaAllocator.init(ctx.allocator); + defer arena.deinit(); + + const pid = brk: { + defer { + _ = bun.sys.close(fdsOut[1]); + _ = bun.sys.close(fdsErr[1]); + } + switch (PosixSpawn.spawnZ( + argv[0].?, + actions, + attr, + argv, + try env.map.createNullDelimitedEnvMap(arena.allocator()), + )) { + .err => |err| { + Output.prettyErrorln("<r><red>error<r>: Failed to spawn script <b>{s}<r> due to error <b>{d} {s}<r>", .{ name, err.errno, @tagName(err.getErrno()) }); + Output.flush(); + return null; + }, + .result => |pid| break :brk pid, + } + }; + + const pidfd: std.os.fd_t = brk: { + if (!Environment.isLinux) { + break :brk pid; + } + + const kernel = @import("../analytics.zig").GenerateHeader.GeneratePlatform.kernelVersion(); + + // pidfd_nonblock only supported in 5.10+ + const pidfd_flags: u32 = if (kernel.orderWithoutTag(.{ .major = 5, .minor = 10, .patch = 0 }).compare(.gte)) + std.os.O.NONBLOCK + else + 0; + + const fd = std.os.linux.pidfd_open( + pid, + pidfd_flags, + ); + + switch (std.os.linux.getErrno(fd)) { + .SUCCESS => break :brk @as(std.os.fd_t, @intCast(fd)), + else => |err| { + var status: u32 = 0; + // ensure we don't leak the child process on error + _ = std.os.linux.waitpid(pid, &status, 0); + + Output.prettyErrorln("<r><red>error<r>: Failed to spawn script <b>{s}<r> due to error <b>{d} {s}<r>", .{ name, err, @tagName(err) }); + Output.flush(); + + return null; + }, + } + }; + + return try PostinstallSubprocess.init(ctx, name, package_name, fdsOut[0], fdsErr[0], pidfd); + } + + /// Used to execute postinstall scripts + pub fn spawnPackageScript( + ctx: *PackageManager, + original_script: string, + name: string, + package_name: string, + cwd: string, + passthrough: []const string, + silent: bool, + ) !void { + const env = ctx.env; + const shell_bin = findShell(env.map.get("PATH") orelse "", cwd) orelse return error.MissingShell; + + var script = original_script; + var copy_script = try std.ArrayList(u8).initCapacity(ctx.allocator, script.len + 1); + + // We're going to do this slowly. + // Find exact matches of yarn, pnpm, npm + + try replacePackageManagerRun(©_script, script); + try copy_script.append(0); + + var combined_script: [:0]u8 = copy_script.items[0 .. 
copy_script.items.len - 1 :0]; + + log("Script from pkg \"{s}\" : \"{s}\"", .{ package_name, combined_script }); + + if (passthrough.len > 0) { + var combined_script_len = script.len; + for (passthrough) |p| { + combined_script_len += p.len + 1; + } + var combined_script_buf = try ctx.allocator.allocSentinel(u8, combined_script_len, 0); + bun.copy(u8, combined_script_buf, script); + var remaining_script_buf = combined_script_buf[script.len..]; + for (passthrough) |part| { + var p = part; + remaining_script_buf[0] = ' '; + bun.copy(u8, remaining_script_buf[1..], p); + remaining_script_buf = remaining_script_buf[p.len + 1 ..]; + } + combined_script = combined_script_buf; + } + + if (!silent) { + Output.prettyErrorln("<r><d><magenta>$<r> <d><b>{s}<r>", .{combined_script}); + Output.flush(); + } + + var argv = try ctx.allocator.allocSentinel(?[*:0]const u8, 3, null); + defer ctx.allocator.free(argv); + argv[0] = shell_bin; + argv[1] = "-c"; + argv[2] = combined_script; + + _ = spawnScript(ctx, name, package_name, cwd, env, argv) catch |err| { + Output.prettyErrorln("<r><red>error<r>: Failed to run script <b>{s}<r> due to error <b>{s}<r>", .{ name, @errorName(err) }); + Output.flush(); + return; + }; + } + + pub fn runPackageScriptForeground( allocator: std.mem.Allocator, original_script: string, name: string, @@ -323,6 +659,7 @@ pub const RunCommand = struct { return true; } + pub fn runBinary( ctx: Command.Context, executable: []const u8, @@ -1046,11 +1383,11 @@ pub const RunCommand = struct { else => { if (scripts.get(script_name_to_search)) |script_content| { // allocate enough to hold "post${scriptname}" - var temp_script_buffer = try std.fmt.allocPrint(ctx.allocator, "ppre{s}", .{script_name_to_search}); + defer ctx.allocator.free(temp_script_buffer); if (scripts.get(temp_script_buffer[1..])) |prescript| { - if (!try runPackageScript( + if (!try runPackageScriptForeground( ctx.allocator, prescript, temp_script_buffer[1..], @@ -1063,7 +1400,7 @@ pub const RunCommand = struct { } } - if (!try runPackageScript( + if (!try runPackageScriptForeground( ctx.allocator, script_content, script_name_to_search, @@ -1076,7 +1413,7 @@ pub const RunCommand = struct { temp_script_buffer[0.."post".len].* = "post".*; if (scripts.get(temp_script_buffer)) |postscript| { - if (!try runPackageScript( + if (!try runPackageScriptForeground( ctx.allocator, postscript, temp_script_buffer, diff --git a/src/deps/uws.zig b/src/deps/uws.zig index 8ca0f260b..6d3fb4613 100644 --- a/src/deps/uws.zig +++ b/src/deps/uws.zig @@ -797,12 +797,11 @@ pub const SocketContext = opaque { pub fn deinit(this: *SocketContext, ssl: bool) void { this.close(ssl); //always deinit in next iteration - if (Loop.get()) |loop| { - if (ssl) { - loop.nextTick(*SocketContext, this, SocketContext._deinit_ssl); - } else { - loop.nextTick(*SocketContext, this, SocketContext._deinit); - } + const loop = Loop.get(); + if (ssl) { + loop.nextTick(*SocketContext, this, SocketContext._deinit_ssl); + } else { + loop.nextTick(*SocketContext, this, SocketContext._deinit); } } @@ -906,7 +905,9 @@ pub const Loop = extern struct { this.active -|= @as(u32, @intCast(count)); } - pub fn get() ?*Loop { + /// Lazily initializes a per-thread loop and returns it. + /// Will automatically free all initialized loops at exit. 
+ pub fn get() *Loop { return uws_get_loop(); } @@ -980,7 +981,7 @@ pub const Loop = extern struct { extern fn uws_loop_defer(loop: *Loop, ctx: *anyopaque, cb: *const (fn (ctx: *anyopaque) callconv(.C) void)) void; - extern fn uws_get_loop() ?*Loop; + extern fn uws_get_loop() *Loop; extern fn us_create_loop( hint: ?*anyopaque, wakeup_cb: ?*const fn (*Loop) callconv(.C) void, diff --git a/src/env_loader.zig b/src/env_loader.zig index f0bb91148..f89e566e2 100644 --- a/src/env_loader.zig +++ b/src/env_loader.zig @@ -892,7 +892,7 @@ pub const Map = struct { bun.copy(u8, env_buf[pair.key_ptr.len + 1 ..], pair.value_ptr.value); envp_buf[i] = env_buf.ptr; } - std.debug.assert(i == envp_count); + if (comptime Environment.allow_assert) std.debug.assert(i == envp_count); } return envp_buf; } diff --git a/src/install/default-trusted-dependencies.txt b/src/install/default-trusted-dependencies.txt new file mode 100644 index 000000000..efdb12bdc --- /dev/null +++ b/src/install/default-trusted-dependencies.txt @@ -0,0 +1,500 @@ +@airbnb/node-memwatch +@alaskaairux/icons +@antv/l7-react +@apollo/protobufjs +@apollo/rover +@applitools/eyes-storybook +@appsignal/nodejs +@arkweid/lefthook +@aws-amplify/cli +@azure/msal-node-extensions +@bahmutov/add-typescript-to-cypress +@bazel/concatjs +@bazel/cypress +@bazel/esbuild +@bazel/hide-bazel-files +@bazel/jasmine +@bazel/protractor +@bazel/rollup +@bazel/terser +@bazel/typescript +@bufbuild/buf +@carbon/charts +@carbon/charts-angular +@carbon/charts-react +@carbon/ibm-products +@carbon/icons-react +@carbon/pictograms-react +@carbon/react +@cdktf/node-pty-prebuilt-multiarch +@ckeditor/ckeditor5-react +@ckeditor/ckeditor5-vue +@cloudflare/wrangler +@compodoc/compodoc +@contrast/fn-inspect +@cubejs-backend/cubestore +@cubejs-backend/native +@cypress/snapshot +@danmarshall/deckgl-typings +@databricks/sql +@datadog/mobile-react-native +@datadog/native-appsec +@datadog/native-metrics +@datadog/pprof +@discordjs/opus +@eversdk/lib-node +@evilmartians/lefthook +@ffmpeg-installer/darwin-arm64 +@ffmpeg-installer/darwin-x64 +@ffmpeg-installer/linux-arm +@ffmpeg-installer/linux-arm64 +@ffmpeg-installer/linux-ia32 +@ffmpeg-installer/linux-x64 +@ffprobe-installer/darwin-arm64 +@ffprobe-installer/darwin-x64 +@ffprobe-installer/linux-arm +@ffprobe-installer/linux-arm64 +@ffprobe-installer/linux-ia32 +@ffprobe-installer/linux-x64 +@fingerprintjs/fingerprintjs-pro-react +@fortawesome/fontawesome-common-types +@fortawesome/fontawesome-free +@fortawesome/fontawesome-svg-core +@fortawesome/free-brands-svg-icons +@fortawesome/free-regular-svg-icons +@fortawesome/free-solid-svg-icons +@ghaiklor/x509 +@go-task/cli +@gql2ts/language-typescript +@injectivelabs/sdk-ts +@instana/autoprofile +@intlify/vue-i18n-bridge +@intlify/vue-router-bridge +@lightdash/cli +@matteodisabatino/gc_info +@memlab/cli +@microsoft.azure/autorest-core +@microsoft/teamsfx-cli +@microsoft/ts-command-line +@napi-rs/canvas-linux-x64-gnu +@napi-rs/canvas-linux-x64-musl +@napi-rs/pinyin +@napi-rs/simple-git-linux-arm64-gnu +@napi-rs/simple-git-linux-arm64-musl +@napi-rs/simple-git-linux-x64-gnu +@napi-rs/simple-git-linux-x64-musl +@nativescript/core +@nestjs/core +@netlify/esbuild +@newrelic/native-metrics +@notarize/qlc-cli +@nx-dotnet/core +@openapitools/openapi-generator-cli +@opensea/seaport-js +@opensearch-project/oui +@opentelemetry/instrumentation-grpc +@pact-foundation/pact-core +@pact-foundation/pact-node +@paloaltonetworks/postman-code-generators +@parcel/watcher +@pdftron/pdfnet-node +@percy/core 
+@pnpm/exe +@prisma/client +@prisma/engines +@progress/kendo-licensing +@pulumi/aws +@pulumi/aws-native +@pulumi/awsx +@pulumi/azure +@pulumi/azure-native +@pulumi/cloudflare +@pulumi/command +@pulumi/datadog +@pulumi/docker +@pulumi/gcp +@pulumi/github +@pulumi/kubernetes +@pulumi/postgresql +@pulumi/random +@pulumi/tls +@replayio/cypress +@replayio/playwright +@root/acme +@roots/bud-framework +@sanity/eslint-config-studio +@sap/hana-client +@sap/hana-performance-tools +@sap/hana-theme-vscode +@scarf/scarf +@sematext/gc-stats +@sentry/capacitor +@sentry/cli +@sentry/profiling-node +@serialport/bindings +@serialport/bindings-cpp +@shopify/ngrok +@shopify/plugin-cloudflare +@shopify/react-native-skia +@sitespeed.io/chromedriver +@sitespeed.io/edgedriver +@softvisio/core +@splunk/otel +@strapi/strapi +@substrate/connect +@sveltejs/kit +@swc/core +@syncfusion/ej2-angular-base +@taquito/taquito +@tds/core-colours +@temporalio/core-bridge +@tensorflow/tfjs-node +@trufflesuite/bigint-buffer +@trumbitta/nx-plugin-unused-deps +@typescript-tools/rust-implementation +@vaadin/vaadin-usage-statistics +@vscode/ripgrep +@vscode/sqlite3 +abstract-socket +admin-lte +appdynamics +appium-chromedriver +appium-windows-driver +applicationinsights-native-metrics +argon2 +autorest +avo +aws-crt +azure-arm-cdn +azure-arm-compute +azure-arm-network +azure-arm-storage +azure-functions-core-tools +azure-streamanalytics-cicd +babylonjs +backport +baseui +bcrypt +better-sqlite3 +bigint-buffer +bigscreen-player +blake-hash +bootstrap-fileinput +bootstrap-vue +browser-tabs-lock +bs-platform +bufferutil +bun +canvacord +canvas +carbon-addons-iot-react +carbon-components +carbon-components-angular +carbon-components-react +cbor-extract +ccxt +chromedriver +chromium +classic-level +cld +cldr-data +clevertap-react-native +clientjs +cmark-gfm +command-join +commitlint-config-jira +compresion +contentlayer +contextify +cordova.plugins.diagnostic +core-js-bundle +couchbase +cpu-features +cwebp-bin +cy2 +cypress +data-forge +dd-trace +deasync +detox +detox-recorder +discord-economy-super +diskusage +docsify +dooboolab-welcome +dotnet-2.0.0 +dprint +drivelist +dtrace-provider +duckdb +dugite +eccrypto +egg-bin +egg-ci +electron +electron-chromedriver +electron-prebuilt +electron-winstaller +elm +elm-format +es5-ext +esbuild +esoftplay +event-loop-stats +exifreader +external-svg-loader +farmhash +fast-folder-size +faunadb +ffi +ffi-napi +ffmpeg-static +fibers +fmerge +free-email-domains +fs-xattr +full-icu +gatsby +gatsby-cli +gatsby-telemetry +gc-stats +gcstats.js +geckodriver +gentype +ghooks +gif2webp-bin +gifsicle +git-commit-msg-linter +git-validate +git-win +gl +gmsmith +go-ios +grpc +grpc-tools +handbrake-js +hasura-cli +heapdump +highcharts-export-server +hiredis +hnswlib-node +hugo-bin +hummus +ibm_db +iconv +iedriver +iltorb +incremental-json-parser +inferno +install-peers +interruptor +iobroker.js-controller +iso-constants +isolated-vm +java +javascript-obfuscator +jest-preview +jpeg-recompress-bin +jpegtran-bin +keccak +kerberos +keytar +lefthook +leveldown +libpg-query +libpq +libxmljs +libxmljs2 +lint +lmdb +lmdb-store +local-cypress +lz4 +lzma-native +lzo +macos-alias +mbt +medusa-telemetry +memlab +metalsmith +microtime +minidump +mmmagic +modern-syslog +monaco-languageclient +mongodb-client-encryption +mongodb-crypt-library-dummy +mongodb-crypt-library-version +mongodb-memory-server +mozjpeg +ms-chromium-edge-driver +msgpackr-extract +msnodesqlv8 +msw +muhammara +neo4j-bloom +nestjs-pino +netlify-cli 
+next-plugin-preact +next-translate-plugin +ngrok +ngx-popperjs +nice-napi +node +node-expat +node-hid +node-jq +node-libcurl +node-pty +node-rdkafka +node-sass +node-webcrypto-ossl +node-zopfli +node-zopfli-es +nodegit +nodejieba +nodent-runtime +nuxt-edge +nx +odiff-bin +oniguruma +optipng-bin +oracledb +os-dns-native +parcel-bundler +parse-server +phantomjs +phantomjs-prebuilt +pkcs11js +playwright +playwright-chromium +playwright-firefox +playwright-webkit +pngout-bin +pngquant-bin +poolifier +posix +postinstall-postinstall +postinstall-prepare +pprof +pre-commit +pre-push +prisma +protobufjs +protoc +protoc-gen-grpc-web +puppeteer +quick-mongo-super +re2 +react-intl-universal +react-jsx-parser +react-native-calendar-picker +react-native-elements +react-native-inappbrowser-reborn +react-native-storage +react-native-stylex +react-native-unimodules +react-particles +react-ranger +react-tsparticles +react-vertical-timeline-component +realm +redis-memory-server +ref +ref-napi +registry-js +restana +rete +robotjs +rome +rovel.js +rxdb +sauce-connect-launcher +saucectl +scrollreveal +secp256k1 +segfault-handler +serverless +sfdx-cli +shared-git-hooks +sharp +simple-git-hooks +sleep +slice2js +snyk +sockopt +sodium-native +sonar-scanner +spectaql +spectron +spellchecker +sq-native +sqlite3 +sse4_crc32 +ssh2 +storage-engine +strapi +subrequests +subrequests-express +subrequests-json-merger +supabase +svelte-preprocess +svf-lib +swagger-ui +swiftlint +taco-cli +taiko +tesseract.js +tldjs +tree-sitter +tree-sitter-cli +tree-sitter-json +tree-sitter-kotlin +tree-sitter-typescript +tree-sitter-yaml +truffle +tsparticles-engine +ttag-cli +ttf2woff2 +turbo +typeit +typemoq +typeorm-fixtures-cli +typesense-instantsearch-adapter +unix-dgram +ursa-optional +usb +utf-8-validate +v8-profiler-next +vercel +vis-data +vis-network +vis-timeline +vue-demi +vue-echarts +vue-inbrowser-compiler-demi +vue-ls +vue-material +vue-popperjs +vue-test-utils +vuepress +vuex-module-decorators +wd +wdeasync +weak-napi +web3-bzz +web3-shh +webdev-toolkit +windows-build-tools +wix-style-react +wordpos +workerd +wrtc +xxhash +yarn +yo +yorkie +zapatos +zeromq +zlib-sync +zopflipng-bin
\ No newline at end of file diff --git a/src/install/install-scripts-allowlist.txt b/src/install/install-scripts-allowlist.txt deleted file mode 100644 index fdd5d1c06..000000000 --- a/src/install/install-scripts-allowlist.txt +++ /dev/null @@ -1,4 +0,0 @@ -static-ffmpeg -canvas -better-sqlite3 -node-sass
\ No newline at end of file
diff --git a/src/install/install.zig b/src/install/install.zig
index 693773947..06eb7296e 100644
--- a/src/install/install.zig
+++ b/src/install/install.zig
@@ -9,6 +9,8 @@ const stringZ = bun.stringZ;
 const default_allocator = bun.default_allocator;
 const C = bun.C;
 const std = @import("std");
+const uws = @import("../deps/uws.zig");
+const JSC = bun.JSC;

 const JSLexer = bun.js_lexer;
 const logger = bun.logger;
@@ -1636,6 +1638,51 @@ pub const CacheLevel = struct {
 const AsyncIO = bun.AsyncIO;
 const Waker = AsyncIO.Waker;

+const Waiter = struct {
+    onWait: *const fn (this: *anyopaque) AsyncIO.Errno!usize,
+    onWake: *const fn (this: *anyopaque) void,
+    ctx: *anyopaque,
+
+    pub fn init(
+        ctx: anytype,
+        comptime onWait: *const fn (this: @TypeOf(ctx)) AsyncIO.Errno!usize,
+        comptime onWake: *const fn (this: @TypeOf(ctx)) void,
+    ) Waiter {
+        return Waiter{
+            .ctx = @ptrCast(ctx),
+            .onWait = @alignCast(@ptrCast(@as(*const anyopaque, @ptrCast(onWait)))),
+            .onWake = @alignCast(@ptrCast(@as(*const anyopaque, @ptrCast(onWake)))),
+        };
+    }
+
+    pub fn wait(this: *Waiter) AsyncIO.Errno!usize {
+        return this.onWait(this.ctx);
+    }
+
+    pub fn wake(this: *Waiter) void {
+        this.onWake(this.ctx);
+    }
+
+    pub fn fromUWSLoop(loop: *uws.Loop) Waiter {
+        const Handlers = struct {
+            fn onWait(uws_loop: *uws.Loop) AsyncIO.Errno!usize {
+                uws_loop.run();
+                return 0;
+            }
+
+            fn onWake(uws_loop: *uws.Loop) void {
+                uws_loop.wakeup();
+            }
+        };
+
+        return Waiter.init(
+            loop,
+            Handlers.onWait,
+            Handlers.onWake,
+        );
+    }
+};
+
 // We can't know all the packages we need until we've downloaded all the packages
 // The easy way would be:
 // 1. Download all packages, parsing their dependencies and enqueuing all dependencies for resolution
@@ -1697,7 +1744,7 @@ pub const PackageManager = struct {
     global_link_dir: ?std.fs.IterableDir = null,
     global_dir: ?std.fs.IterableDir = null,
     global_link_dir_path: string = "",
-    waiter: Waker = undefined,
+    waiter: Waiter = undefined,
     wait_count: std.atomic.Atomic(usize) = std.atomic.Atomic(usize).init(0),

     onWake: WakeHandler = .{},
@@ -1705,6 +1752,9 @@ pub const PackageManager = struct {

     peer_dependencies: std.ArrayListUnmanaged(DependencyID) = .{},

+    uws_event_loop: *uws.Loop,
+    file_poll_store: JSC.FilePoll.Store,
+
     // name hash from alias package name -> aliased package dependency version info
     known_npm_aliases: NpmAliasMap = .{},

@@ -1768,7 +1818,7 @@ pub const PackageManager = struct {
         }

         _ = this.wait_count.fetchAdd(1, .Monotonic);
-        this.waiter.wake() catch {};
+        this.waiter.wake();
     }

     pub fn sleep(this: *PackageManager) void {
@@ -3993,6 +4043,7 @@ pub const PackageManager = struct {
         return CacheDir{ .is_node_modules = true, .path = Fs.FileSystem.instance.abs(&fallback_parts) };
     }

+    /// fn tick(
     pub fn runTasks(
         manager: *PackageManager,
         comptime ExtractCompletionContext: type,
@@ -4617,6 +4668,8 @@ pub const PackageManager = struct {

         manager.drainDependencyList();

+        manager.uws_event_loop.run();
+
         if (comptime log_level.showProgress()) {
             if (@hasField(@TypeOf(callbacks), "progress_bar") and callbacks.progress_bar == true) {
                 const completed_items = manager.total_tasks - manager.pending_tasks;
@@ -5600,9 +5653,11 @@ pub const PackageManager = struct {
             .resolve_tasks = TaskChannel.init(),
             .lockfile = undefined,
             .root_package_json_file = package_json_file,
-            .waiter = try Waker.init(ctx.allocator),
+            .waiter = Waiter.fromUWSLoop(uws.Loop.get()),
             .workspaces = workspaces,
             // .progress
+            .uws_event_loop = uws.Loop.get(),
+            .file_poll_store = JSC.FilePoll.Store.init(ctx.allocator),
         };

         manager.lockfile = try ctx.allocator.create(Lockfile);
@@ -5679,7 +5734,9 @@ pub const PackageManager = struct {
             .resolve_tasks = TaskChannel.init(),
             .lockfile = undefined,
             .root_package_json_file = undefined,
-            .waiter = try Waker.init(allocator),
+            .waiter = Waiter.fromUWSLoop(uws.Loop.get()),
+            .uws_event_loop = uws.Loop.get(),
+            .file_poll_store = JSC.FilePoll.Store.init(allocator),
             .workspaces = std.StringArrayHashMap(?Semver.Version).init(allocator),
         };
         manager.lockfile = try allocator.create(Lockfile);
@@ -7165,7 +7222,7 @@ pub const PackageManager = struct {
                 }
             }

-            if (resolution.tag == .workspace or this.lockfile.trusted_dependencies.contains(@as(u32, @truncate(String.Builder.stringHash(name))))) {
+            if (resolution.tag == .workspace or this.lockfile.hasTrustedDependency(name)) {
                var scripts = this.lockfile.packages.items(.scripts)[package_id];
                if (scripts.hasAny()) {
                    var path_buf: [bun.MAX_PATH_BYTES]u8 = undefined;
@@ -7175,7 +7232,7 @@ pub const PackageManager = struct {
                        .posix,
                    );

-                    scripts.enqueue(this.lockfile, buf, path_str);
+                    scripts.enqueue(this.lockfile, buf, path_str, name);
                } else if (!scripts.filled) {
                    var path_buf: [bun.MAX_PATH_BYTES]u8 = undefined;
                    const path_str = Path.joinAbsString(
@@ -7190,6 +7247,7 @@ pub const PackageManager = struct {
                        this.node_modules_folder.dir,
                        destination_dir_subpath,
                        path_str,
+                        name,
                    ) catch |err| {
                        if (comptime log_level != .silent) {
                            const fmt = "\n<r><red>error:<r> failed to parse life-cycle scripts for <b>{s}<r>: {s}\n";
@@ -8014,6 +8072,7 @@ pub const PackageManager = struct {
                        manager.lockfile,
                        lockfile.buffers.string_bytes.items,
                        strings.withoutTrailingSlash(Fs.FileSystem.instance.top_level_dir),
+                        maybe_root.name.slice(lockfile.buffers.string_bytes.items),
                    );
                }
            }
@@ -8166,12 +8225,7 @@ pub const PackageManager = struct {
            try manager.setupGlobalDir(&ctx);
        }

-        // We don't always save the lockfile.
-        // This is for two reasons.
-        // 1. It's unnecessary work if there are no changes
-        // 2. There is a determinism issue in the file where alignment bytes might be garbage data
-        //    This is a bug that needs to be fixed, however we can work around it for now
-        //    by avoiding saving the lockfile
+        // It's unnecessary work to re-save the lockfile if there are no changes
        if (manager.options.do.save_lockfile and
            (did_meta_hash_change or manager.lockfile.isEmpty() or manager.options.enable.force_save_lockfile))
        save: {
@@ -8222,6 +8276,7 @@ pub const PackageManager = struct {
                    manager.lockfile,
                    manager.lockfile.buffers.string_bytes.items,
                    strings.withoutTrailingSlash(Fs.FileSystem.instance.top_level_dir),
+                    root.name.slice(manager.lockfile.buffers.string_bytes.items),
                );
            }

@@ -8370,15 +8425,15 @@ pub const PackageManager = struct {
        if (run_lifecycle_scripts and install_summary.fail == 0) {
            // 2. install
            // 3. postinstall
-            try manager.lockfile.scripts.run(manager.allocator, manager.env, log_level != .silent, "install");
-            try manager.lockfile.scripts.run(manager.allocator, manager.env, log_level != .silent, "postinstall");
+            try manager.lockfile.scripts.spawnAllPackageScripts(manager, log_level, log_level != .silent, "install");
+            try manager.lockfile.scripts.spawnAllPackageScripts(manager, log_level, log_level != .silent, "postinstall");

            // 4. preprepare
            // 5. prepare
            // 6. postprepare
-            try manager.lockfile.scripts.run(manager.allocator, manager.env, log_level != .silent, "preprepare");
-            try manager.lockfile.scripts.run(manager.allocator, manager.env, log_level != .silent, "prepare");
-            try manager.lockfile.scripts.run(manager.allocator, manager.env, log_level != .silent, "postprepare");
+            try manager.lockfile.scripts.spawnAllPackageScripts(manager, log_level, log_level != .silent, "preprepare");
+            try manager.lockfile.scripts.spawnAllPackageScripts(manager, log_level, log_level != .silent, "prepare");
+            try manager.lockfile.scripts.spawnAllPackageScripts(manager, log_level, log_level != .silent, "postprepare");
        }

        if (comptime log_level != .silent) {
diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig
index 3e82c8891..ddbfaba50 100644
--- a/src/install/lockfile.zig
+++ b/src/install/lockfile.zig
@@ -11,6 +11,7 @@ const stringZ = bun.stringZ;
 const default_allocator = bun.default_allocator;
 const C = bun.C;
 const JSAst = bun.JSAst;
+const JSC = bun.JSC;

 const JSLexer = bun.js_lexer;
 const logger = bun.logger;
@@ -82,6 +83,7 @@ const PackageNameHash = Install.PackageNameHash;
 const Resolution = @import("./resolution.zig").Resolution;
 const Crypto = @import("../sha.zig").Hashers;
 const PackageJSON = @import("../resolver/package_json.zig").PackageJSON;
+const StaticHashMap = @import("../StaticHashMap.zig").StaticHashMap;

 const MetaHash = [std.crypto.hash.sha2.Sha512256.digest_length]u8;
 const zero_hash = std.mem.zeroes(MetaHash);
@@ -108,21 +110,26 @@
 allocator: Allocator,
 scratch: Scratch = .{},
 scripts: Scripts = .{},
-trusted_dependencies: NameHashSet = .{},
 workspace_paths: NameHashMap = .{},
 workspace_versions: VersionHashMap = .{},
+has_trusted_dependencies: bool = false,
+trusted_dependencies: NameHashSet = .{},
 overrides: OverrideMap = .{},

 const Stream = std.io.FixedBufferStream([]u8);
 pub const default_filename = "bun.lockb";

 pub const Scripts = struct {
+    const MAX_PARALLEL_PROCESSES = 10;
     const Entry = struct {
         cwd: string,
         script: string,
+        package_name: string,
     };

     const Entries = std.ArrayListUnmanaged(Entry);
+
+    const Queue = std.fifo.LinearFifo(*RunCommand.SpawnedScript, .Dynamic);
     const RunCommand = @import("../cli/run_command.zig").RunCommand;

     preinstall: Entries = .{},
@@ -142,7 +149,25 @@ pub const Scripts = struct {
     pub fn run(this: *Scripts, allocator: Allocator, env: *DotEnv.Loader, silent: bool, comptime hook: []const u8) !void {
         for (@field(this, hook).items) |entry| {
             if (comptime Environment.allow_assert) std.debug.assert(Fs.FileSystem.instance_loaded);
-            _ = try RunCommand.runPackageScript(allocator, entry.script, hook, entry.cwd, env, &.{}, silent);
+            _ = try RunCommand.runPackageScriptForeground(allocator, entry.script, hook, entry.cwd, env, &.{}, silent);
+        }
+    }
+
+    pub fn spawnAllPackageScripts(this: *Scripts, ctx: *PackageManager, comptime log_level: anytype, silent: bool, comptime hook: []const u8) !void {
+        _ = log_level;
+        if (comptime Environment.allow_assert) std.debug.assert(Fs.FileSystem.instance_loaded);
+
+        const items = @field(this, hook).items;
+        if (items.len > 0) {
+            ctx.pending_tasks = @truncate(items.len);
+
+            for (items) |entry| {
+                try RunCommand.spawnPackageScript(ctx, entry.script, hook, entry.package_name, entry.cwd, &.{}, silent);
+            }
+
+            while (ctx.pending_tasks > 0) {
+                ctx.uws_event_loop.tick(null);
+            }
         }
     }

@@ -2286,13 +2311,14 @@ pub const Package = extern struct {
             return false;
         }

-        pub fn enqueue(this: *const Package.Scripts, lockfile: *Lockfile, buf: []const u8, cwd: string) void {
+        pub fn enqueue(this: *const Package.Scripts, lockfile: *Lockfile, buf: []const u8, cwd: string, package_name: string) void {
             inline for (Package.Scripts.Hooks) |hook| {
                 const script = @field(this, hook);
                 if (!script.isEmpty()) {
                     @field(lockfile.scripts, hook).append(lockfile.allocator, .{
                         .cwd = lockfile.allocator.dupe(u8, cwd) catch unreachable,
                         .script = lockfile.allocator.dupe(u8, script.slice(buf)) catch unreachable,
+                        .package_name = package_name,
                     }) catch unreachable;
                 }
             }
@@ -2333,6 +2359,7 @@ pub const Package = extern struct {
             node_modules: std.fs.Dir,
             subpath: [:0]const u8,
             cwd: string,
+            name: string,
         ) !void {
             var pkg_dir = try bun.openDir(node_modules, subpath);
             defer pkg_dir.close();
@@ -2357,7 +2384,7 @@ pub const Package = extern struct {
             try builder.allocate();
             this.parseAlloc(lockfile.allocator, &builder, json);

-            this.enqueue(lockfile, tmp.buffers.string_bytes.items, cwd);
+            this.enqueue(lockfile, tmp.buffers.string_bytes.items, cwd, name);
         }
     };

@@ -3859,6 +3886,7 @@ pub const Package = extern struct {
                            };
                            lockfile.trusted_dependencies.putAssumeCapacity(@as(u32, @truncate(String.Builder.stringHash(name))), {});
                        }
+                        lockfile.has_trusted_dependencies = true;
                    },
                    else => {
                        log.addErrorFmt(&source, q.loc, allocator,
@@ -5054,6 +5082,36 @@ pub fn resolve(this: *Lockfile, package_name: []const u8, version: Dependency.Version) ?PackageID {
     return null;
 }

+/// The default list of trusted dependencies is a static hashmap
+const default_trusted_dependencies = brk: {
+    const max_values = 512;
+
+    var map: StaticHashMap([]const u8, u0, std.hash_map.StringContext, max_values) = .{};
+
+    // This file contains a list of dependencies that Bun runs `postinstall` on by default.
+    const data = @embedFile("./default-trusted-dependencies.txt");
+    @setEvalBranchQuota(99999);
+
+    var iter = std.mem.tokenizeAny(u8, data, " \n\t");
+    while (iter.next()) |dep| {
+        if (map.len == max_values) {
+            @compileError("default-trusted-dependencies.txt is too large, please increase 'max_values' in lockfile.zig");
+        }
+        map.putAssumeCapacity(dep, 0);
+    }
+
+    break :brk &map;
+};
+
+pub fn hasTrustedDependency(this: *Lockfile, name: []const u8) bool {
+    if (this.has_trusted_dependencies) {
+        const hash = @as(u32, @truncate(String.Builder.stringHash(name)));
+        return this.trusted_dependencies.contains(hash);
+    } else {
+        return default_trusted_dependencies.has(name);
+    }
+}
+
 pub fn jsonStringifyDependency(this: *const Lockfile, w: anytype, dep: Dependency, res: ?PackageID) !void {
     const sb = this.buffers.string_bytes.items;
     var buf: [2048]u8 = undefined;
diff --git a/src/io/io_darwin.zig b/src/io/io_darwin.zig
index cb2d15afb..f789142ba 100644
--- a/src/io/io_darwin.zig
+++ b/src/io/io_darwin.zig
@@ -512,7 +512,7 @@ pub const Waker = struct {

     const zeroed = std.mem.zeroes([16]Kevent64);

-    pub fn wake(this: *Waker) !void {
+    pub fn wake(this: *Waker) void {
         bun.JSC.markBinding(@src());

         if (io_darwin_schedule_wakeup(this.machport)) {
diff --git a/src/io/io_linux.zig b/src/io/io_linux.zig
index 8f054490b..b36c10e1d 100644
--- a/src/io/io_linux.zig
+++ b/src/io/io_linux.zig
@@ -1003,7 +1003,7 @@ pub const Waker = struct {
         return @as(u64, @intCast(bytes));
     }

-    pub fn wake(this: *const Waker) !void {
+    pub fn wake(this: *const Waker) void {
         var bytes: usize = 1;
         _ = std.os.write(
             this.fd,
diff --git a/src/network_thread.zig b/src/network_thread.zig
index e1c2046a8..60d7d30f9 100644
--- a/src/network_thread.zig
+++ b/src/network_thread.zig
@@ -164,7 +164,7 @@ pub fn schedule(this: *@This(), batch: Batch) void {
         const one = @as([8]u8, @bitCast(@as(usize, batch.len)));
         _ = std.os.write(this.waker.fd, &one) catch @panic("Failed to write to eventfd");
     } else {
-        this.waker.wake() catch @panic("Failed to wake");
+        this.waker.wake();
     }
 }
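Note on the wakeup path (not part of the patch): a minimal sketch of how the type-erased Waiter added to install.zig is constructed and driven. The wrapper function below is hypothetical; only Waiter, uws.Loop, wait() and wake() come from the changes above.

    // Illustrative sketch only -- assumes the Waiter struct and uws import from install.zig.
    fn sketchSleepAndWake() void {
        var waiter = Waiter.fromUWSLoop(uws.Loop.get());

        // wait() runs the uws event loop (uws_loop.run()) instead of blocking on a
        // platform Waker (machport on macOS, eventfd on Linux).
        _ = waiter.wait() catch 0;

        // wake() calls uws_loop.wakeup(), which cannot fail; this is why the
        // Waker.wake() signatures in io_darwin.zig and io_linux.zig drop the error union.
        waiter.wake();
    }

Routing both waiting and waking through the uws loop appears to be the point of the change: runTasks() can run the loop directly and spawnAllPackageScripts() can tick it until pending_tasks reaches zero, instead of parking the install thread on a platform-specific Waker.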