path: root/src/install
author Dylan Conway <dylan.conway567@gmail.com> 2023-10-17 14:10:25 -0700
committer Dylan Conway <dylan.conway567@gmail.com> 2023-10-17 14:10:25 -0700
commit 7458b969c5d9971e89d187b687e1924e78da427e (patch)
tree ee3dbf95c728cf407bf49a27826b541e9264a8bd /src/install
parent d4a2c29131ec154f5e4db897d4deedab2002cbc4 (diff)
parent e91436e5248d947b50f90b4a7402690be8a41f39 (diff)
Merge branch 'main' into postinstall_3
Diffstat (limited to 'src/install')
-rw-r--r-- src/install/bin.zig               65
-rw-r--r-- src/install/dependency.zig       213
-rw-r--r-- src/install/extract_tarball.zig    4
-rw-r--r-- src/install/install.zig          820
-rw-r--r-- src/install/integrity.zig         47
-rw-r--r-- src/install/lockfile.zig        1187
-rw-r--r-- src/install/migration.zig        947
-rw-r--r-- src/install/npm.zig              124
-rw-r--r-- src/install/padding_checker.zig   17
-rw-r--r-- src/install/repository.zig        28
-rw-r--r-- src/install/resolution.zig        97
-rw-r--r-- src/install/semver.zig            19
12 files changed, 3107 insertions, 461 deletions
diff --git a/src/install/bin.zig b/src/install/bin.zig
index 0a8d62c8c..3c101e388 100644
--- a/src/install/bin.zig
+++ b/src/install/bin.zig
@@ -20,7 +20,8 @@ pub const Bin = extern struct {
tag: Tag = Tag.none,
_padding_tag: [3]u8 = .{0} ** 3,
- value: Value = Value{ .none = {} },
+ // Largest member must be zero-initialized
+ value: Value = Value{ .map = ExternalStringList{} },
pub fn verify(this: *const Bin, extern_strings: []const ExternalString) void {
if (comptime !Environment.allow_assert)
@@ -67,36 +68,55 @@ pub const Bin = extern struct {
}
pub fn clone(this: *const Bin, buf: []const u8, prev_external_strings: []const ExternalString, all_extern_strings: []ExternalString, extern_strings_slice: []ExternalString, comptime StringBuilder: type, builder: StringBuilder) Bin {
- return switch (this.tag) {
- .none => Bin{ .tag = .none, .value = .{ .none = {} } },
- .file => Bin{
- .tag = .file,
- .value = .{ .file = builder.append(String, this.value.file.slice(buf)) },
+ switch (this.tag) {
+ .none => {
+ return Bin{
+ .tag = .none,
+ .value = Value.init(.{ .none = {} }),
+ };
},
- .named_file => Bin{
- .tag = .named_file,
- .value = .{
- .named_file = [2]String{
- builder.append(String, this.value.named_file[0].slice(buf)),
- builder.append(String, this.value.named_file[1].slice(buf)),
- },
- },
+ .file => {
+ return Bin{
+ .tag = .file,
+ .value = Value.init(.{ .file = builder.append(String, this.value.file.slice(buf)) }),
+ };
+ },
+ .named_file => {
+ return Bin{
+ .tag = .named_file,
+ .value = Value.init(
+ .{
+ .named_file = [2]String{
+ builder.append(String, this.value.named_file[0].slice(buf)),
+ builder.append(String, this.value.named_file[1].slice(buf)),
+ },
+ },
+ ),
+ };
},
- .dir => Bin{
- .tag = .dir,
- .value = .{ .dir = builder.append(String, this.value.dir.slice(buf)) },
+ .dir => {
+ return Bin{
+ .tag = .dir,
+ .value = Value.init(.{ .dir = builder.append(String, this.value.dir.slice(buf)) }),
+ };
},
.map => {
for (this.value.map.get(prev_external_strings), 0..) |extern_string, i| {
extern_strings_slice[i] = builder.append(ExternalString, extern_string.slice(buf));
}
- return .{
+ return Bin{
.tag = .map,
- .value = .{ .map = ExternalStringList.init(all_extern_strings, extern_strings_slice) },
+ .value = Value.init(.{ .map = ExternalStringList.init(all_extern_strings, extern_strings_slice) }),
};
},
- };
+ }
+
+ unreachable;
+ }
+
+ pub fn init() Bin {
+ return bun.serializable(.{ .tag = .none, .value = Value.init(.{ .none = {} }) });
}
pub const Value = extern union {
@@ -132,6 +152,11 @@ pub const Bin = extern struct {
/// }
///```
map: ExternalStringList,
+
+ /// To avoid undefined memory between union values, we must zero-initialize the union first.
+ pub fn init(field: anytype) Value {
+ return bun.serializableInto(Value, field);
+ }
};
pub const Tag = enum(u8) {
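Context for the `Value.init` pattern above: `Bin` is an `extern struct` that gets hashed and serialized byte-for-byte, so any bytes past a smaller active union member would otherwise be left as undefined memory. A minimal standalone sketch of the idea, using `std.mem.zeroes` in place of Bun's `bun.serializableInto` helper (the `Value` below is hypothetical, not Bun's):

const std = @import("std");

const Value = extern union {
    file: u32,
    map: [4]u32, // largest member

    // Zero the whole union first, then set the active field, so the
    // bytes past a smaller member stay deterministic when serialized.
    fn init(comptime field_name: []const u8, payload: anytype) Value {
        var v = std.mem.zeroes(Value);
        @field(v, field_name) = payload;
        return v;
    }
};

test "smaller member leaves a zeroed tail" {
    const v = Value.init("file", @as(u32, 0xAABBCCDD));
    const bytes = std.mem.asBytes(&v);
    try std.testing.expect(std.mem.allEqual(u8, bytes[4..], 0));
}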
diff --git a/src/install/dependency.zig b/src/install/dependency.zig
index 3a9ee6b54..ca0d702aa 100644
--- a/src/install/dependency.zig
+++ b/src/install/dependency.zig
@@ -49,10 +49,10 @@ version: Dependency.Version = .{},
/// - `peerDependencies`
/// Technically, having the same package name specified under multiple fields is invalid
/// But we don't want to allocate extra arrays for them. So we use a bitfield instead.
-behavior: Behavior = .uninitialized,
+behavior: Behavior = Behavior.uninitialized,
/// Sorting order for dependencies is:
-/// 1. [`dependencies`, `devDependencies`, `optionalDependencies`, `peerDependencies`]
+/// 1. [ `peerDependencies`, `optionalDependencies`, `devDependencies`, `dependencies` ]
/// 2. name ASC
/// "name" must be ASC so that later, when we rebuild the lockfile
/// we insert it back in reverse order without an extra sorting pass
@@ -147,7 +147,7 @@ pub fn toDependency(
return Dependency{
.name = name,
.name_hash = @as(u64, @bitCast(this[8..16].*)),
- .behavior = @as(Dependency.Behavior, @enumFromInt(this[16])),
+ .behavior = @bitCast(this[16]),
.version = Dependency.Version.toVersion(name, this[17..this.len].*, ctx),
};
}
@@ -156,7 +156,7 @@ pub fn toExternal(this: Dependency) External {
var bytes: External = undefined;
bytes[0..this.name.bytes.len].* = this.name.bytes;
bytes[8..16].* = @as([8]u8, @bitCast(this.name_hash));
- bytes[16] = @intFromEnum(this.behavior);
+ bytes[16] = @bitCast(this.behavior);
bytes[17..bytes.len].* = this.version.toExternal();
return bytes;
}
@@ -189,24 +189,28 @@ pub inline fn isGitHubRepoPath(dependency: string) bool {
if (dependency.len < 3) return false;
var hash_index: usize = 0;
- var slash_index: usize = 0;
+
+ // the branch name may contain slashes
+ // - oven-sh/bun#branch/name
+ var first_slash_index: usize = 0;
for (dependency, 0..) |c, i| {
switch (c) {
'/' => {
if (i == 0) return false;
- if (slash_index > 0) return false;
- slash_index = i;
+ if (first_slash_index == 0) {
+ first_slash_index = i;
+ }
},
'#' => {
if (i == 0) return false;
if (hash_index > 0) return false;
- if (slash_index == 0) return false;
+ if (first_slash_index == 0) return false;
hash_index = i;
},
// Not allowed in username
'.', '_' => {
- if (slash_index == 0) return false;
+ if (first_slash_index == 0) return false;
},
// Must be alphanumeric
'-', 'a'...'z', 'A'...'Z', '0'...'9' => {},
@@ -214,7 +218,31 @@ pub inline fn isGitHubRepoPath(dependency: string) bool {
}
}
- return hash_index != dependency.len - 1 and slash_index > 0 and slash_index != dependency.len - 1;
+ return hash_index != dependency.len - 1 and first_slash_index > 0 and first_slash_index != dependency.len - 1;
+}
+
+/// GitHub allows the following URL format:
+/// https://github.com/<org>/<repo>/tarball/<ref>
+/// This is a legacy (but still supported) method of retrieving a tarball of an
+/// entire source tree at some git reference. (ref = branch, tag, etc. Note: a branch
+/// name can contain an arbitrary number of slashes.)
+///
+/// This also checks for a github url that ends with ".tar.gz"
+pub inline fn isGitHubTarballPath(dependency: string) bool {
+ if (isTarball(dependency)) return true;
+
+ var parts = strings.split(dependency, "/");
+
+ var n_parts: usize = 0;
+
+ while (parts.next()) |part| {
+ n_parts += 1;
+ if (n_parts == 3) {
+ return strings.eql(part, "tarball");
+ }
+ }
+
+ return false;
}
// This won't work for query string params, but I'll let someone file an issue
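A standalone sketch of the segment rule `isGitHubTarballPath` applies above (only the `tarball`-segment check; the real function also accepts `.tar.gz`-style names via `isTarball`), written against plain `std.mem` rather than Bun's `strings` helpers:

const std = @import("std");

fn looksLikeGitHubTarballPath(dep: []const u8) bool {
    var parts = std.mem.splitScalar(u8, dep, '/');
    var n: usize = 0;
    while (parts.next()) |part| {
        n += 1;
        // "<org>/<repo>/tarball/<ref>" - only the third segment matters;
        // the ref after it may contain any number of slashes.
        if (n == 3) return std.mem.eql(u8, part, "tarball");
    }
    return false;
}

test {
    try std.testing.expect(looksLikeGitHubTarballPath("oven-sh/bun/tarball/dylan/some-branch"));
    try std.testing.expect(!looksLikeGitHubTarballPath("oven-sh/bun"));
}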
@@ -224,7 +252,7 @@ pub inline fn isTarball(dependency: string) bool {
}
pub const Version = struct {
- tag: Dependency.Version.Tag = .uninitialized,
+ tag: Tag = .uninitialized,
literal: String = .{},
value: Value = .{ .uninitialized = {} },
@@ -489,8 +517,33 @@ pub const Version = struct {
},
else => {},
}
+
if (strings.hasPrefixComptime(url, "github.com/")) {
- if (isGitHubRepoPath(url["github.com/".len..])) return .github;
+ const path = url["github.com/".len..];
+ if (isGitHubTarballPath(path)) return .tarball;
+ if (isGitHubRepoPath(path)) return .github;
+ }
+
+ if (strings.indexOfChar(url, '.')) |dot| {
+ if (Repository.Hosts.has(url[0..dot])) return .git;
+ }
+
+ return .tarball;
+ }
+ }
+ },
+ 's' => {
+ if (strings.hasPrefixComptime(dependency, "ssh")) {
+ var url = dependency["ssh".len..];
+ if (url.len > 2) {
+ if (url[0] == ':') {
+ if (strings.hasPrefixComptime(url, "://")) {
+ url = url["://".len..];
+ }
+ }
+
+ if (strings.indexOfChar(url, '.')) |dot| {
+ if (Repository.Hosts.has(url[0..dot])) return .git;
}
}
}
@@ -561,7 +614,7 @@ pub const Version = struct {
}
};
- const NpmInfo = struct {
+ pub const NpmInfo = struct {
name: String,
version: Semver.Query.Group,
@@ -570,7 +623,7 @@ pub const Version = struct {
}
};
- const TagInfo = struct {
+ pub const TagInfo = struct {
name: String,
tag: String,
@@ -579,7 +632,7 @@ pub const Version = struct {
}
};
- const TarballInfo = struct {
+ pub const TarballInfo = struct {
uri: URI,
package_name: String = .{},
@@ -621,7 +674,8 @@ pub inline fn parse(
sliced: *const SlicedString,
log: ?*logger.Log,
) ?Version {
- return parseWithOptionalTag(allocator, alias, dependency, null, sliced, log);
+ const dep = std.mem.trimLeft(u8, dependency, " \t\n\r");
+ return parseWithTag(allocator, alias, dep, Version.Tag.infer(dep), sliced, log);
}
pub fn parseWithOptionalTag(
@@ -839,6 +893,12 @@ pub fn parseWithTag(
.literal = sliced.value(),
.value = .{ .tarball = .{ .uri = .{ .local = sliced.sub(dependency[7..]).value() } } },
};
+ } else if (strings.hasPrefixComptime(dependency, "file:")) {
+ return .{
+ .tag = .tarball,
+ .literal = sliced.value(),
+ .value = .{ .tarball = .{ .uri = .{ .local = sliced.sub(dependency[5..]).value() } } },
+ };
} else if (strings.contains(dependency, "://")) {
if (log_) |log| log.addErrorFmt(null, logger.Loc.Empty, allocator, "invalid or unsupported dependency \"{s}\"", .{dependency}) catch unreachable;
return null;
@@ -901,78 +961,83 @@ pub fn parseWithTag(
}
}
-pub const Behavior = enum(u8) {
- uninitialized = 0,
- _,
+pub const Behavior = packed struct(u8) {
+ pub const uninitialized: Behavior = .{};
- pub const normal: u8 = 1 << 1;
- pub const optional: u8 = 1 << 2;
- pub const dev: u8 = 1 << 3;
- pub const peer: u8 = 1 << 4;
- pub const workspace: u8 = 1 << 5;
+ // these padding fields preserve compatibility
+ // with older versions of the v2 lockfile
+ _unused_1: u1 = 0,
+
+ normal: bool = false,
+ optional: bool = false,
+ dev: bool = false,
+ peer: bool = false,
+ workspace: bool = false,
+
+ _unused_2: u2 = 0,
+
+ pub const normal = Behavior{ .normal = true };
+ pub const optional = Behavior{ .optional = true };
+ pub const dev = Behavior{ .dev = true };
+ pub const peer = Behavior{ .peer = true };
+ pub const workspace = Behavior{ .workspace = true };
pub inline fn isNormal(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.normal) != 0;
+ return this.normal;
}
pub inline fn isOptional(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.optional) != 0 and !this.isPeer();
+ return this.optional and !this.isPeer();
}
pub inline fn isDev(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.dev) != 0;
+ return this.dev;
}
pub inline fn isPeer(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.peer) != 0;
+ return this.peer;
}
pub inline fn isWorkspace(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.workspace) != 0;
+ return this.workspace;
}
pub inline fn setNormal(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.normal));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.normal));
- }
+ var b = this;
+ b.normal = value;
+ return b;
}
pub inline fn setOptional(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.optional));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.optional));
- }
+ var b = this;
+ b.optional = value;
+ return b;
}
pub inline fn setDev(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.dev));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.dev));
- }
+ var b = this;
+ b.dev = value;
+ return b;
}
pub inline fn setPeer(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.peer));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.peer));
- }
+ var b = this;
+ b.peer = value;
+ return b;
}
pub inline fn setWorkspace(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.workspace));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.workspace));
- }
+ var b = this;
+ b.workspace = value;
+ return b;
+ }
+
+ pub inline fn eq(lhs: Behavior, rhs: Behavior) bool {
+ return @as(u8, @bitCast(lhs)) == @as(u8, @bitCast(rhs));
}
pub inline fn cmp(lhs: Behavior, rhs: Behavior) std.math.Order {
- if (@intFromEnum(lhs) == @intFromEnum(rhs)) {
+ if (eq(lhs, rhs)) {
return .eq;
}
@@ -1025,4 +1090,42 @@ pub const Behavior = enum(u8) {
(features.peer_dependencies and this.isPeer()) or
this.isWorkspace();
}
+
+ pub fn format(self: Behavior, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
+ const fields = std.meta.fields(Behavior);
+ var num_fields: u8 = 0;
+ inline for (fields) |f| {
+ if (f.type == bool and @field(self, f.name)) {
+ num_fields += 1;
+ }
+ }
+ switch (num_fields) {
+ 0 => try writer.writeAll("Behavior.uninitialized"),
+ 1 => {
+ inline for (fields) |f| {
+ if (f.type == bool and @field(self, f.name)) {
+ try writer.writeAll("Behavior." ++ f.name);
+ break;
+ }
+ }
+ },
+ else => {
+ try writer.writeAll("Behavior{");
+ inline for (fields) |f| {
+ if (f.type == bool and @field(self, f.name)) {
+ try writer.writeAll(" " ++ f.name);
+ }
+ }
+ try writer.writeAll(" }");
+ },
+ }
+ }
+
+ comptime {
+ std.debug.assert(@as(u8, @bitCast(Behavior.normal)) == (1 << 1));
+ std.debug.assert(@as(u8, @bitCast(Behavior.optional)) == (1 << 2));
+ std.debug.assert(@as(u8, @bitCast(Behavior.dev)) == (1 << 3));
+ std.debug.assert(@as(u8, @bitCast(Behavior.peer)) == (1 << 4));
+ std.debug.assert(@as(u8, @bitCast(Behavior.workspace)) == (1 << 5));
+ }
};
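Note on the `Behavior` rewrite: Zig lays out `packed struct` fields starting at the least significant bit, so the leading `_unused_1: u1` keeps each flag on the same bit the old `enum(u8)` masks used, which is what the `comptime` asserts verify. A reduced sketch of that layout check:

const std = @import("std");

const Flags = packed struct(u8) {
    _unused_1: u1 = 0, // bit 0, reserved for lockfile-v2 compatibility
    normal: bool = false, // bit 1
    optional: bool = false, // bit 2
    dev: bool = false, // bit 3
    peer: bool = false, // bit 4
    workspace: bool = false, // bit 5
    _unused_2: u2 = 0, // bits 6-7
};

test "flags land on the same bits as the old enum(u8) masks" {
    try std.testing.expectEqual(@as(u8, 1 << 1), @as(u8, @bitCast(Flags{ .normal = true })));
    try std.testing.expectEqual(@as(u8, 1 << 5), @as(u8, @bitCast(Flags{ .workspace = true })));
}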
diff --git a/src/install/extract_tarball.zig b/src/install/extract_tarball.zig
index 2bcad3a53..d0dff72c5 100644
--- a/src/install/extract_tarball.zig
+++ b/src/install/extract_tarball.zig
@@ -374,8 +374,10 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD
}
const ret_json_path = try FileSystem.instance.dirname_store.append(@TypeOf(json_path), json_path);
+ const url = try FileSystem.instance.dirname_store.append(@TypeOf(this.url.slice()), this.url.slice());
+
return .{
- .url = this.url.slice(),
+ .url = url,
.resolved = resolved,
.json_path = ret_json_path,
.json_buf = json_buf,
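This extract_tarball.zig change looks like a lifetime fix: the returned `.url` previously borrowed from `this.url`, whose backing memory need not outlive the returned struct, so the URL is now copied into the long-lived `dirname_store` the same way `json_path` is. The pattern in miniature (hypothetical helper, plain allocator standing in for Bun's store):

const std = @import("std");

// Duplicate the borrowed slice so the returned value owns memory
// that outlives the task's buffers.
fn keepUrl(allocator: std.mem.Allocator, borrowed_url: []const u8) ![]const u8 {
    return allocator.dupe(u8, borrowed_url);
}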
diff --git a/src/install/install.zig b/src/install/install.zig
index d455e5670..4a20d1b9f 100644
--- a/src/install/install.zig
+++ b/src/install/install.zig
@@ -125,8 +125,8 @@ pub fn ExternalSlice(comptime Type: type) type {
pub fn ExternalSliceAligned(comptime Type: type, comptime alignment_: ?u29) type {
return extern struct {
- const alignment = alignment_ orelse @alignOf(*Type);
- const Slice = @This();
+ pub const alignment = alignment_ orelse @alignOf(*Type);
+ pub const Slice = @This();
pub const Child: type = Type;
@@ -172,7 +172,7 @@ pub const ExternalStringMap = extern struct {
value: ExternalStringList = .{},
};
-pub const PackageNameHash = u64;
+pub const PackageNameHash = u64; // Use String.Builder.stringHash to compute this
pub const Aligner = struct {
pub fn write(comptime Type: type, comptime Writer: type, writer: Writer, pos: usize) !usize {
@@ -254,16 +254,29 @@ const NetworkTask = struct {
warn_on_error: bool,
) !void {
this.url_buf = blk: {
+
+ // Not all registries support scoped package names when fetching the manifest.
+ // registry.npmjs.org supports both "@storybook%2Faddons" and "@storybook/addons"
+ // Other registries like AWS codeartifact only support the former.
+ // "npm" CLI requests the manifest with the encoded name.
+ var arena = std.heap.ArenaAllocator.init(bun.default_allocator);
+ defer arena.deinit();
+ var stack_fallback_allocator = std.heap.stackFallback(512, arena.allocator());
+ var encoded_name = name;
+ if (strings.containsChar(name, '/')) {
+ encoded_name = try std.mem.replaceOwned(u8, stack_fallback_allocator.get(), name, "/", "%2f");
+ }
+
const tmp = bun.JSC.URL.join(
bun.String.fromUTF8(scope.url.href),
- bun.String.fromUTF8(name),
+ bun.String.fromUTF8(encoded_name),
);
defer tmp.deref();
if (tmp.tag == .Dead) {
const msg = .{
- .fmt = "Failed to join registry \"{s}\" and package \"{s}\" URLs",
- .args = .{ scope.url.href, name },
+ .fmt = "Failed to join registry {} and package {} URLs",
+ .args = .{ strings.QuotedFormatter{ .text = scope.url.href }, strings.QuotedFormatter{ .text = name } },
};
if (warn_on_error)
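A minimal sketch of the encoding step introduced above, assuming a plain allocator instead of the arena-plus-stack-fallback used in the real code:

const std = @import("std");

// "@storybook/addons" -> "@storybook%2faddons"; unscoped names pass through.
fn encodeManifestName(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
    if (std.mem.indexOfScalar(u8, name, '/') == null) return allocator.dupe(u8, name);
    return std.mem.replaceOwned(u8, allocator, name, "/", "%2f");
}

test {
    const encoded = try encodeManifestName(std.testing.allocator, "@storybook/addons");
    defer std.testing.allocator.free(encoded);
    try std.testing.expectEqualStrings("@storybook%2faddons", encoded);
}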
@@ -347,6 +360,10 @@ const NetworkTask = struct {
);
this.http.client.reject_unauthorized = this.package_manager.tlsRejectUnauthorized();
+ if (PackageManager.verbose_install) {
+ this.http.client.verbose = true;
+ }
+
this.callback = .{
.package_manifest = .{
.name = try strings.StringOrTinyString.initAppendIfNeeded(name, *FileSystem.FilenameStore, &FileSystem.FilenameStore.instance),
@@ -426,6 +443,9 @@ const NetworkTask = struct {
null,
);
this.http.client.reject_unauthorized = this.package_manager.tlsRejectUnauthorized();
+ if (PackageManager.verbose_install) {
+ this.http.client.verbose = true;
+ }
this.callback = .{ .extract = tarball };
}
@@ -1072,6 +1092,7 @@ const PackageInstall = struct {
)) {
0 => {},
else => |errno| switch (std.os.errno(errno)) {
+ .XDEV => return error.NotSupported, // not same file system
.OPNOTSUPP => return error.NotSupported,
.NOENT => return error.FileNotFound,
// sometimes the downloaded npm package already has node_modules in it, so just ignore the EXIST error here
@@ -1130,6 +1151,7 @@ const PackageInstall = struct {
)) {
0 => .{ .success = {} },
else => |errno| switch (std.os.errno(errno)) {
+ .XDEV => error.NotSupported, // not same file system
.OPNOTSUPP => error.NotSupported,
.NOENT => error.FileNotFound,
// We first try to delete the directory
@@ -1683,6 +1705,9 @@ pub const PackageManager = struct {
cpu_count: u32 = 0,
package_json_updates: []UpdateRequest = &[_]UpdateRequest{},
+ // used for looking up workspaces that aren't loaded into Lockfile.workspace_paths
+ workspaces: std.StringArrayHashMap(?Semver.Version),
+
// progress bar stuff when not stack allocated
root_progress_node: *std.Progress.Node = undefined,
root_download_node: std.Progress.Node = undefined,
@@ -1722,6 +1747,9 @@ pub const PackageManager = struct {
wait_count: std.atomic.Atomic(usize) = std.atomic.Atomic(usize).init(0),
onWake: WakeHandler = .{},
+ ci_mode: bun.LazyBool(computeIsContinuousIntegration, @This(), "ci_mode") = .{},
+
+ peer_dependencies: std.ArrayListUnmanaged(DependencyID) = .{},
uws_event_loop: *uws.Loop,
file_poll_store: JSC.FilePoll.Store,
@@ -1745,6 +1773,14 @@ pub const PackageManager = struct {
return this.env.getTLSRejectUnauthorized();
}
+ pub fn computeIsContinuousIntegration(this: *PackageManager) bool {
+ return this.env.isCI();
+ }
+
+ pub inline fn isContinuousIntegration(this: *PackageManager) bool {
+ return this.ci_mode.get();
+ }
+
pub const WakeHandler = struct {
// handler: fn (ctx: *anyopaque, pm: *PackageManager) void = undefined,
// onDependencyError: fn (ctx: *anyopaque, Dependency, PackageID, anyerror) void = undefined,
@@ -1783,7 +1819,7 @@ pub const PackageManager = struct {
pub fn sleep(this: *PackageManager) void {
if (this.wait_count.swap(0, .Monotonic) > 0) return;
- bun.Mimalloc.mi_collect(false);
+ Output.flush();
_ = this.waiter.wait() catch 0;
}
@@ -1834,6 +1870,7 @@ pub const PackageManager = struct {
dep_id,
&this.lockfile.buffers.dependencies.items[dep_id],
invalid_package_id,
+ false,
assignRootResolution,
failRootResolution,
) catch |err| {
@@ -1848,7 +1885,7 @@ pub const PackageManager = struct {
switch (this.options.log_level) {
inline else => |log_level| {
if (log_level.showProgress()) this.startProgressBarIfNone();
- while (this.pending_tasks > 0) : (this.sleep()) {
+ while (this.pending_tasks > 0) {
this.runTasks(
void,
{},
@@ -1858,10 +1895,18 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ false,
log_level,
) catch |err| {
return .{ .failure = err };
};
+
+ if (PackageManager.verbose_install and this.pending_tasks > 0) {
+ Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{this.pending_tasks});
+ }
+
+ if (this.pending_tasks > 0)
+ this.sleep();
}
},
}
@@ -1914,6 +1959,47 @@ pub const PackageManager = struct {
@memset(this.preinstall_state.items[offset..], PreinstallState.unknown);
}
+ pub fn formatLaterVersionInCache(
+ this: *PackageManager,
+ name: []const u8,
+ name_hash: PackageNameHash,
+ resolution: Resolution,
+ ) ?Semver.Version.Formatter {
+ switch (resolution.tag) {
+ Resolution.Tag.npm => {
+ if (resolution.value.npm.version.tag.hasPre())
+ // TODO:
+ return null;
+
+ // We skip this in CI to avoid any performance impact in an environment where it's unlikely to be used,
+ // and because it makes tests more consistent
+ if (this.isContinuousIntegration())
+ return null;
+
+ const manifest: *const Npm.PackageManifest = this.manifests.getPtr(name_hash) orelse brk: {
+ if (Npm.PackageManifest.Serializer.load(this.allocator, this.getCacheDirectory(), name) catch null) |manifest_| {
+ this.manifests.put(this.allocator, name_hash, manifest_) catch return null;
+ break :brk this.manifests.getPtr(name_hash).?;
+ }
+
+ return null;
+ };
+
+ if (manifest.findByDistTag("latest")) |latest_version| {
+ if (latest_version.version.order(
+ resolution.value.npm.version,
+ manifest.string_buf,
+ this.lockfile.buffers.string_bytes.items,
+ ) != .gt) return null;
+ return latest_version.version.fmt(manifest.string_buf);
+ }
+
+ return null;
+ },
+ else => return null,
+ }
+ }
+
pub fn setPreinstallState(this: *PackageManager, package_id: PackageID, lockfile: *Lockfile, value: PreinstallState) void {
this.ensurePreinstallStateListCapacity(lockfile.packages.len) catch return;
this.preinstall_state.items[package_id] = value;
@@ -2111,20 +2197,27 @@ pub const PackageManager = struct {
}
fn allocGitHubURL(this: *const PackageManager, repository: *const Repository) string {
- var github_api_domain: string = "api.github.com";
- if (this.env.map.get("GITHUB_API_DOMAIN")) |api_domain| {
- if (api_domain.len > 0) {
- github_api_domain = api_domain;
+ var github_api_url: string = "https://api.github.com";
+ if (this.env.map.get("GITHUB_API_URL")) |url| {
+ if (url.len > 0) {
+ github_api_url = url;
}
}
+
+ const owner = this.lockfile.str(&repository.owner);
+ const repo = this.lockfile.str(&repository.repo);
+ const committish = this.lockfile.str(&repository.committish);
+
return std.fmt.allocPrint(
this.allocator,
- "https://{s}/repos/{s}/{s}/tarball/{s}",
+ "{s}/repos/{s}/{s}{s}tarball/{s}",
.{
- github_api_domain,
- this.lockfile.str(&repository.owner),
- this.lockfile.str(&repository.repo),
- this.lockfile.str(&repository.committish),
+ strings.withoutTrailingSlash(github_api_url),
+ owner,
+ repo,
+ // repo might be empty if dep is https://github.com/... style
+ if (repo.len > 0) "/" else "",
+ committish,
},
) catch unreachable;
}
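For reference, `GITHUB_API_URL` is the variable GitHub Actions sets by default (the old `GITHUB_API_DOMAIN` was bespoke), and stripping the trailing slash keeps the joined URL well-formed. A sketch of the same assembly with plain `std` (hypothetical helper name):

const std = @import("std");

fn githubTarballUrl(
    allocator: std.mem.Allocator,
    api_url: []const u8, // e.g. the GITHUB_API_URL env var, may end in "/"
    owner: []const u8,
    repo: []const u8, // may be empty for https://github.com/... style deps
    committish: []const u8,
) ![]u8 {
    return std.fmt.allocPrint(allocator, "{s}/repos/{s}/{s}{s}tarball/{s}", .{
        std.mem.trimRight(u8, api_url, "/"),
        owner,
        repo,
        if (repo.len > 0) @as([]const u8, "/") else "",
        committish,
    });
}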
@@ -2447,13 +2540,14 @@ pub const PackageManager = struct {
behavior: Behavior,
manifest: *const Npm.PackageManifest,
find_result: Npm.PackageManifest.FindResult,
+ install_peer: bool,
comptime successFn: SuccessFn,
) !?ResolvedPackageResult {
// Was this package already allocated? Let's reuse the existing one.
if (this.lockfile.getPackageID(
name_hash,
- if (behavior.isPeer()) version else null,
+ if (behavior.isPeer() and !install_peer) version else null,
&.{
.tag = .npm,
.value = .{
@@ -2469,7 +2563,7 @@ pub const PackageManager = struct {
.package = this.lockfile.packages.get(id),
.is_first_time = false,
};
- } else if (behavior.isPeer()) {
+ } else if (behavior.isPeer() and !install_peer) {
return null;
}
@@ -2574,7 +2668,7 @@ pub const PackageManager = struct {
if (comptime Environment.allow_assert) {
std.debug.assert(dependency_id < buffers.resolutions.items.len);
std.debug.assert(package_id < this.lockfile.packages.len);
- std.debug.assert(buffers.resolutions.items[dependency_id] == invalid_package_id);
+ // std.debug.assert(buffers.resolutions.items[dependency_id] == invalid_package_id);
}
buffers.resolutions.items[dependency_id] = package_id;
const string_buf = buffers.string_bytes.items;
@@ -2609,6 +2703,7 @@ pub const PackageManager = struct {
behavior: Behavior,
dependency_id: DependencyID,
resolution: PackageID,
+ install_peer: bool,
comptime successFn: SuccessFn,
) !?ResolvedPackageResult {
name.assertDefined();
@@ -2619,6 +2714,29 @@ pub const PackageManager = struct {
switch (version.tag) {
.npm, .dist_tag => {
+ if (version.tag == .npm) {
+ if (this.lockfile.workspace_versions.count() > 0) resolve_from_workspace: {
+ if (this.lockfile.workspace_versions.get(name_hash)) |workspace_version| {
+ if (version.value.npm.version.satisfies(workspace_version)) {
+ const root_package = this.lockfile.rootPackage() orelse break :resolve_from_workspace;
+ const root_dependencies = root_package.dependencies.get(this.lockfile.buffers.dependencies.items);
+ const root_resolutions = root_package.resolutions.get(this.lockfile.buffers.resolutions.items);
+
+ for (root_dependencies, root_resolutions) |root_dep, workspace_package_id| {
+ if (workspace_package_id != invalid_package_id and root_dep.version.tag == .workspace and root_dep.name_hash == name_hash) {
+ // make sure verifyResolutions sees this resolution as a valid package id
+ this.lockfile.buffers.resolutions.items[dependency_id] = workspace_package_id;
+ return .{
+ .package = this.lockfile.packages.get(workspace_package_id),
+ .is_first_time = false,
+ };
+ }
+ }
+ }
+ }
+ }
+ }
+
// Resolve the version from the loaded NPM manifest
const manifest = this.manifests.getPtr(name_hash) orelse return null; // manifest might still be downloading. This feels unreliable.
const find_result: Npm.PackageManifest.FindResult = switch (version.tag) {
@@ -2639,6 +2757,7 @@ pub const PackageManager = struct {
behavior,
manifest,
find_result,
+ install_peer,
successFn,
);
},
@@ -2661,8 +2780,8 @@ pub const PackageManager = struct {
}
},
.workspace => {
- // relative to cwd
- const workspace_path: *const String = this.lockfile.workspace_paths.getPtr(@truncate(String.Builder.stringHash(this.lockfile.str(&version.value.workspace)))) orelse &version.value.workspace;
+ // package name hash should be used to find workspace path from map
+ const workspace_path: *const String = this.lockfile.workspace_paths.getPtr(@truncate(name_hash)) orelse &version.value.workspace;
const res = FolderResolution.getOrPut(.{ .relative = .workspace }, version, this.lockfile.str(workspace_path), this);
@@ -2912,16 +3031,20 @@ pub const PackageManager = struct {
/// This must be a *const to prevent UB
dependency: *const Dependency,
resolution: PackageID,
+ install_peer: bool,
) !void {
return this.enqueueDependencyWithMainAndSuccessFn(
id,
dependency,
resolution,
+ install_peer,
assignResolution,
null,
);
}
+ const debug = Output.scoped(.PackageManager, true);
+
/// Q: "What do we do with a dependency in a package.json?"
/// A: "We enqueue it!"
fn enqueueDependencyWithMainAndSuccessFn(
@@ -2930,19 +3053,35 @@ pub const PackageManager = struct {
/// This must be a *const to prevent UB
dependency: *const Dependency,
resolution: PackageID,
+ install_peer: bool,
comptime successFn: SuccessFn,
comptime failFn: ?FailFn,
) !void {
- const name = dependency.realname();
+ var name = dependency.realname();
- const name_hash = switch (dependency.version.tag) {
- .dist_tag, .git, .github, .npm, .tarball => String.Builder.stringHash(this.lockfile.str(&name)),
+ var name_hash = switch (dependency.version.tag) {
+ .dist_tag, .git, .github, .npm, .tarball, .workspace => String.Builder.stringHash(this.lockfile.str(&name)),
else => dependency.name_hash,
};
- const version = dependency.version;
+ const version = version: {
+ if (this.lockfile.overrides.get(name_hash)) |new| {
+ debug("override: {s} -> {s}", .{ this.lockfile.str(&dependency.version.literal), this.lockfile.str(&new.literal) });
+ name = switch (new.tag) {
+ .dist_tag => new.value.dist_tag.name,
+ .git => new.value.git.package_name,
+ .github => new.value.github.package_name,
+ .npm => new.value.npm.name,
+ .tarball => new.value.tarball.package_name,
+ else => name,
+ };
+ name_hash = String.Builder.stringHash(this.lockfile.str(&name));
+ break :version new;
+ }
+ break :version dependency.version;
+ };
var loaded_manifest: ?Npm.PackageManifest = null;
- switch (dependency.version.tag) {
+ switch (version.tag) {
.dist_tag, .folder, .npm => {
retry_from_manifests_ptr: while (true) {
var resolve_result_ = this.getOrPutResolvedPackage(
@@ -2952,6 +3091,7 @@ pub const PackageManager = struct {
dependency.behavior,
id,
resolution,
+ install_peer,
successFn,
);
@@ -3048,13 +3188,37 @@ pub const PackageManager = struct {
this.enqueueNetworkTask(network_task);
}
}
- } else if (dependency.version.tag.isNPM()) {
+
+ if (comptime Environment.allow_assert)
+ debug(
+ "enqueueDependency({d}, {s}, {s}, {s}) = {d}",
+ .{
+ id,
+ @tagName(version.tag),
+ this.lockfile.str(&name),
+ this.lockfile.str(&version.literal),
+ result.package.meta.id,
+ },
+ );
+ } else if (version.tag.isNPM()) {
const name_str = this.lockfile.str(&name);
const task_id = Task.Id.forManifest(name_str);
if (comptime Environment.allow_assert) std.debug.assert(task_id != 0);
- if (!dependency.behavior.isPeer()) {
+ if (comptime Environment.allow_assert)
+ debug(
+ "enqueueDependency({d}, {s}, {s}, {s}) = task {d}",
+ .{
+ id,
+ @tagName(version.tag),
+ this.lockfile.str(&name),
+ this.lockfile.str(&version.literal),
+ task_id,
+ },
+ );
+
+ if (!dependency.behavior.isPeer() or install_peer) {
var network_entry = try this.network_dedupe_map.getOrPutContext(this.allocator, task_id, .{});
if (!network_entry.found_existing) {
if (this.options.enable.manifest_cache) {
@@ -3068,8 +3232,8 @@ pub const PackageManager = struct {
// If it's an exact package version already living in the cache
// We can skip the network request, even if it's beyond the caching period
- if (dependency.version.tag == .npm and dependency.version.value.npm.version.isExact()) {
- if (loaded_manifest.?.findByVersion(dependency.version.value.npm.version.head.head.range.left.version)) |find_result| {
+ if (version.tag == .npm and version.value.npm.version.isExact()) {
+ if (loaded_manifest.?.findByVersion(version.value.npm.version.head.head.range.left.version)) |find_result| {
if (this.getOrPutResolvedPackageWithFindResult(
name_hash,
name,
@@ -3078,6 +3242,7 @@ pub const PackageManager = struct {
dependency.behavior,
&loaded_manifest.?,
find_result,
+ install_peer,
successFn,
) catch null) |new_resolve_result| {
resolve_result_ = new_resolve_result;
@@ -3115,6 +3280,10 @@ pub const PackageManager = struct {
);
this.enqueueNetworkTask(network_task);
}
+ } else {
+ if (this.options.do.install_peer_dependencies) {
+ try this.peer_dependencies.append(this.allocator, id);
+ }
}
var manifest_entry_parse = try this.task_queue.getOrPutContext(this.allocator, task_id, .{});
@@ -3131,7 +3300,7 @@ pub const PackageManager = struct {
return;
},
.git => {
- const dep = &dependency.version.value.git;
+ const dep = &version.value.git;
const res = Resolution{
.tag = .git,
.value = .{
@@ -3154,6 +3323,18 @@ pub const PackageManager = struct {
id,
);
+ if (comptime Environment.allow_assert)
+ debug(
+ "enqueueDependency({d}, {s}, {s}, {s}) = {s}",
+ .{
+ id,
+ @tagName(version.tag),
+ this.lockfile.str(&name),
+ this.lockfile.str(&version.literal),
+ url,
+ },
+ );
+
if (this.git_repositories.get(clone_id)) |repo_fd| {
const resolved = try Repository.findCommit(
this.allocator,
@@ -3198,7 +3379,7 @@ pub const PackageManager = struct {
}
},
.github => {
- const dep = &dependency.version.value.github;
+ const dep = &version.value.github;
const res = Resolution{
.tag = .github,
.value = .{
@@ -3220,6 +3401,18 @@ pub const PackageManager = struct {
entry.value_ptr.* = TaskCallbackList{};
}
+ if (comptime Environment.allow_assert)
+ debug(
+ "enqueueDependency({d}, {s}, {s}, {s}) = {s}",
+ .{
+ id,
+ @tagName(version.tag),
+ this.lockfile.str(&name),
+ this.lockfile.str(&version.literal),
+ url,
+ },
+ );
+
const callback_tag = comptime if (successFn == assignRootResolution) "root_dependency" else "dependency";
try entry.value_ptr.append(this.allocator, @unionInit(TaskCallbackContext, callback_tag, id));
@@ -3240,6 +3433,7 @@ pub const PackageManager = struct {
dependency.behavior,
id,
resolution,
+ install_peer,
successFn,
) catch |err| brk: {
if (err == error.MissingPackageJSON) {
@@ -3287,6 +3481,18 @@ pub const PackageManager = struct {
// should not trigger a network call
if (comptime Environment.allow_assert) std.debug.assert(result.network_task == null);
+
+ if (comptime Environment.allow_assert)
+ debug(
+ "enqueueDependency({d}, {s}, {s}, {s}) = {d}",
+ .{
+ id,
+ @tagName(version.tag),
+ this.lockfile.str(&name),
+ this.lockfile.str(&version.literal),
+ result.package.meta.id,
+ },
+ );
} else if (dependency.behavior.isRequired()) {
if (comptime dependency_tag == .workspace) {
this.log.addErrorFmt(
@@ -3336,7 +3542,7 @@ pub const PackageManager = struct {
}
},
.tarball => {
- const res: Resolution = switch (dependency.version.value.tarball.uri) {
+ const res: Resolution = switch (version.value.tarball.uri) {
.local => |path| .{
.tag = .local_tarball,
.value = .{
@@ -3357,7 +3563,7 @@ pub const PackageManager = struct {
return;
}
- const url = switch (dependency.version.value.tarball.uri) {
+ const url = switch (version.value.tarball.uri) {
.local => |path| this.lockfile.str(&path),
.remote => |url| this.lockfile.str(&url),
};
@@ -3367,11 +3573,23 @@ pub const PackageManager = struct {
entry.value_ptr.* = TaskCallbackList{};
}
+ if (comptime Environment.allow_assert)
+ debug(
+ "enqueueDependency({d}, {s}, {s}, {s}) = {s}",
+ .{
+ id,
+ @tagName(version.tag),
+ this.lockfile.str(&name),
+ this.lockfile.str(&version.literal),
+ url,
+ },
+ );
+
const callback_tag = comptime if (successFn == assignRootResolution) "root_dependency" else "dependency";
try entry.value_ptr.append(this.allocator, @unionInit(TaskCallbackContext, callback_tag, id));
if (dependency.behavior.isPeer()) return;
- switch (dependency.version.value.tarball.uri) {
+ switch (version.value.tarball.uri) {
.local => {
const network_entry = try this.network_dedupe_map.getOrPutContext(this.allocator, task_id, .{});
if (network_entry.found_existing) return;
@@ -3420,6 +3638,7 @@ pub const PackageManager = struct {
i,
&dependency,
lockfile.buffers.resolutions.items[i],
+ false,
) catch {};
}
}
@@ -3459,8 +3678,36 @@ pub const PackageManager = struct {
const lockfile = this.lockfile;
// Step 1. Go through main dependencies
- var i = dependencies_list.off;
+ var begin = dependencies_list.off;
const end = dependencies_list.off +| dependencies_list.len;
+
+ // if a dependency is a peer and is also going to be installed
+ // through "dependencies", skip it
+ if (end - begin > 1 and lockfile.buffers.dependencies.items[0].behavior.isPeer()) {
+ var peer_i: usize = 0;
+ var peer = &lockfile.buffers.dependencies.items[peer_i];
+ while (peer.behavior.isPeer()) {
+ var dep_i: usize = end - 1;
+ var dep = lockfile.buffers.dependencies.items[dep_i];
+ while (!dep.behavior.isPeer()) {
+ if (!dep.behavior.isDev()) {
+ if (peer.name_hash == dep.name_hash) {
+ peer.* = lockfile.buffers.dependencies.items[begin];
+ begin += 1;
+ break;
+ }
+ }
+ dep_i -= 1;
+ dep = lockfile.buffers.dependencies.items[dep_i];
+ }
+ peer_i += 1;
+ if (peer_i == end) break;
+ peer = &lockfile.buffers.dependencies.items[peer_i];
+ }
+ }
+
+ var i = begin;
+
// we have to be very careful with pointers here
while (i < end) : (i += 1) {
const dependency = lockfile.buffers.dependencies.items[i];
@@ -3469,6 +3716,7 @@ pub const PackageManager = struct {
i,
&dependency,
resolution,
+ false,
) catch |err| {
const note = .{
.fmt = "error occurred while resolving {s}",
@@ -3499,7 +3747,12 @@ pub const PackageManager = struct {
_ = this.scheduleTasks();
}
- fn processDependencyListItem(this: *PackageManager, item: TaskCallbackContext, any_root: ?*bool) !void {
+ fn processDependencyListItem(
+ this: *PackageManager,
+ item: TaskCallbackContext,
+ any_root: ?*bool,
+ install_peer: bool,
+ ) !void {
switch (item) {
.dependency => |dependency_id| {
const dependency = this.lockfile.buffers.dependencies.items[dependency_id];
@@ -3509,6 +3762,7 @@ pub const PackageManager = struct {
dependency_id,
&dependency,
resolution,
+ install_peer,
);
},
.root_dependency => |dependency_id| {
@@ -3519,6 +3773,7 @@ pub const PackageManager = struct {
dependency_id,
&dependency,
resolution,
+ install_peer,
assignRootResolution,
failRootResolution,
);
@@ -3533,18 +3788,37 @@ pub const PackageManager = struct {
}
}
+ fn processPeerDependencyList(
+ this: *PackageManager,
+ ) !void {
+ if (this.peer_dependencies.items.len > 0) {
+ for (this.peer_dependencies.items) |peer_dependency_id| {
+ try this.processDependencyListItem(.{ .dependency = peer_dependency_id }, null, true);
+ const dependency = this.lockfile.buffers.dependencies.items[peer_dependency_id];
+ const resolution = this.lockfile.buffers.resolutions.items[peer_dependency_id];
+ try this.enqueueDependencyWithMain(
+ peer_dependency_id,
+ &dependency,
+ resolution,
+ true,
+ );
+ }
+ }
+ }
+
fn processDependencyList(
this: *PackageManager,
dep_list: TaskCallbackList,
comptime Context: type,
ctx: Context,
comptime callbacks: anytype,
+ install_peer: bool,
) !void {
if (dep_list.items.len > 0) {
var dependency_list = dep_list;
var any_root = false;
for (dependency_list.items) |item| {
- try this.processDependencyListItem(item, &any_root);
+ try this.processDependencyListItem(item, &any_root, install_peer);
}
if (comptime @TypeOf(callbacks) != void and @TypeOf(callbacks.onResolve) != void) {
@@ -3744,6 +4018,7 @@ pub const PackageManager = struct {
comptime ExtractCompletionContext: type,
extract_ctx: ExtractCompletionContext,
comptime callbacks: anytype,
+ install_peer: bool,
comptime log_level: Options.LogLevel,
) anyerror!void {
var has_updated_this_run = false;
@@ -3835,10 +4110,10 @@ pub const PackageManager = struct {
switch (response.status_code) {
404 => {
if (comptime log_level != .silent) {
- const fmt = "\n<r><red>error<r>: package <b>\"{s}\"<r> not found <d>{s}{s} 404<r>\n";
+ const fmt = "\n<r><red>error<r>: package <b>\"{s}\"<r> not found <d>{}{s} 404<r>\n";
const args = .{
name.slice(),
- task.http.url.displayHostname(),
+ task.http.url.displayHost(),
task.http.url.pathname,
};
@@ -3852,10 +4127,10 @@ pub const PackageManager = struct {
},
401 => {
if (comptime log_level != .silent) {
- const fmt = "\n<r><red>error<r>: unauthorized <b>\"{s}\"<r> <d>{s}{s} 401<r>\n";
+ const fmt = "\n<r><red>error<r>: unauthorized <b>\"{s}\"<r> <d>{}{s} 401<r>\n";
const args = .{
name.slice(),
- task.http.url.displayHostname(),
+ task.http.url.displayHost(),
task.http.url.pathname,
};
@@ -3939,7 +4214,7 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks);
+ try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer);
continue;
}
@@ -4116,7 +4391,7 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks);
+ try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer);
if (comptime log_level.showProgress()) {
if (!has_updated_this_run) {
@@ -4202,7 +4477,7 @@ pub const PackageManager = struct {
},
else => unreachable,
}
- try manager.processDependencyListItem(dep, &any_root);
+ try manager.processDependencyListItem(dep, &any_root, install_peer);
},
else => {
// if it's a node_module folder to install, handle that after we process all the dependencies within the onExtract callback.
@@ -4217,12 +4492,15 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, void, {}, {});
+ try manager.processDependencyList(dependency_list, void, {}, {}, install_peer);
}
manager.setPreinstallState(package_id, manager.lockfile, .done);
if (comptime @TypeOf(callbacks.onExtract) != void) {
+ if (ExtractCompletionContext == *PackageInstaller) {
+ extract_ctx.fixCachedLockfilePackageSlices();
+ }
callbacks.onExtract(extract_ctx, dependency_id, task.data.extract, comptime log_level);
}
@@ -4268,7 +4546,7 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks);
+ try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer);
if (comptime log_level.showProgress()) {
if (!has_updated_this_run) {
@@ -4328,7 +4606,7 @@ pub const PackageManager = struct {
var repo = &manager.lockfile.buffers.dependencies.items[id].version.value.git;
repo.resolved = pkg.resolution.value.git.resolved;
repo.package_name = pkg.name;
- try manager.processDependencyListItem(dep, &any_root);
+ try manager.processDependencyListItem(dep, &any_root, install_peer);
},
else => {
// if it's a node_module folder to install, handle that after we process all the dependencies within the onExtract callback.
@@ -4385,7 +4663,6 @@ pub const PackageManager = struct {
bin_path: stringZ = "node_modules/.bin",
lockfile_path: stringZ = Lockfile.default_filename,
- save_lockfile_path: stringZ = Lockfile.default_filename,
did_override_default_scope: bool = false,
scope: Npm.Registry.Scope = undefined,
@@ -4533,9 +4810,8 @@ pub const PackageManager = struct {
env: *DotEnv.Loader,
cli_: ?CommandLineArguments,
bun_install_: ?*Api.BunInstall,
+ subcommand: Subcommand,
) !void {
- this.save_lockfile_path = this.lockfile_path;
-
var base = Api.NpmRegistry{
.url = "",
.username = "",
@@ -4596,6 +4872,7 @@ pub const PackageManager = struct {
}
if (bun_install.save_peer) |save| {
+ this.do.install_peer_dependencies = save;
this.remote_package_features.peer_dependencies = save;
}
@@ -4623,19 +4900,6 @@ pub const PackageManager = struct {
this.local_package_features.optional_dependencies = save;
}
- if (bun_install.lockfile_path) |save| {
- if (save.len > 0) {
- this.lockfile_path = try allocator.dupeZ(u8, save);
- this.save_lockfile_path = this.lockfile_path;
- }
- }
-
- if (bun_install.save_lockfile_path) |save| {
- if (save.len > 0) {
- this.save_lockfile_path = try allocator.dupeZ(u8, save);
- }
- }
-
this.explicit_global_directory = bun_install.global_dir orelse this.explicit_global_directory;
}
@@ -4716,14 +4980,6 @@ pub const PackageManager = struct {
if (cli.token.len > 0) {
this.scope.token = cli.token;
}
-
- if (cli.lockfile.len > 0) {
- this.lockfile_path = try allocator.dupeZ(u8, cli.lockfile);
- }
- }
-
- if (env.map.get("BUN_CONFIG_LOCKFILE_SAVE_PATH")) |save_lockfile_path| {
- this.save_lockfile_path = try allocator.dupeZ(u8, save_lockfile_path);
}
if (env.map.get("BUN_CONFIG_YARN_LOCKFILE") != null) {
@@ -4772,6 +5028,12 @@ pub const PackageManager = struct {
this.do.verify_integrity = !strings.eqlComptime(check_bool, "0");
}
+ // Update should never read from manifest cache
+ if (subcommand == .update) {
+ this.enable.manifest_cache = false;
+ this.enable.manifest_cache_control = false;
+ }
+
if (cli_) |cli| {
if (cli.no_save) {
this.do.save_lockfile = false;
@@ -4881,6 +5143,7 @@ pub const PackageManager = struct {
print_meta_hash_string: bool = false,
verify_integrity: bool = true,
summary: bool = true,
+ install_peer_dependencies: bool = true,
};
pub const Enable = struct {
@@ -5203,21 +5466,22 @@ pub const PackageManager = struct {
bun.copy(u8, &cwd_buf, original_cwd);
+ var workspace_names = Package.WorkspaceMap.init(ctx.allocator);
+
// Step 1. Find the nearest package.json directory
//
- // We will walk up from the cwd, calling chdir on each directory until we find a package.json
- // If we fail to find one, we will report an error saying no packages to install
+ // We will walk up from the cwd, trying to find the nearest package.json file.
const package_json_file = brk: {
var this_cwd = original_cwd;
const child_json = child: {
while (true) {
- var dir = std.fs.openDirAbsolute(this_cwd, .{}) catch |err| {
- Output.prettyErrorln("Error {s} accessing {s}", .{ @errorName(err), this_cwd });
- Output.flush();
- return err;
- };
- defer dir.close();
- break :child dir.openFileZ("package.json", .{ .mode = .read_write }) catch {
+ const this_cwd_without_trailing_slash = strings.withoutTrailingSlash(this_cwd);
+ var buf2: [bun.MAX_PATH_BYTES + 1]u8 = undefined;
+ @memcpy(buf2[0..this_cwd_without_trailing_slash.len], this_cwd_without_trailing_slash);
+ buf2[this_cwd_without_trailing_slash.len..buf2.len][0.."/package.json".len].* = "/package.json".*;
+ buf2[this_cwd_without_trailing_slash.len + "/package.json".len] = 0;
+
+ break :child std.fs.cwd().openFileZ(buf2[0 .. this_cwd_without_trailing_slash.len + "/package.json".len :0].ptr, .{ .mode = .read_write }) catch {
if (std.fs.path.dirname(this_cwd)) |parent| {
this_cwd = parent;
continue;
@@ -5233,9 +5497,13 @@ pub const PackageManager = struct {
// Check if this is a workspace; if so, use root package
var found = false;
while (std.fs.path.dirname(this_cwd)) |parent| : (this_cwd = parent) {
- var dir = std.fs.openDirAbsolute(parent, .{}) catch break;
- defer dir.close();
- const json_file = dir.openFileZ("package.json", .{ .mode = .read_write }) catch {
+ const parent_without_trailing_slash = strings.withoutTrailingSlash(parent);
+ var buf2: [bun.MAX_PATH_BYTES + 1]u8 = undefined;
+ @memcpy(buf2[0..parent_without_trailing_slash.len], parent_without_trailing_slash);
+ buf2[parent_without_trailing_slash.len..buf2.len][0.."/package.json".len].* = "/package.json".*;
+ buf2[parent_without_trailing_slash.len + "/package.json".len] = 0;
+
+ const json_file = std.fs.cwd().openFileZ(buf2[0 .. parent_without_trailing_slash.len + "/package.json".len :0].ptr, .{ .mode = .read_write }) catch {
continue;
};
defer if (!found) json_file.close();
@@ -5248,8 +5516,6 @@ pub const PackageManager = struct {
initializeStore();
const json = try json_parser.ParseJSONUTF8(&json_source, ctx.log, ctx.allocator);
if (json.asProperty("workspaces")) |prop| {
- var workspace_names = Package.WorkspaceMap.init(ctx.allocator);
- defer workspace_names.deinit();
const json_array = switch (prop.expr.data) {
.e_array => |arr| arr,
.e_object => |obj| if (obj.get("packages")) |packages| switch (packages.data) {
@@ -5260,7 +5526,7 @@ pub const PackageManager = struct {
};
var log = logger.Log.init(ctx.allocator);
defer log.deinit();
- _ = Package.processWorkspaceNamesArray(
+ const workspace_packages_count = Package.processWorkspaceNamesArray(
&workspace_names,
ctx.allocator,
&log,
@@ -5269,6 +5535,7 @@ pub const PackageManager = struct {
prop.loc,
null,
) catch break;
+ _ = workspace_packages_count;
for (workspace_names.keys()) |path| {
if (strings.eql(child_cwd, path)) {
fs.top_level_dir = parent;
@@ -5331,6 +5598,13 @@ pub const PackageManager = struct {
} else |_| {}
}
+ var workspaces = std.StringArrayHashMap(?Semver.Version).init(ctx.allocator);
+ for (workspace_names.values()) |entry| {
+ try workspaces.put(entry.name, entry.version);
+ }
+
+ workspace_names.map.deinit();
+
var manager = &instance;
// var progress = Progress{};
// var node = progress.start(name: []const u8, estimated_total_items: usize)
@@ -5348,7 +5622,8 @@ pub const PackageManager = struct {
.resolve_tasks = TaskChannel.init(),
.lockfile = undefined,
.root_package_json_file = package_json_file,
- .waiter = Waiter.fromUWSLoop(uws.Loop.get()),
+ .waiter = try Waker.init(ctx.allocator),
+ .workspaces = workspaces,
// .progress
.uws_event_loop = uws.Loop.get(),
.file_poll_store = JSC.FilePoll.Store.init(ctx.allocator),
@@ -5379,6 +5654,7 @@ pub const PackageManager = struct {
env,
cli,
ctx.install,
+ subcommand,
);
manager.timestamp_for_manifest_cache_control = @as(u32, @truncate(@as(u64, @intCast(@max(std.time.timestamp(), 0)))));
@@ -5427,9 +5703,8 @@ pub const PackageManager = struct {
.resolve_tasks = TaskChannel.init(),
.lockfile = undefined,
.root_package_json_file = undefined,
- .waiter = Waiter.fromUWSLoop(uws.Loop.get()),
- .uws_event_loop = uws.Loop.get(),
- .file_poll_store = JSC.FilePoll.Store.init(allocator),
+ .waiter = try Waker.init(allocator),
+ .workspaces = std.StringArrayHashMap(?Semver.Version).init(allocator),
};
manager.lockfile = try allocator.create(Lockfile);
@@ -5466,6 +5741,7 @@ pub const PackageManager = struct {
env,
cli,
bun_install,
+ .install,
);
manager.timestamp_for_manifest_cache_control = @as(
@@ -5822,7 +6098,6 @@ pub const PackageManager = struct {
clap.parseParam("--no-save Don't save a lockfile") catch unreachable,
clap.parseParam("--save Save to package.json") catch unreachable,
clap.parseParam("--dry-run Don't install anything") catch unreachable,
- clap.parseParam("--lockfile <PATH> Store & load a lockfile at a specific filepath") catch unreachable,
clap.parseParam("--frozen-lockfile Disallow changes to lockfile") catch unreachable,
clap.parseParam("-f, --force Always request the latest versions from the registry & reinstall all dependencies") catch unreachable,
clap.parseParam("--cache-dir <PATH> Store & load cached data from a specific directory path") catch unreachable,
@@ -5848,7 +6123,7 @@ pub const PackageManager = struct {
clap.parseParam("-d, --dev Add dependency to \"devDependencies\"") catch unreachable,
clap.parseParam("-D, --development") catch unreachable,
clap.parseParam("--optional Add dependency to \"optionalDependencies\"") catch unreachable,
- clap.parseParam("--exact Add the exact version instead of the ^range") catch unreachable,
+ clap.parseParam("-E, --exact Add the exact version instead of the ^range") catch unreachable,
clap.parseParam("<POS> ... ") catch unreachable,
};
@@ -5864,12 +6139,12 @@ pub const PackageManager = struct {
clap.parseParam("-d, --dev Add dependency to \"devDependencies\"") catch unreachable,
clap.parseParam("-D, --development") catch unreachable,
clap.parseParam("--optional Add dependency to \"optionalDependencies\"") catch unreachable,
- clap.parseParam("--exact Add the exact version instead of the ^range") catch unreachable,
- clap.parseParam("<POS> ... \"name\" or \"name@version\" of packages to install") catch unreachable,
+ clap.parseParam("-E, --exact Add the exact version instead of the ^range") catch unreachable,
+ clap.parseParam("<POS> ... \"name\" or \"name@version\" of package(s) to install") catch unreachable,
};
const remove_params = install_params_ ++ [_]ParamType{
- clap.parseParam("<POS> ... \"name\" of packages to remove from package.json") catch unreachable,
+ clap.parseParam("<POS> ... \"name\" of package(s) to remove from package.json") catch unreachable,
};
const link_params = install_params_ ++ [_]ParamType{
@@ -6012,10 +6287,6 @@ pub const PackageManager = struct {
// }
// }
- if (args.option("--lockfile")) |lockfile| {
- cli.lockfile = lockfile;
- }
-
if (args.option("--cwd")) |cwd_| {
var buf: [bun.MAX_PATH_BYTES]u8 = undefined;
var buf2: [bun.MAX_PATH_BYTES]u8 = undefined;
@@ -6096,7 +6367,7 @@ pub const PackageManager = struct {
var value = input;
var alias: ?string = null;
- if (strings.isNPMPackageName(input)) {
+ if (!Dependency.isTarball(input) and strings.isNPMPackageName(input)) {
alias = input;
value = input[input.len..];
} else if (input.len > 1) {
@@ -6155,6 +6426,8 @@ pub const PackageManager = struct {
request.is_aliased = true;
request.name = allocator.dupe(u8, name) catch unreachable;
request.name_hash = String.Builder.stringHash(name);
+ } else if (version.tag == .github and version.value.github.committish.isEmpty()) {
+ request.name_hash = String.Builder.stringHash(version.literal.slice(input));
} else {
request.name_hash = String.Builder.stringHash(version.literal.slice(input));
}
@@ -6213,7 +6486,7 @@ pub const PackageManager = struct {
) !void {
var update_requests = try UpdateRequest.Array.init(0);
- if (manager.options.positionals.len == 1) {
+ if (manager.options.positionals.len <= 1) {
var examples_to_print: [3]string = undefined;
const off = @as(u64, @intCast(std.time.milliTimestamp()));
@@ -6645,6 +6918,7 @@ pub const PackageManager = struct {
folder_path_buf: [bun.MAX_PATH_BYTES]u8 = undefined,
install_count: usize = 0,
successfully_installed: Bitset,
+ tree_iterator: *Lockfile.Tree.Iterator,
// For linking native binaries, we only want to link after we've installed the companion dependencies
// We don't want to introduce dependent callbacks like that for every single package
@@ -6656,6 +6930,16 @@ pub const PackageManager = struct {
node_modules_folder: std.fs.IterableDir,
};
+ /// Call when you mutate the length of `lockfile.packages`
+ pub fn fixCachedLockfilePackageSlices(this: *PackageInstaller) void {
+ var packages = this.lockfile.packages.slice();
+ this.metas = packages.items(.meta);
+ this.names = packages.items(.name);
+ this.bins = packages.items(.bin);
+ this.resolutions = packages.items(.resolution);
+ this.tree_iterator.reload(this.lockfile);
+ }
+
/// Install versions of a package which are waiting on a network request
pub fn installEnqueuedPackages(
this: *PackageInstaller,
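`fixCachedLockfilePackageSlices` exists because `lockfile.packages` is a `std.MultiArrayList`: appending can reallocate its backing storage, which would leave the installer's cached `metas`/`names`/`bins`/`resolutions` slices dangling. A reduced demonstration of the hazard (hypothetical `Pkg` type):

const std = @import("std");

const Pkg = struct { id: u32, name: []const u8 };

test "growing a MultiArrayList can invalidate cached slices" {
    const allocator = std.testing.allocator;
    var list = std.MultiArrayList(Pkg){};
    defer list.deinit(allocator);

    try list.append(allocator, .{ .id = 0, .name = "root" });
    var ids = list.items(.id); // cached, like PackageInstaller.metas/names/bins

    var i: u32 = 1;
    while (i < 1024) : (i += 1) {
        try list.append(allocator, .{ .id = i, .name = "dep" });
    }

    // The appends may have reallocated, so the old slice can dangle.
    // Re-slicing here is what fixCachedLockfilePackageSlices() does.
    ids = list.items(.id);
    try std.testing.expectEqual(@as(u32, 0), ids[0]);
}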
@@ -7265,38 +7549,38 @@ pub const PackageManager = struct {
var summary = PackageInstall.Summary{};
{
- var parts = lockfile.packages.slice();
- var metas = parts.items(.meta);
- var names = parts.items(.name);
- var dependencies = lockfile.buffers.dependencies.items;
- const resolutions_buffer: []const PackageID = lockfile.buffers.resolutions.items;
- const resolution_lists: []const Lockfile.PackageIDSlice = parts.items(.resolutions);
- var resolutions = parts.items(.resolution);
-
var iterator = Lockfile.Tree.Iterator.init(lockfile);
- var installer = PackageInstaller{
- .manager = this,
- .options = &this.options,
- .metas = metas,
- .bins = parts.items(.bin),
- .root_node_modules_folder = node_modules_folder,
- .names = names,
- .resolutions = resolutions,
- .lockfile = lockfile,
- .node = &install_node,
- .node_modules_folder = node_modules_folder,
- .progress = progress,
- .skip_verify_installed_version_number = skip_verify_installed_version_number,
- .skip_delete = skip_delete,
- .summary = &summary,
- .global_bin_dir = this.options.global_bin_dir,
- .force_install = force_install,
- .install_count = lockfile.buffers.hoisted_dependencies.items.len,
- .successfully_installed = try Bitset.initEmpty(
- this.allocator,
- lockfile.packages.len,
- ),
+ var installer: PackageInstaller = brk: {
+ // These slices potentially get resized during iteration
+ // so we want to make sure they're not accessible to the rest of this function
+ // to make mistakes harder
+ var parts = lockfile.packages.slice();
+
+ break :brk PackageInstaller{
+ .manager = this,
+ .options = &this.options,
+ .metas = parts.items(.meta),
+ .bins = parts.items(.bin),
+ .root_node_modules_folder = node_modules_folder,
+ .names = parts.items(.name),
+ .resolutions = parts.items(.resolution),
+ .lockfile = lockfile,
+ .node = &install_node,
+ .node_modules_folder = node_modules_folder,
+ .progress = progress,
+ .skip_verify_installed_version_number = skip_verify_installed_version_number,
+ .skip_delete = skip_delete,
+ .summary = &summary,
+ .global_bin_dir = this.options.global_bin_dir,
+ .force_install = force_install,
+ .install_count = lockfile.buffers.hoisted_dependencies.items.len,
+ .successfully_installed = try Bitset.initEmpty(
+ this.allocator,
+ lockfile.packages.len,
+ ),
+ .tree_iterator = &iterator,
+ };
};
while (iterator.nextNodeModulesFolder()) |node_modules| {
@@ -7305,7 +7589,10 @@ pub const PackageManager = struct {
// We use this file descriptor to know where to put it.
installer.node_modules_folder = cwd.openIterableDir(node_modules.relative_path, .{}) catch brk: {
// Avoid extra mkdir() syscall
- try cwd.makePath(bun.span(node_modules.relative_path));
+ //
+                        // note: bun.makePath will recursively delete any dangling symlinks in its way;
+                        // e.g. the next.js repo has one at node_modules/@next/codemod/node_modules/cheerio
+ try bun.makePath(cwd, bun.span(node_modules.relative_path));
break :brk try cwd.openIterableDir(node_modules.relative_path, .{});
};
@@ -7335,6 +7622,7 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ true,
log_level,
);
if (!installer.options.do.install_packages) return error.InstallFailed;
@@ -7354,12 +7642,13 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ true,
log_level,
);
if (!installer.options.do.install_packages) return error.InstallFailed;
}
- while (this.pending_tasks > 0 and installer.options.do.install_packages) : (this.sleep()) {
+ while (this.pending_tasks > 0 and installer.options.do.install_packages) {
try this.runTasks(
*PackageInstaller,
&installer,
@@ -7369,94 +7658,110 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ true,
log_level,
);
+
+ if (PackageManager.verbose_install and this.pending_tasks > 0) {
+ Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{this.pending_tasks});
+ }
+
+ if (this.pending_tasks > 0)
+ this.sleep();
}
if (!installer.options.do.install_packages) return error.InstallFailed;
summary.successfully_installed = installer.successfully_installed;
- outer: for (installer.platform_binlinks.items) |deferred| {
- const dependency_id = deferred.dependency_id;
- const package_id = resolutions_buffer[dependency_id];
- const folder = deferred.node_modules_folder;
-
- const package_resolutions: []const PackageID = resolution_lists[package_id].get(resolutions_buffer);
- const original_bin: Bin = installer.bins[package_id];
-
- for (package_resolutions) |resolved_id| {
- if (resolved_id >= names.len) continue;
- const meta: Lockfile.Package.Meta = metas[resolved_id];
-
- // This is specifically for platform-specific binaries
- if (meta.os == .all and meta.arch == .all) continue;
-
- // Don't attempt to link incompatible binaries
- if (meta.isDisabled()) continue;
-
- const name = lockfile.str(&dependencies[dependency_id].name);
-
- if (!installer.has_created_bin) {
- if (!this.options.global) {
- if (comptime Environment.isWindows) {
- std.os.mkdiratW(node_modules_folder.dir.fd, bun.strings.w(".bin"), 0) catch {};
- } else {
- node_modules_folder.dir.makeDirZ(".bin") catch {};
+ {
+ var parts = lockfile.packages.slice();
+ var metas = parts.items(.meta);
+ var names = parts.items(.name);
+ var dependencies = lockfile.buffers.dependencies.items;
+ const resolutions_buffer: []const PackageID = lockfile.buffers.resolutions.items;
+ const resolution_lists: []const Lockfile.PackageIDSlice = parts.items(.resolutions);
+ outer: for (installer.platform_binlinks.items) |deferred| {
+ const dependency_id = deferred.dependency_id;
+ const package_id = resolutions_buffer[dependency_id];
+ const folder = deferred.node_modules_folder;
+
+ const package_resolutions: []const PackageID = resolution_lists[package_id].get(resolutions_buffer);
+ const original_bin: Bin = installer.bins[package_id];
+
+ for (package_resolutions) |resolved_id| {
+ if (resolved_id >= names.len) continue;
+ const meta: Lockfile.Package.Meta = metas[resolved_id];
+
+ // This is specifically for platform-specific binaries
+ if (meta.os == .all and meta.arch == .all) continue;
+
+ // Don't attempt to link incompatible binaries
+ if (meta.isDisabled()) continue;
+
+ const name = lockfile.str(&dependencies[dependency_id].name);
+
+ if (!installer.has_created_bin) {
+ if (!this.options.global) {
+ if (comptime Environment.isWindows) {
+ std.os.mkdiratW(node_modules_folder.dir.fd, bun.strings.w(".bin"), 0) catch {};
+ } else {
+ node_modules_folder.dir.makeDirZ(".bin") catch {};
+ }
}
+ if (comptime Environment.isPosix)
+ Bin.Linker.umask = C.umask(0);
+ installer.has_created_bin = true;
}
- if (comptime Environment.isPosix)
- Bin.Linker.umask = C.umask(0);
- installer.has_created_bin = true;
- }
- var bin_linker = Bin.Linker{
- .bin = original_bin,
- .package_installed_node_modules = bun.toFD(folder.dir.fd),
- .root_node_modules_folder = bun.toFD(node_modules_folder.dir.fd),
- .global_bin_path = this.options.bin_path,
- .global_bin_dir = this.options.global_bin_dir.dir,
+ var bin_linker = Bin.Linker{
+ .bin = original_bin,
+ .package_installed_node_modules = bun.toFD(folder.dir.fd),
+ .root_node_modules_folder = bun.toFD(node_modules_folder.dir.fd),
+ .global_bin_path = this.options.bin_path,
+ .global_bin_dir = this.options.global_bin_dir.dir,
- .package_name = strings.StringOrTinyString.init(name),
- .string_buf = lockfile.buffers.string_bytes.items,
- .extern_string_buf = lockfile.buffers.extern_strings.items,
- };
+ .package_name = strings.StringOrTinyString.init(name),
+ .string_buf = lockfile.buffers.string_bytes.items,
+ .extern_string_buf = lockfile.buffers.extern_strings.items,
+ };
- bin_linker.link(this.options.global);
+ bin_linker.link(this.options.global);
- if (bin_linker.err) |err| {
- if (comptime log_level != .silent) {
- const fmt = "\n<r><red>error:<r> linking <b>{s}<r>: {s}\n";
- const args = .{ name, @errorName(err) };
+ if (bin_linker.err) |err| {
+ if (comptime log_level != .silent) {
+ const fmt = "\n<r><red>error:<r> linking <b>{s}<r>: {s}\n";
+ const args = .{ name, @errorName(err) };
- if (comptime log_level.showProgress()) {
- switch (Output.enable_ansi_colors) {
- inline else => |enable_ansi_colors| {
- this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
- },
+ if (comptime log_level.showProgress()) {
+ switch (Output.enable_ansi_colors) {
+ inline else => |enable_ansi_colors| {
+ this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
+ },
+ }
+ } else {
+ Output.prettyErrorln(fmt, args);
}
- } else {
- Output.prettyErrorln(fmt, args);
}
+
+ if (this.options.enable.fail_early) Global.crash();
}
- if (this.options.enable.fail_early) Global.crash();
+ continue :outer;
}
- continue :outer;
- }
-
- if (comptime log_level != .silent) {
- const fmt = "\n<r><yellow>warn:<r> no compatible binaries found for <b>{s}<r>\n";
- const args = .{lockfile.str(&names[package_id])};
+ if (comptime log_level != .silent) {
+ const fmt = "\n<r><yellow>warn:<r> no compatible binaries found for <b>{s}<r>\n";
+ const args = .{lockfile.str(&names[package_id])};
- if (comptime log_level.showProgress()) {
- switch (Output.enable_ansi_colors) {
- inline else => |enable_ansi_colors| {
- this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
- },
+ if (comptime log_level.showProgress()) {
+ switch (Output.enable_ansi_colors) {
+ inline else => |enable_ansi_colors| {
+ this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
+ },
+ }
+ } else {
+ Output.prettyErrorln(fmt, args);
}
- } else {
- Output.prettyErrorln(fmt, args);
}
}
}
@@ -7515,15 +7820,17 @@ pub const PackageManager = struct {
)
else
.{ .not_found = {} };
+
var root = Lockfile.Package{};
- var needs_new_lockfile = load_lockfile_result != .ok or (load_lockfile_result.ok.buffers.dependencies.items.len == 0 and manager.package_json_updates.len > 0);
+ var needs_new_lockfile = load_lockfile_result != .ok or
+ (load_lockfile_result.ok.buffers.dependencies.items.len == 0 and manager.package_json_updates.len > 0);
+
// this defaults to false
// but we force allowing updates to the lockfile when you do bun add
var had_any_diffs = false;
manager.progress = .{};
// Step 2. Parse the package.json file
- //
var package_json_source = logger.Source.initPathString(package_json_cwd, package_json_contents);
switch (load_lockfile_result) {
@@ -7539,6 +7846,9 @@ pub const PackageManager = struct {
.read_file => Output.prettyError("<r><red>error<r> reading lockfile:<r> {s}\n<r>", .{
@errorName(cause.value),
}),
+ .migrating => Output.prettyError("<r><red>error<r> migrating lockfile:<r> {s}\n<r>", .{
+ @errorName(cause.value),
+ }),
}
if (manager.options.enable.fail_early) {
@@ -7617,6 +7927,8 @@ pub const PackageManager = struct {
new_dep.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder);
}
+ lockfile.overrides.count(&lockfile, builder);
+
maybe_root.scripts.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder);
const off = @as(u32, @truncate(manager.lockfile.buffers.dependencies.items.len));
@@ -7630,6 +7942,27 @@ pub const PackageManager = struct {
manager.root_dependency_list = dep_lists[0];
try builder.allocate();
+ const all_name_hashes: []PackageNameHash = brk: {
+ if (!manager.summary.overrides_changed) break :brk &.{};
+ const hashes_len = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;
+ if (hashes_len == 0) break :brk &.{};
+ var all_name_hashes = try bun.default_allocator.alloc(PackageNameHash, hashes_len);
+ @memcpy(all_name_hashes[0..manager.lockfile.overrides.map.entries.len], manager.lockfile.overrides.map.keys());
+ @memcpy(all_name_hashes[manager.lockfile.overrides.map.entries.len..], lockfile.overrides.map.keys());
+ var i = manager.lockfile.overrides.map.entries.len;
+ while (i < all_name_hashes.len) {
+ if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes[0..i], all_name_hashes[i]) != null) {
+ all_name_hashes[i] = all_name_hashes[all_name_hashes.len - 1];
+ all_name_hashes.len -= 1;
+ } else {
+ i += 1;
+ }
+ }
+ break :brk all_name_hashes;
+ };
+
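The merge-and-dedupe above can be read as this standalone sketch (assumed form, not from the patch; the real loop starts scanning at the boundary between the two key lists, since each list's own keys are already unique):

fn dedupeHashes(hashes: []u64) []u64 {
    var list = hashes;
    var i: usize = 0;
    while (i < list.len) {
        // swap-remove: overwrite a duplicate with the last element and shrink by one
        if (std.mem.indexOfScalar(u64, list[0..i], list[i]) != null) {
            list[i] = list[list.len - 1];
            list.len -= 1;
        } else {
            i += 1;
        }
    }
    return list;
}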
+ manager.lockfile.overrides = try lockfile.overrides.clone(&lockfile, manager.lockfile, builder);
+
try manager.lockfile.buffers.dependencies.ensureUnusedCapacity(manager.lockfile.allocator, len);
try manager.lockfile.buffers.resolutions.ensureUnusedCapacity(manager.lockfile.allocator, len);
@@ -7652,6 +7985,20 @@ pub const PackageManager = struct {
}
}
+ if (manager.summary.overrides_changed and all_name_hashes.len > 0) {
+ for (manager.lockfile.buffers.dependencies.items, 0..) |*dependency, dependency_i| {
+ if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes, dependency.name_hash)) |_| {
+ manager.lockfile.buffers.resolutions.items[dependency_i] = invalid_package_id;
+ try manager.enqueueDependencyWithMain(
+ @truncate(dependency_i),
+ dependency,
+ manager.lockfile.buffers.resolutions.items[dependency_i],
+ false,
+ );
+ }
+ }
+ }
+
manager.lockfile.packages.items(.scripts)[0] = maybe_root.scripts.clone(
lockfile.buffers.string_bytes.items,
*Lockfile.StringBuilder,
@@ -7667,6 +8014,7 @@ pub const PackageManager = struct {
_ = manager.getCacheDirectory();
_ = manager.getTemporaryDirectory();
+
while (counter_i < changes) : (counter_i += 1) {
if (mapping[counter_i] == invalid_package_id) {
const dependency_i = counter_i + off;
@@ -7675,6 +8023,7 @@ pub const PackageManager = struct {
dependency_i,
&dependency,
manager.lockfile.buffers.resolutions.items[dependency_i],
+ false,
);
}
}
@@ -7729,7 +8078,7 @@ pub const PackageManager = struct {
manager.drainDependencyList();
}
- if (manager.pending_tasks > 0) {
+ if (manager.pending_tasks > 0 or manager.peer_dependencies.items.len > 0) {
if (root.dependencies.len > 0) {
_ = manager.getCacheDirectory();
_ = manager.getTemporaryDirectory();
@@ -7742,7 +8091,7 @@ pub const PackageManager = struct {
Output.flush();
}
- while (manager.pending_tasks > 0) : (manager.sleep()) {
+ while (manager.pending_tasks > 0) {
try manager.runTasks(
*PackageManager,
manager,
@@ -7753,8 +8102,45 @@ pub const PackageManager = struct {
.onPackageDownloadError = {},
.progress_bar = true,
},
+ false,
log_level,
);
+
+ if (PackageManager.verbose_install and manager.pending_tasks > 0) {
+ Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{manager.pending_tasks});
+ }
+
+ if (manager.pending_tasks > 0)
+ manager.sleep();
+ }
+
+ if (manager.options.do.install_peer_dependencies) {
+ try manager.processPeerDependencyList();
+
+ manager.drainDependencyList();
+
+ while (manager.pending_tasks > 0) {
+ try manager.runTasks(
+ *PackageManager,
+ manager,
+ .{
+ .onExtract = {},
+ .onResolve = {},
+ .onPackageManifestError = {},
+ .onPackageDownloadError = {},
+ .progress_bar = true,
+ },
+ true,
+ log_level,
+ );
+
+ if (PackageManager.verbose_install and manager.pending_tasks > 0) {
+ Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{manager.pending_tasks});
+ }
+
+ if (manager.pending_tasks > 0)
+ manager.sleep();
+ }
}
if (comptime log_level.showProgress()) {
@@ -7810,7 +8196,7 @@ pub const PackageManager = struct {
save: {
if (manager.lockfile.isEmpty()) {
if (!manager.options.dry_run) {
- std.fs.cwd().deleteFileZ(manager.options.save_lockfile_path) catch |err| brk: {
+ std.fs.cwd().deleteFileZ(manager.options.lockfile_path) catch |err| brk: {
// we don't care
if (err == error.FileNotFound) {
if (had_any_diffs) break :save;
@@ -7838,7 +8224,7 @@ pub const PackageManager = struct {
manager.progress.refresh();
}
- manager.lockfile.saveToDisk(manager.options.save_lockfile_path);
+ manager.lockfile.saveToDisk(manager.options.lockfile_path);
if (comptime log_level.showProgress()) {
node.end();
manager.progress.refresh();
@@ -7943,13 +8329,14 @@ pub const PackageManager = struct {
if (install_summary.success > 0) {
// it's confusing when it shows 3 packages and says it installed 1
- Output.pretty("\n <green>{d}<r> packages<r> installed ", .{@max(
+ const pkgs_installed = @max(
install_summary.success,
@as(
u32,
@truncate(manager.package_json_updates.len),
),
- )});
+ );
+ Output.pretty("\n <green>{d}<r> package{s}<r> installed ", .{ pkgs_installed, if (pkgs_installed == 1) "" else "s" });
Output.printStartEndStdout(ctx.start_time, std.time.nanoTimestamp());
printed_timestamp = true;
Output.pretty("<r>\n", .{});
@@ -7964,7 +8351,7 @@ pub const PackageManager = struct {
}
}
- Output.pretty("\n <r><b>{d}<r> packages removed ", .{manager.summary.remove});
+ Output.pretty("\n <r><b>{d}<r> package{s} removed ", .{ manager.summary.remove, if (manager.summary.remove == 1) "" else "s" });
Output.printStartEndStdout(ctx.start_time, std.time.nanoTimestamp());
printed_timestamp = true;
Output.pretty("<r>\n", .{});
@@ -7973,16 +8360,19 @@ pub const PackageManager = struct {
const count = @as(PackageID, @truncate(manager.lockfile.packages.len));
if (count != install_summary.skipped) {
- Output.pretty("Checked <green>{d} installs<r> across {d} packages <d>(no changes)<r> ", .{
+ Output.pretty("Checked <green>{d} install{s}<r> across {d} package{s} <d>(no changes)<r> ", .{
install_summary.skipped,
+ if (install_summary.skipped == 1) "" else "s",
count,
+ if (count == 1) "" else "s",
});
Output.printStartEndStdout(ctx.start_time, std.time.nanoTimestamp());
printed_timestamp = true;
Output.pretty("<r>\n", .{});
} else {
- Output.pretty("<r> <green>Done<r>! Checked {d} packages<r> <d>(no changes)<r> ", .{
+ Output.pretty("<r> <green>Done<r>! Checked {d} package{s}<r> <d>(no changes)<r> ", .{
install_summary.skipped,
+ if (install_summary.skipped == 1) "" else "s",
});
Output.printStartEndStdout(ctx.start_time, std.time.nanoTimestamp());
printed_timestamp = true;
@@ -7991,7 +8381,7 @@ pub const PackageManager = struct {
}
if (install_summary.fail > 0) {
- Output.prettyln("<r>Failed to install <red><b>{d}<r> packages\n", .{install_summary.fail});
+ Output.prettyln("<r>Failed to install <red><b>{d}<r> package{s}\n", .{ install_summary.fail, if (install_summary.fail == 1) "" else "s" });
Output.flush();
}
}
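The singular/plural fixes above all use the same idiom: pass the suffix as a separate {s} argument chosen at the call site. A tiny self-contained sketch:

const std = @import("std");

fn plural(n: usize) []const u8 {
    return if (n == 1) "" else "s";
}

pub fn main() void {
    const removed: usize = 1;
    std.debug.print("{d} package{s} removed\n", .{ removed, plural(removed) });
}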
@@ -8048,6 +8438,7 @@ test "UpdateRequests.parse" {
"baz",
"boo@1.0.0",
"bing@latest",
+ "github:bar/foo",
};
var reqs = PackageManager.UpdateRequest.parse(default_allocator, &log, updates, &array, .add);
@@ -8056,11 +8447,12 @@ test "UpdateRequests.parse" {
try std.testing.expectEqualStrings(reqs[2].name, "bar");
try std.testing.expectEqualStrings(reqs[3].name, "baz");
try std.testing.expectEqualStrings(reqs[4].name, "boo");
+ try std.testing.expectEqualStrings(reqs[7].name, "github:bar/foo");
try std.testing.expectEqual(reqs[4].version.tag, Dependency.Version.Tag.npm);
try std.testing.expectEqualStrings(reqs[4].version.literal.slice("boo@1.0.0"), "1.0.0");
try std.testing.expectEqual(reqs[5].version.tag, Dependency.Version.Tag.dist_tag);
try std.testing.expectEqualStrings(reqs[5].version.literal.slice("bing@1.0.0"), "latest");
- try std.testing.expectEqual(updates.len, 6);
+ try std.testing.expectEqual(updates.len, 7);
}
test "PackageManager.Options - default registry, default values" {
diff --git a/src/install/integrity.zig b/src/install/integrity.zig
index dd11140de..c0b02d4bf 100644
--- a/src/install/integrity.zig
+++ b/src/install/integrity.zig
@@ -3,34 +3,26 @@ const strings = @import("../string_immutable.zig");
const Crypto = @import("../sha.zig").Hashers;
pub const Integrity = extern struct {
+ const empty_digest_buf: [Integrity.digest_buf_len]u8 = [_]u8{0} ** Integrity.digest_buf_len;
+
tag: Tag = Tag.unknown,
/// Possibly a [Subresource Integrity](https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity) value initially
/// We transform it though.
- value: [digest_buf_len]u8 = undefined,
+ value: [digest_buf_len]u8 = empty_digest_buf,
const Base64 = std.base64.standard_no_pad;
- pub const digest_buf_len: usize = brk: {
- const values = [_]usize{
- std.crypto.hash.Sha1.digest_length,
- std.crypto.hash.sha2.Sha512.digest_length,
- std.crypto.hash.sha2.Sha256.digest_length,
- std.crypto.hash.sha2.Sha384.digest_length,
- };
-
- var value: usize = 0;
- for (values) |val| {
- value = @max(val, value);
- }
-
- break :brk value;
- };
+ pub const digest_buf_len: usize = @max(
+ std.crypto.hash.Sha1.digest_length,
+ std.crypto.hash.sha2.Sha512.digest_length,
+ std.crypto.hash.sha2.Sha256.digest_length,
+ std.crypto.hash.sha2.Sha384.digest_length,
+ );
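The rewrite relies on @max accepting more than two arguments; checking it against the concrete digest lengths (a sketch, not from the patch):

const std = @import("std");

comptime {
    // digest lengths: Sha1 = 20, Sha512 = 64, Sha256 = 32, Sha384 = 48
    std.debug.assert(@max(20, 64, 32, 48) == 64); // so digest_buf_len == 64
}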
pub fn parseSHASum(buf: []const u8) !Integrity {
if (buf.len == 0) {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
}
@@ -40,8 +32,11 @@ pub const Integrity = extern struct {
var out_i: usize = 0;
var i: usize = 0;
- {
- @memset(&integrity.value, 0);
+ // initializer should zero it out
+ if (comptime @import("root").bun.Environment.allow_assert) {
+ for (integrity.value) |c| {
+ std.debug.assert(c == 0);
+ }
}
while (i < end) {
@@ -74,23 +69,20 @@ pub const Integrity = extern struct {
if (buf.len < "sha256-".len) {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
}
- var out: [digest_buf_len]u8 = undefined;
+ var out: [digest_buf_len]u8 = empty_digest_buf;
const tag = Tag.parse(buf);
if (tag == Tag.unknown) {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
}
Base64.Decoder.decode(&out, std.mem.trimRight(u8, buf["sha256-".len..], "=")) catch {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
};
@@ -203,4 +195,13 @@ pub const Integrity = extern struct {
unreachable;
}
+
+ comptime {
+ var integrity = Integrity{ .tag = Tag.sha1 };
+ for (integrity.value) |c| {
+ if (c != 0) {
+ @compileError("Integrity buffer is not zeroed");
+ }
+ }
+ }
};
diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig
index 759c5be90..bcfd72038 100644
--- a/src/install/lockfile.zig
+++ b/src/install/lockfile.zig
@@ -22,6 +22,7 @@ const json_parser = bun.JSON;
const JSPrinter = bun.js_printer;
const linker = @import("../linker.zig");
+const migration = @import("./migration.zig");
const sync = @import("../sync.zig");
const Api = @import("../api/schema.zig").Api;
@@ -86,9 +87,9 @@ const StaticHashMap = @import("../StaticHashMap.zig").StaticHashMap;
const MetaHash = [std.crypto.hash.sha2.Sha512256.digest_length]u8;
const zero_hash = std.mem.zeroes(MetaHash);
-const NameHashMap = std.ArrayHashMapUnmanaged(u32, String, ArrayIdentityContext, false);
-const NameHashSet = std.ArrayHashMapUnmanaged(u32, void, ArrayIdentityContext, false);
-const VersionHashMap = std.ArrayHashMapUnmanaged(u32, Semver.Version, ArrayIdentityContext, false);
+pub const NameHashMap = std.ArrayHashMapUnmanaged(PackageNameHash, String, ArrayIdentityContext.U64, false);
+pub const NameHashSet = std.ArrayHashMapUnmanaged(u32, void, ArrayIdentityContext, false);
+pub const VersionHashMap = std.ArrayHashMapUnmanaged(PackageNameHash, Semver.Version, ArrayIdentityContext.U64, false);
const assertNoUninitializedPadding = @import("./padding_checker.zig").assertNoUninitializedPadding;
@@ -114,6 +115,7 @@ workspace_versions: VersionHashMap = .{},
has_trusted_dependencies: bool = false,
trusted_dependencies: NameHashSet = .{},
+overrides: OverrideMap = .{},
const Stream = std.io.FixedBufferStream([]u8);
pub const default_filename = "bun.lockb";
@@ -185,7 +187,7 @@ pub fn isEmpty(this: *const Lockfile) bool {
return this.packages.len == 0 or this.packages.len == 1 or this.packages.get(0).resolutions.len == 0;
}
-pub const LoadFromDiskResult = union(Tag) {
+pub const LoadFromDiskResult = union(enum) {
not_found: void,
err: struct {
step: Step,
@@ -193,26 +195,30 @@ pub const LoadFromDiskResult = union(Tag) {
},
ok: *Lockfile,
- pub const Step = enum { open_file, read_file, parse_file };
-
- pub const Tag = enum {
- not_found,
- err,
- ok,
- };
+ pub const Step = enum { open_file, read_file, parse_file, migrating };
};
pub fn loadFromDisk(this: *Lockfile, allocator: Allocator, log: *logger.Log, filename: stringZ) LoadFromDiskResult {
if (comptime Environment.allow_assert) std.debug.assert(FileSystem.instance_loaded);
- var file = std.io.getStdIn();
- if (filename.len > 0)
- file = std.fs.cwd().openFileZ(filename, .{ .mode = .read_only }) catch |err| {
+ var file = if (filename.len > 0)
+ std.fs.cwd().openFileZ(filename, .{ .mode = .read_only }) catch |err| {
return switch (err) {
- error.FileNotFound, error.AccessDenied, error.BadPathName => LoadFromDiskResult{ .not_found = {} },
+ error.FileNotFound => {
+ // Attempt to load from "package-lock.json", "yarn.lock", etc.
+ return migration.detectAndLoadOtherLockfile(
+ this,
+ allocator,
+ log,
+ filename,
+ );
+ },
+ error.AccessDenied, error.BadPathName => LoadFromDiskResult{ .not_found = {} },
else => LoadFromDiskResult{ .err = .{ .step = .open_file, .value = err } },
};
- };
+ }
+ else
+ std.io.getStdIn();
defer file.close();
var buf = file.readToEndAlloc(allocator, std.math.maxInt(usize)) catch |err| {
@@ -230,11 +236,16 @@ pub fn loadFromBytes(this: *Lockfile, buf: []u8, allocator: Allocator, log: *log
this.trusted_dependencies = .{};
this.workspace_paths = .{};
this.workspace_versions = .{};
+ this.overrides = .{};
Lockfile.Serializer.load(this, &stream, allocator, log) catch |err| {
return LoadFromDiskResult{ .err = .{ .step = .parse_file, .value = err } };
};
+ if (Environment.allow_assert) {
+ this.verifyData() catch @panic("lockfile data is corrupt");
+ }
+
return LoadFromDiskResult{ .ok = this };
}
@@ -315,6 +326,14 @@ pub const Tree = struct {
};
}
+ pub fn reload(this: *Iterator, lockfile: *const Lockfile) void {
+ this.trees = lockfile.buffers.trees.items;
+ this.dependency_ids = lockfile.buffers.hoisted_dependencies.items;
+ this.dependencies = lockfile.buffers.dependencies.items;
+ this.resolutions = lockfile.buffers.resolutions.items;
+ this.string_buf = lockfile.buffers.string_bytes.items;
+ }
+
pub fn nextNodeModulesFolder(this: *Iterator) ?NodeModulesFolder {
if (this.tree_id >= this.trees.len) return null;
@@ -401,7 +420,7 @@ pub const Tree = struct {
dependencies: Lockfile.DependencyIDList,
};
- pub const ArrayList = std.MultiArrayList(Entry);
+ pub const ArrayList = bun.MultiArrayList(Entry);
/// Flatten the multi-dimensional ArrayList of package IDs into a single easily serializable array
pub fn clean(this: *Builder) !DependencyIDList {
@@ -698,7 +717,7 @@ pub fn cleanWithLogger(
// If yes, choose that version instead.
// Why lower?
//
- // Normally, the problem is looks like this:
+ // Normally, the problem looks like this:
// Package A: "react@^17"
    //   Package B: "react@17.0.1"
//
@@ -729,7 +748,7 @@ pub fn cleanWithLogger(
// }
// }
- var new = try old.allocator.create(Lockfile);
+ var new: *Lockfile = try old.allocator.create(Lockfile);
try new.initEmpty(
old.allocator,
);
@@ -740,6 +759,13 @@ pub fn cleanWithLogger(
old.scratch.dependency_list_queue.head = 0;
+ {
+ var builder = new.stringBuilder();
+ old.overrides.count(old, &builder);
+ try builder.allocate();
+ new.overrides = try old.overrides.clone(old, new, &builder);
+ }
+
// Step 1. Recreate the lockfile with only the packages that are still alive
const root = old.rootPackage() orelse return error.NoPackage;
@@ -756,12 +782,73 @@ pub fn cleanWithLogger(
.clone_queue = clone_queue_,
.log = log,
};
+
// try clone_queue.ensureUnusedCapacity(root.dependencies.len);
_ = try root.clone(old, new, package_id_mapping, &cloner);
+ // Clone workspace_paths and workspace_versions at the end.
+ if (old.workspace_paths.count() > 0 or old.workspace_versions.count() > 0) {
+ try new.workspace_paths.ensureTotalCapacity(z_allocator, old.workspace_paths.count());
+ try new.workspace_versions.ensureTotalCapacity(z_allocator, old.workspace_versions.count());
+
+ var workspace_paths_builder = new.stringBuilder();
+
+ const WorkspacePathSorter = struct {
+ string_buf: []const u8,
+ entries: NameHashMap.DataList,
+
+ pub fn lessThan(sorter: @This(), a: usize, b: usize) bool {
+ const left = sorter.entries.items(.value)[a];
+ const right = sorter.entries.items(.value)[b];
+ return strings.order(left.slice(sorter.string_buf), right.slice(sorter.string_buf)) == .lt;
+ }
+ };
+
+        // Sort by path value for determinism
+ old.workspace_paths.sort(WorkspacePathSorter{
+ .entries = old.workspace_paths.entries,
+ .string_buf = old.buffers.string_bytes.items,
+ });
+
+ for (old.workspace_paths.values()) |*path| {
+ workspace_paths_builder.count(old.str(path));
+ }
+ const versions: []const Semver.Version = old.workspace_versions.values();
+ for (versions) |version| {
+ version.count(old.buffers.string_bytes.items, @TypeOf(&workspace_paths_builder), &workspace_paths_builder);
+ }
+
+ try workspace_paths_builder.allocate();
+
+ new.workspace_paths.entries.len = old.workspace_paths.entries.len;
+
+ for (old.workspace_paths.values(), new.workspace_paths.values()) |*src, *dest| {
+ dest.* = workspace_paths_builder.append(String, old.str(src));
+ }
+ @memcpy(
+ new.workspace_paths.keys(),
+ old.workspace_paths.keys(),
+ );
+
+ try new.workspace_versions.ensureTotalCapacity(z_allocator, old.workspace_versions.count());
+ new.workspace_versions.entries.len = old.workspace_versions.entries.len;
+ for (versions, new.workspace_versions.values()) |src, *dest| {
+ dest.* = src.clone(old.buffers.string_bytes.items, @TypeOf(&workspace_paths_builder), &workspace_paths_builder);
+ }
+
+ @memcpy(
+ new.workspace_versions.keys(),
+ old.workspace_versions.keys(),
+ );
+
+ workspace_paths_builder.clamp();
+
+ try new.workspace_versions.reIndex(z_allocator);
+ try new.workspace_paths.reIndex(z_allocator);
+ }
+
    // When you run `bun add react`,
// This is where we update it in the lockfile from "latest" to "^17.0.2"
-
try cloner.flush();
// Don't allow invalid memory to happen
@@ -791,6 +878,7 @@ pub fn cleanWithLogger(
}
new.trusted_dependencies = old_trusted_dependencies;
new.scripts = old_scripts;
+
return new;
}
@@ -962,6 +1050,9 @@ pub const Printer = struct {
.read_file => Output.prettyErrorln("<r><red>error<r> reading lockfile:<r> {s}", .{
@errorName(cause.value),
}),
+ .migrating => Output.prettyErrorln("<r><red>error<r> while migrating lockfile:<r> {s}", .{
+ @errorName(cause.value),
+ }),
}
if (log.errors > 0) {
switch (Output.enable_ansi_colors) {
@@ -1017,6 +1108,7 @@ pub const Printer = struct {
env_loader,
null,
null,
+ .install,
);
var printer = Printer{
@@ -1083,21 +1175,39 @@ pub const Printer = struct {
if (!installed.isSet(package_id)) continue;
- const fmt = comptime brk: {
- if (enable_ansi_colors) {
- break :brk Output.prettyFmt("<r> <green>+<r> <b>{s}<r><d>@{}<r>\n", enable_ansi_colors);
- } else {
- break :brk Output.prettyFmt("<r> + {s}<r><d>@{}<r>\n", enable_ansi_colors);
- }
- };
+ if (PackageManager.instance.formatLaterVersionInCache(package_name, dependency.name_hash, resolved[package_id])) |later_version_fmt| {
+ const fmt = comptime brk: {
+ if (enable_ansi_colors) {
+ break :brk Output.prettyFmt("<r> <green>+<r> <b>{s}<r><d>@{}<r> <d>(<blue>v{} available<r><d>)<r>\n", enable_ansi_colors);
+ } else {
+ break :brk Output.prettyFmt("<r> + {s}<r><d>@{}<r> <d>(v{} available)<r>\n", enable_ansi_colors);
+ }
+ };
+ try writer.print(
+ fmt,
+ .{
+ package_name,
+ resolved[package_id].fmt(string_buf),
+ later_version_fmt,
+ },
+ );
+ } else {
+ const fmt = comptime brk: {
+ if (enable_ansi_colors) {
+ break :brk Output.prettyFmt("<r> <green>+<r> <b>{s}<r><d>@{}<r>\n", enable_ansi_colors);
+ } else {
+ break :brk Output.prettyFmt("<r> + {s}<r><d>@{}<r>\n", enable_ansi_colors);
+ }
+ };
- try writer.print(
- fmt,
- .{
- package_name,
- resolved[package_id].fmt(string_buf),
- },
- );
+ try writer.print(
+ fmt,
+ .{
+ package_name,
+ resolved[package_id].fmt(string_buf),
+ },
+ );
+ }
}
} else {
outer: for (dependencies_buffer, resolutions_buffer, 0..) |dependency, package_id, dep_id| {
@@ -1206,6 +1316,24 @@ pub const Printer = struct {
comptime Writer: type,
writer: Writer,
) !void {
+        // Internal, for debugging: print the lockfile as custom JSON.
+        // Limited to debug builds because we don't want people to rely on this format.
+ if (Environment.isDebug) {
+ if (std.os.getenv("JSON")) |_| {
+ try std.json.stringify(
+ this.lockfile,
+ .{
+ .whitespace = .indent_2,
+ .emit_null_optional_fields = true,
+ .emit_nonportable_numbers_as_strings = true,
+ },
+ writer,
+ );
+ try writer.writeAll("\n");
+ return;
+ }
+ }
+
try writer.writeAll(
\\# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
\\# yarn lockfile v1
@@ -1362,7 +1490,7 @@ pub const Printer = struct {
var behavior = Behavior.uninitialized;
var dependency_behavior_change_count: u8 = 0;
for (dependencies) |dep| {
- if (dep.behavior != behavior) {
+ if (!dep.behavior.eq(behavior)) {
if (dep.behavior.isOptional()) {
try writer.writeAll(" optionalDependencies:\n");
if (comptime Environment.allow_assert) dependency_behavior_change_count += 1;
@@ -1404,20 +1532,18 @@ pub const Printer = struct {
pub fn verifyData(this: *Lockfile) !void {
std.debug.assert(this.format == Lockfile.FormatVersion.current);
- {
- var i: usize = 0;
- while (i < this.packages.len) : (i += 1) {
- const package: Lockfile.Package = this.packages.get(i);
- std.debug.assert(this.str(&package.name).len == @as(usize, package.name.len()));
- std.debug.assert(String.Builder.stringHash(this.str(&package.name)) == @as(usize, package.name_hash));
- std.debug.assert(package.dependencies.get(this.buffers.dependencies.items).len == @as(usize, package.dependencies.len));
- std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.resolutions.len));
- std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.dependencies.len));
- const dependencies = package.dependencies.get(this.buffers.dependencies.items);
- for (dependencies) |dependency| {
- std.debug.assert(this.str(&dependency.name).len == @as(usize, dependency.name.len()));
- std.debug.assert(String.Builder.stringHash(this.str(&dependency.name)) == dependency.name_hash);
- }
+ var i: usize = 0;
+ while (i < this.packages.len) : (i += 1) {
+ const package: Lockfile.Package = this.packages.get(i);
+ std.debug.assert(this.str(&package.name).len == @as(usize, package.name.len()));
+ std.debug.assert(String.Builder.stringHash(this.str(&package.name)) == @as(usize, package.name_hash));
+ std.debug.assert(package.dependencies.get(this.buffers.dependencies.items).len == @as(usize, package.dependencies.len));
+ std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.resolutions.len));
+ std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.dependencies.len));
+ const dependencies = package.dependencies.get(this.buffers.dependencies.items);
+ for (dependencies) |dependency| {
+ std.debug.assert(this.str(&dependency.name).len == @as(usize, dependency.name.len()));
+ std.debug.assert(String.Builder.stringHash(this.str(&dependency.name)) == dependency.name_hash);
}
}
}
@@ -1437,7 +1563,7 @@ pub fn verifyResolutions(this: *Lockfile, local_features: Features, remote_featu
for (resolution_list.get(resolutions_buffer), dependency_list.get(dependencies_buffer)) |package_id, failed_dep| {
if (package_id < end) continue;
if (failed_dep.behavior.isPeer() or !failed_dep.behavior.isEnabled(
- if (root_list.contains(@as(PackageID, @truncate(parent_id))))
+ if (root_list.contains(@truncate(parent_id)))
local_features
else
remote_features,
@@ -1634,7 +1760,7 @@ pub fn appendPackage(this: *Lockfile, package_: Lockfile.Package) !Lockfile.Pack
fn appendPackageWithID(this: *Lockfile, package_: Lockfile.Package, id: PackageID) !Lockfile.Package {
defer {
- if (comptime Environment.isDebug) {
+ if (comptime Environment.allow_assert) {
std.debug.assert(this.getPackageID(package_.name_hash, null, &package_.resolution) != null);
}
}
@@ -1795,13 +1921,307 @@ pub const PackageIndex = struct {
};
};
+pub const OverrideMap = struct {
+ const debug = Output.scoped(.OverrideMap, false);
+
+ map: std.ArrayHashMapUnmanaged(PackageNameHash, Dependency, ArrayIdentityContext.U64, false) = .{},
+
+ /// In the future, this `get` function should handle multi-level resolutions. This is difficult right
+    /// now because given a Dependency ID, there is no fast way to trace it to its package.
+ ///
+ /// A potential approach is to add another buffer to the lockfile that maps Dependency ID to Package ID,
+ /// and from there `OverrideMap.map` can have a union as the value, where the union is between "override all"
+ /// and "here is a list of overrides depending on the package that imported" similar to PackageIndex above.
+ pub fn get(this: *const OverrideMap, name_hash: PackageNameHash) ?Dependency.Version {
+ debug("looking up override for {x}", .{name_hash});
+ return if (this.map.get(name_hash)) |dep|
+ dep.version
+ else
+ null;
+ }
+
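A hypothetical call site (names assumed, not from the patch) showing how a resolver would consult the map before falling back to the dependency's requested version:

const hash = String.Builder.stringHash(lockfile.str(&dep.name));
const version = lockfile.overrides.get(hash) orelse dep.version;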
+ pub fn deinit(this: *OverrideMap, allocator: Allocator) void {
+ this.map.deinit(allocator);
+ }
+
+ pub fn count(this: *OverrideMap, lockfile: *Lockfile, builder: *Lockfile.StringBuilder) void {
+ for (this.map.values()) |dep| {
+ dep.count(lockfile.buffers.string_bytes.items, @TypeOf(builder), builder);
+ }
+ }
+
+ pub fn clone(this: *OverrideMap, old_lockfile: *Lockfile, new_lockfile: *Lockfile, new_builder: *Lockfile.StringBuilder) !OverrideMap {
+ var new = OverrideMap{};
+ try new.map.ensureTotalCapacity(new_lockfile.allocator, this.map.entries.len);
+
+ for (this.map.keys(), this.map.values()) |k, v| {
+ new.map.putAssumeCapacity(
+ k,
+ try v.clone(old_lockfile.buffers.string_bytes.items, @TypeOf(new_builder), new_builder),
+ );
+ }
+
+ return new;
+ }
+
+ // the rest of this struct is expression parsing code:
+
+ pub fn parseCount(
+ _: *OverrideMap,
+ lockfile: *Lockfile,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) void {
+ if (expr.asProperty("overrides")) |overrides| {
+ if (overrides.expr.data != .e_object)
+ return;
+
+ for (overrides.expr.data.e_object.properties.slice()) |entry| {
+ builder.count(entry.key.?.asString(lockfile.allocator).?);
+ switch (entry.value.?.data) {
+ .e_string => |s| {
+ builder.count(s.slice(lockfile.allocator));
+ },
+ .e_object => {
+ if (entry.value.?.asProperty(".")) |dot| {
+ if (dot.expr.asString(lockfile.allocator)) |s| {
+ builder.count(s);
+ }
+ }
+ },
+ else => {},
+ }
+ }
+ } else if (expr.asProperty("resolutions")) |resolutions| {
+ if (resolutions.expr.data != .e_object)
+ return;
+
+ for (resolutions.expr.data.e_object.properties.slice()) |entry| {
+ builder.count(entry.key.?.asString(lockfile.allocator).?);
+ builder.count(entry.value.?.asString(lockfile.allocator) orelse continue);
+ }
+ }
+ }
+
+    /// Given a package.json expression, detect and parse override configuration into the given override map.
+ /// It is assumed the input map is uninitialized (zero entries)
+ pub fn parseAppend(
+ this: *OverrideMap,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ log: *logger.Log,
+ json_source: logger.Source,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) !void {
+ if (Environment.allow_assert) {
+ std.debug.assert(this.map.entries.len == 0); // only call parse once
+ }
+ if (expr.asProperty("overrides")) |overrides| {
+ try this.parseFromOverrides(lockfile, root_package, json_source, log, overrides.expr, builder);
+ } else if (expr.asProperty("resolutions")) |resolutions| {
+ try this.parseFromResolutions(lockfile, root_package, json_source, log, resolutions.expr, builder);
+ }
+ debug("parsed {d} overrides", .{this.map.entries.len});
+ }
+
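Both parsers follow the StringBuilder two-pass contract used throughout this file; a sketch mirroring the call sites in Package.parse (variable names assumed):

var builder = lockfile.stringBuilder();
lockfile.overrides.parseCount(lockfile, json, &builder); // pass 1: count key/value bytes
try builder.allocate();                                  // one backing allocation
try lockfile.overrides.parseAppend(lockfile, root_package, log, source, json, &builder); // pass 2: copy into the pool
builder.clamp();                                         // trim unused capacity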
+ /// https://docs.npmjs.com/cli/v9/configuring-npm/package-json#overrides
+ pub fn parseFromOverrides(
+ this: *OverrideMap,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ source: logger.Source,
+ log: *logger.Log,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) !void {
+ if (expr.data != .e_object) {
+ try log.addWarningFmt(&source, expr.loc, lockfile.allocator, "\"overrides\" must be an object", .{});
+ return error.Invalid;
+ }
+
+ try this.map.ensureUnusedCapacity(lockfile.allocator, expr.data.e_object.properties.len);
+
+ for (expr.data.e_object.properties.slice()) |prop| {
+ const key = prop.key.?;
+ var k = key.asString(lockfile.allocator).?;
+ if (k.len == 0) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Missing overridden package name", .{});
+ continue;
+ }
+
+ const name_hash = String.Builder.stringHash(k);
+
+ const value = value: {
+ // for one level deep, we will only support a string and { ".": value }
+ const value_expr = prop.value.?;
+ if (value_expr.data == .e_string) {
+ break :value value_expr;
+ } else if (value_expr.data == .e_object) {
+ if (value_expr.asProperty(".")) |dot| {
+ if (dot.expr.data == .e_string) {
+ if (value_expr.data.e_object.properties.len > 1) {
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Bun currently does not support nested \"overrides\"", .{});
+ }
+ break :value dot.expr;
+ } else {
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Invalid override value for \"{s}\"", .{k});
+ continue;
+ }
+ } else {
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Bun currently does not support nested \"overrides\"", .{});
+ continue;
+ }
+ }
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Invalid override value for \"{s}\"", .{k});
+ continue;
+ };
+
+ if (try parseOverrideValue(
+ "override",
+ lockfile,
+ root_package,
+ source,
+ value.loc,
+ log,
+ k,
+ value.data.e_string.slice(lockfile.allocator),
+ builder,
+ )) |version| {
+ this.map.putAssumeCapacity(name_hash, version);
+ }
+ }
+ }
+
+ /// yarn classic: https://classic.yarnpkg.com/lang/en/docs/selective-version-resolutions/
+ /// yarn berry: https://yarnpkg.com/configuration/manifest#resolutions
+ pub fn parseFromResolutions(
+ this: *OverrideMap,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ source: logger.Source,
+ log: *logger.Log,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) !void {
+ if (expr.data != .e_object) {
+ try log.addWarningFmt(&source, expr.loc, lockfile.allocator, "\"resolutions\" must be an object with string values", .{});
+ return;
+ }
+ try this.map.ensureUnusedCapacity(lockfile.allocator, expr.data.e_object.properties.len);
+ for (expr.data.e_object.properties.slice()) |prop| {
+ const key = prop.key.?;
+ var k = key.asString(lockfile.allocator).?;
+ if (strings.hasPrefixComptime(k, "**/"))
+ k = k[3..];
+ if (k.len == 0) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Missing resolution package name", .{});
+ continue;
+ }
+ const value = prop.value.?;
+ if (value.data != .e_string) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Expected string value for resolution \"{s}\"", .{k});
+ continue;
+ }
+            // Currently we only support one level deep, so we warn and skip keys with more than one level, e.g.:
+            // - "foo/bar"
+            // - "@namespace/hello/world"
+ if (k[0] == '@') {
+ const first_slash = strings.indexOfChar(k, '/') orelse {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Invalid package name \"{s}\"", .{k});
+ continue;
+ };
+ if (strings.indexOfChar(k[first_slash + 1 ..], '/') != null) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Bun currently does not support nested \"resolutions\"", .{});
+ continue;
+ }
+ } else if (strings.indexOfChar(k, '/') != null) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Bun currently does not support nested \"resolutions\"", .{});
+ continue;
+ }
+
+ if (try parseOverrideValue(
+ "resolution",
+ lockfile,
+ root_package,
+ source,
+ value.loc,
+ log,
+ k,
+ value.data.e_string.data,
+ builder,
+ )) |version| {
+ const name_hash = String.Builder.stringHash(k);
+ this.map.putAssumeCapacity(name_hash, version);
+ }
+ }
+ }
+
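For reference, the package.json shapes these two parsers accept look roughly like this (values illustrative; anything nested deeper than one level is warned about and skipped):

{
  "overrides": {
    "foo": "1.0.0",
    "bar": { ".": "$baz" }
  },
  "resolutions": {
    "**/qux": "2.0.0"
  }
}

Here "$baz" points the override at whatever version of baz is listed in the root package's direct dependencies, and the "**/" prefix on resolutions keys is stripped before hashing.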
+ pub fn parseOverrideValue(
+ comptime field: []const u8,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ source: logger.Source,
+ loc: logger.Loc,
+ log: *logger.Log,
+ key: []const u8,
+ value: []const u8,
+ builder: *Lockfile.StringBuilder,
+ ) !?Dependency {
+ if (value.len == 0) {
+ try log.addWarningFmt(&source, loc, lockfile.allocator, "Missing " ++ field ++ " value", .{});
+ return null;
+ }
+
+ // "Overrides may also be defined as a reference to a spec for a direct dependency
+ // by prefixing the name of the package you wish the version to match with a `$`"
+ // https://docs.npmjs.com/cli/v9/configuring-npm/package-json#overrides
+ // This is why a `*Lockfile.Package` is needed here.
+ if (value[0] == '$') {
+ const ref_name = value[1..];
+            // It is fine for this string not to share the string pool, because it is only used for .eql()
+ const ref_name_str = String.init(ref_name, ref_name);
+ const pkg_deps: []const Dependency = root_package.dependencies.get(lockfile.buffers.dependencies.items);
+ for (pkg_deps) |dep| {
+ if (dep.name.eql(ref_name_str, lockfile.buffers.string_bytes.items, ref_name)) {
+ return dep;
+ }
+ }
+ try log.addWarningFmt(&source, loc, lockfile.allocator, "Could not resolve " ++ field ++ " \"{s}\" (you need \"{s}\" in your dependencies)", .{ value, ref_name });
+ return null;
+ }
+
+ const literalString = builder.append(String, value);
+ const literalSliced = literalString.sliced(lockfile.buffers.string_bytes.items);
+
+ const name_hash = String.Builder.stringHash(key);
+ const name = builder.appendWithHash(String, key, name_hash);
+
+ return Dependency{
+ .name = name,
+ .name_hash = name_hash,
+ .version = Dependency.parse(
+ lockfile.allocator,
+ name,
+ literalSliced.slice,
+ &literalSliced,
+ log,
+ ) orelse {
+ try log.addWarningFmt(&source, loc, lockfile.allocator, "Invalid " ++ field ++ " value \"{s}\"", .{value});
+ return null;
+ },
+ };
+ }
+};
+
pub const FormatVersion = enum(u32) {
- v0,
+ v0 = 0,
// bun v0.0.x - bun v0.1.6
- v1,
+ v1 = 1,
// bun v0.1.7+
// This change added tarball URLs to npm-resolved packages
- v2,
+ v2 = 2,
+
_,
pub const current = FormatVersion.v2;
};
@@ -1821,7 +2241,7 @@ pub const Package = extern struct {
name: String = .{},
name_hash: PackageNameHash = 0,
- /// How a package has been resolved
+ /// How this package has been resolved
/// When .tag is uninitialized, that means the package is not resolved yet.
resolution: Resolution = .{},
@@ -1830,8 +2250,18 @@ pub const Package = extern struct {
/// if resolutions[i] is an invalid package ID, then dependencies[i] is not resolved
dependencies: DependencySlice = .{},
- /// The resolved package IDs for the dependencies
- resolutions: DependencyIDSlice = .{},
+ /// The resolved package IDs for this package's dependencies. Instead of storing this
+ /// on the `Dependency` struct within `.dependencies`, it is stored on the package itself
+ /// so we can access it faster.
+ ///
+ /// Each index in this array corresponds to the same index in dependencies.
+ /// Each value in this array corresponds to the resolved package ID for that dependency.
+ ///
+ /// So this is how you say "what package ID for lodash does this package actually resolve to?"
+ ///
+ /// By default, the underlying buffer is filled with "invalid_id" to indicate this package ID
+ /// was not resolved
+ resolutions: PackageIDSlice = .{},
meta: Meta = .{},
bin: Bin = .{},
@@ -1971,11 +2401,11 @@ pub const Package = extern struct {
field: string,
behavior: Behavior,
- pub const dependencies = DependencyGroup{ .prop = "dependencies", .field = "dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.normal)) };
- pub const dev = DependencyGroup{ .prop = "devDependencies", .field = "dev_dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.dev)) };
- pub const optional = DependencyGroup{ .prop = "optionalDependencies", .field = "optional_dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.optional)) };
- pub const peer = DependencyGroup{ .prop = "peerDependencies", .field = "peer_dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.peer)) };
- pub const workspaces = DependencyGroup{ .prop = "workspaces", .field = "workspaces", .behavior = @as(Behavior, @enumFromInt(Behavior.workspace)) };
+ pub const dependencies = DependencyGroup{ .prop = "dependencies", .field = "dependencies", .behavior = Behavior.normal };
+ pub const dev = DependencyGroup{ .prop = "devDependencies", .field = "dev_dependencies", .behavior = Behavior.dev };
+ pub const optional = DependencyGroup{ .prop = "optionalDependencies", .field = "optional_dependencies", .behavior = Behavior.optional };
+ pub const peer = DependencyGroup{ .prop = "peerDependencies", .field = "peer_dependencies", .behavior = Behavior.peer };
+ pub const workspaces = DependencyGroup{ .prop = "workspaces", .field = "workspaces", .behavior = Behavior.workspace };
};
pub inline fn isDisabled(this: *const Lockfile.Package) bool {
@@ -2409,6 +2839,7 @@ pub const Package = extern struct {
add: u32 = 0,
remove: u32 = 0,
update: u32 = 0,
+ overrides_changed: bool = false,
pub inline fn sum(this: *Summary, that: Summary) void {
this.add += that.add;
@@ -2417,7 +2848,7 @@ pub const Package = extern struct {
}
pub inline fn hasDiffs(this: Summary) bool {
- return this.add > 0 or this.remove > 0 or this.update > 0;
+ return this.add > 0 or this.remove > 0 or this.update > 0 or this.overrides_changed;
}
};
@@ -2436,6 +2867,23 @@ pub const Package = extern struct {
const from_deps = from.dependencies.get(from_lockfile.buffers.dependencies.items);
const from_resolutions = from.resolutions.get(from_lockfile.buffers.resolutions.items);
var to_i: usize = 0;
+ var skipped_workspaces: usize = 0;
+
+ if (from_lockfile.overrides.map.count() != to_lockfile.overrides.map.count()) {
+ summary.overrides_changed = true;
+ } else {
+ for (
+ from_lockfile.overrides.map.keys(),
+ from_lockfile.overrides.map.values(),
+ to_lockfile.overrides.map.keys(),
+ to_lockfile.overrides.map.values(),
+ ) |from_k, *from_override, to_k, *to_override| {
+ if ((from_k != to_k) or (!from_override.eql(to_override, from_lockfile.buffers.string_bytes.items, to_lockfile.buffers.string_bytes.items))) {
+ summary.overrides_changed = true;
+ break;
+ }
+ }
+ }
for (from_deps, 0..) |*from_dep, i| {
found: {
@@ -2454,6 +2902,11 @@ pub const Package = extern struct {
if (from_dep.name_hash == to_deps[to_i].name_hash) break :found;
}
+ if (PackageManager.instance.workspaces.contains(from_lockfile.str(&from_dep.name))) {
+ skipped_workspaces += 1;
+ continue;
+ }
+
// We found a removed dependency!
// We don't need to remove it
// It will be cleaned up later
@@ -2479,7 +2932,7 @@ pub const Package = extern struct {
if (id_mapping) |mapping| {
const version = to_deps[to_i].version;
if (switch (version.tag) {
- .workspace => if (to_lockfile.workspace_paths.getPtr(@truncate(from_dep.name_hash))) |path_ptr| brk: {
+ .workspace => if (to_lockfile.workspace_paths.getPtr(from_dep.name_hash)) |path_ptr| brk: {
const path = to_lockfile.str(path_ptr);
var file = std.fs.cwd().openFile(Path.join(
&[_]string{ path, "package.json" },
@@ -2521,7 +2974,7 @@ pub const Package = extern struct {
summary.update += 1;
}
- summary.add = @truncate(to_deps.len - (from_deps.len - summary.remove));
+ summary.add = @truncate((to_deps.len + skipped_workspaces) - (from_deps.len - summary.remove));
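A worked example of the adjusted count (numbers illustrative):

const from_len: usize = 10; // dependencies in the old lockfile
const removed: usize = 2;   // tallied into summary.remove above
const skipped: usize = 1;   // workspace deps skipped earlier in the loop
const to_len: usize = 12;   // dependencies in the new package.json
const add = (to_len + skipped) - (from_len - removed); // 13 - 8 == 5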
inline for (Package.Scripts.Hooks) |hook| {
if (!@field(to.scripts, hook).eql(
@@ -2567,7 +3020,6 @@ pub const Package = extern struct {
comptime features: Features,
) !void {
initializeStore();
-
const json = json_parser.ParseJSONUTF8(&source, log, allocator) catch |err| {
switch (Output.enable_ansi_colors) {
inline else => |enable_ansi_colors| {
@@ -2637,8 +3089,20 @@ pub const Package = extern struct {
} else external_alias.hash,
else => external_alias.hash,
};
- const workspace_path = if (comptime tag == null) lockfile.workspace_paths.get(@truncate(name_hash)) else null;
- const workspace_version = if (comptime tag == null) lockfile.workspace_versions.get(@truncate(name_hash)) else workspace_ver;
+
+ var workspace_path: ?String = null;
+ var workspace_version = workspace_ver;
+ if (comptime tag == null) {
+ workspace_path = lockfile.workspace_paths.get(name_hash);
+ workspace_version = lockfile.workspace_versions.get(name_hash);
+
+ if (workspace_path == null or workspace_version == null) {
+ if (PackageManager.instance.workspaces.get(lockfile.str(&external_alias.value))) |_workspace_version| {
+ workspace_path = external_alias.value;
+ workspace_version = _workspace_version;
+ }
+ }
+ }
switch (dependency_version.tag) {
.folder => {
@@ -2716,7 +3180,7 @@ pub const Package = extern struct {
dependency_version.literal = path;
dependency_version.value.workspace = path;
- var workspace_entry = try lockfile.workspace_paths.getOrPut(allocator, @truncate(name_hash));
+ var workspace_entry = try lockfile.workspace_paths.getOrPut(allocator, name_hash);
if (workspace_entry.found_existing) {
if (strings.eqlComptime(workspace, "*")) return null;
@@ -2733,7 +3197,7 @@ pub const Package = extern struct {
workspace_entry.value_ptr.* = path;
if (workspace_version) |ver| {
- try lockfile.workspace_versions.put(allocator, @truncate(name_hash), ver);
+ try lockfile.workspace_versions.put(allocator, name_hash, ver);
for (package_dependencies[0..dependencies_count]) |*package_dep| {
if (switch (package_dep.version.tag) {
@@ -3181,7 +3645,7 @@ pub const Package = extern struct {
a: usize,
b: usize,
) bool {
- return std.mem.order(u8, self.values[a].name, self.values[b].name) == .lt;
+ return strings.order(self.values[a].name, self.values[b].name) == .lt;
}
}{
.values = workspace_names.values(),
@@ -3356,15 +3820,7 @@ pub const Package = extern struct {
return error.InvalidPackageJSON;
}
for (obj.properties.slice()) |item| {
- const key = item.key.?.asString(allocator) orelse {
- log.addErrorFmt(&source, item.key.?.loc, allocator,
- \\{0s} expects a map of specifiers, e.g.
- \\"{0s}": {{
- \\ "bun": "latest"
- \\}}
- , .{group.prop}) catch {};
- return error.InvalidPackageJSON;
- };
+ const key = item.key.?.asString(allocator).?;
const value = item.value.?.asString(allocator) orelse {
log.addErrorFmt(&source, item.value.?.loc, allocator,
\\{0s} expects a map of specifiers, e.g.
@@ -3440,6 +3896,10 @@ pub const Package = extern struct {
}
}
+ if (comptime features.is_main) {
+ lockfile.overrides.parseCount(lockfile, json, &string_builder);
+ }
+
try string_builder.allocate();
try lockfile.buffers.dependencies.ensureUnusedCapacity(lockfile.allocator, total_dependencies_count);
try lockfile.buffers.resolutions.ensureUnusedCapacity(lockfile.allocator, total_dependencies_count);
@@ -3597,6 +4057,11 @@ pub const Package = extern struct {
)) |dep| {
package_dependencies[total_dependencies_count] = dep;
total_dependencies_count += 1;
+
+ try lockfile.workspace_paths.put(allocator, external_name.hash, dep.version.value.workspace);
+ if (entry.version) |v| {
+ try lockfile.workspace_versions.put(allocator, external_name.hash, v);
+ }
}
}
} else {
@@ -3656,24 +4121,34 @@ pub const Package = extern struct {
lockfile.buffers.dependencies.items = lockfile.buffers.dependencies.items.ptr[0..new_len];
lockfile.buffers.resolutions.items = lockfile.buffers.resolutions.items.ptr[0..new_len];
+ // This function depends on package.dependencies being set, so it is done at the very end.
+ if (comptime features.is_main) {
+ try lockfile.overrides.parseAppend(lockfile, package, log, source, json, &string_builder);
+ }
+
string_builder.clamp();
}
- pub const List = std.MultiArrayList(Lockfile.Package);
+ pub const List = bun.MultiArrayList(Lockfile.Package);
pub const Meta = extern struct {
+ // TODO: when we bump the lockfile version, we should reorder this to:
+        // id(32), arch(16), os(16), origin(8), man_dir(8), integrity(72 align 8)
+ // should allow us to remove padding bytes
+
+        // TODO: remove origin; it doesn't do anything and can be inferred from the resolution
origin: Origin = Origin.npm,
_padding_origin: u8 = 0,
arch: Npm.Architecture = Npm.Architecture.all,
os: Npm.OperatingSystem = Npm.OperatingSystem.all,
-
_padding_os: u16 = 0,
id: PackageID = invalid_package_id,
man_dir: String = String{},
integrity: Integrity = Integrity{},
+ _padding_integrity: [3]u8 = .{0} ** 3,
/// Does the `cpu` arch and `os` match the requirements listed in the package?
/// This is completely unrelated to "devDependencies", "peerDependencies", "optionalDependencies" etc
@@ -3686,11 +4161,14 @@ pub const Package = extern struct {
}
pub fn clone(this: *const Meta, id: PackageID, buf: []const u8, comptime StringBuilderType: type, builder: StringBuilderType) Meta {
- var new = this.*;
- new.id = id;
- new.man_dir = builder.append(String, this.man_dir.slice(buf));
-
- return new;
+ return Meta{
+ .id = id,
+ .man_dir = builder.append(String, this.man_dir.slice(buf)),
+ .integrity = this.integrity,
+ .arch = this.arch,
+ .os = this.os,
+ .origin = this.origin,
+ };
}
};
@@ -3767,6 +4245,8 @@ pub const Package = extern struct {
inline for (FieldsEnum.fields) |field| {
const value = sliced.items(@field(Lockfile.Package.List.Field, field.name));
+ if (comptime Environment.allow_assert)
+ debug("save(\"{s}\") = {d} bytes", .{ field.name, std.mem.sliceAsBytes(value).len });
comptime assertNoUninitializedPadding(@TypeOf(value));
try writer.writeAll(std.mem.sliceAsBytes(value));
@@ -3848,16 +4328,20 @@ pub fn deinit(this: *Lockfile) void {
this.trusted_dependencies.deinit(this.allocator);
this.workspace_paths.deinit(this.allocator);
this.workspace_versions.deinit(this.allocator);
+ this.overrides.deinit(this.allocator);
}
const Buffers = struct {
trees: Tree.List = .{},
hoisted_dependencies: DependencyIDList = .{},
+ /// This is the underlying buffer used for the `resolutions` external slices inside of `Package`
+ /// Should be the same length as `dependencies`
resolutions: PackageIDList = .{},
+ /// This is the underlying buffer used for the `dependencies` external slices inside of `Package`
dependencies: DependencyList = .{},
+ /// This is the underlying buffer used for any `Semver.ExternalString` instance in the lockfile
extern_strings: ExternalStringBuffer = .{},
- // node_modules_folders: NodeModulesFolderList = NodeModulesFolderList{},
- // node_modules_package_ids: PackageIDList = PackageIDList{},
+ /// This is where all non-inlinable `Semver.String`s are stored.
string_bytes: StringBuffer = .{},
pub fn deinit(this: *Buffers, allocator: Allocator) void {
@@ -4147,6 +4631,10 @@ pub const Serializer = struct {
pub const version = "bun-lockfile-format-v0\n";
const header_bytes: string = "#!/usr/bin/env bun\n" ++ version;
+ const has_workspace_package_ids_tag: u64 = @bitCast([_]u8{ 'w', 'O', 'r', 'K', 's', 'P', 'a', 'C' });
+ const has_trusted_dependencies_tag: u64 = @bitCast([_]u8{ 't', 'R', 'u', 'S', 't', 'E', 'D', 'd' });
+ const has_overrides_tag: u64 = @bitCast([_]u8{ 'o', 'V', 'e', 'R', 'r', 'i', 'D', 's' });
+
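Each tag is eight ASCII bytes bit-cast to a u64, so presence checks are a single integer comparison; a sketch (assumes a little-endian target, as on Bun's supported platforms):

const std = @import("std");

test "tag round-trips through u64" {
    const tag: u64 = @bitCast([_]u8{ 'w', 'O', 'r', 'K', 's', 'P', 'a', 'C' });
    try std.testing.expectEqualSlices(u8, "wOrKsPaC", std.mem.asBytes(&tag));
}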
pub fn save(this: *Lockfile, comptime StreamType: type, stream: StreamType) !void {
var old_package_list = this.packages;
this.packages = try this.packages.clone(z_allocator);
@@ -4164,6 +4652,90 @@ pub const Serializer = struct {
try Lockfile.Package.Serializer.save(this.packages, StreamType, stream, @TypeOf(&writer), &writer);
try Lockfile.Buffers.save(this.buffers, z_allocator, StreamType, stream, @TypeOf(&writer), &writer);
try writer.writeIntLittle(u64, 0);
+
+ // < Bun v1.0.4 stopped right here when reading the lockfile
+        // So we add an extra 8-byte tag to say "hey, there's more data here"
+ if (this.workspace_versions.count() > 0) {
+ try writer.writeAll(std.mem.asBytes(&has_workspace_package_ids_tag));
+
+ // We need to track the "version" field in "package.json" of workspace member packages
+ // We do not necessarily have that in the Resolution struct. So we store it here.
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []PackageNameHash,
+ this.workspace_versions.keys(),
+ );
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []Semver.Version,
+ this.workspace_versions.values(),
+ );
+
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []PackageNameHash,
+ this.workspace_paths.keys(),
+ );
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []String,
+ this.workspace_paths.values(),
+ );
+ }
+
+ if (this.trusted_dependencies.count() > 0) {
+ try writer.writeAll(std.mem.asBytes(&has_trusted_dependencies_tag));
+
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []u32,
+ this.trusted_dependencies.keys(),
+ );
+ }
+
+ if (this.overrides.map.count() > 0) {
+ try writer.writeAll(std.mem.asBytes(&has_overrides_tag));
+
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []PackageNameHash,
+ this.overrides.map.keys(),
+ );
+ var external_overrides = try std.ArrayListUnmanaged(Dependency.External).initCapacity(z_allocator, this.overrides.map.count());
+ defer external_overrides.deinit(z_allocator);
+ external_overrides.items.len = this.overrides.map.count();
+ for (external_overrides.items, this.overrides.map.values()) |*dest, src| {
+ dest.* = src.toExternal();
+ }
+
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []Dependency.External,
+ external_overrides.items,
+ );
+ }
+
const end = try stream.getPos();
try writer.writeAll(&alignment_bytes_to_repeat_buffer);
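With these additions, the tail of a saved lockfile looks roughly like this (a sketch of the layout; every section after the u64 zero is optional and tag-guarded):

    ... header, packages, buffers ...
    u64 0                              <- pre-1.0.4 readers stop here
    "wOrKsPaC"  workspace name hashes, versions, path hashes, paths
    "tRuStEDd"  trusted dependency name hashes
    "oVeRriDs"  override name hashes, override dependencies (external form)
    alignment padding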
@@ -4210,29 +4782,161 @@ pub const Serializer = struct {
return error.@"Lockfile is malformed (expected 0 at the end)";
}
- if (comptime Environment.allow_assert) std.debug.assert(stream.pos == total_buffer_size);
+ var has_workspace_name_hashes = false;
+ // < Bun v1.0.4 stopped right here when reading the lockfile
+ // So we add an extra 8-byte tag to say "hey, there's more data here"
+ {
+ const remaining_in_buffer = total_buffer_size -| stream.pos;
+
+ if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) {
+ const next_num = try reader.readIntLittle(u64);
+ if (next_num == has_workspace_package_ids_tag) {
+ {
+ var workspace_package_name_hashes = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(PackageNameHash),
+ );
+ defer workspace_package_name_hashes.deinit(allocator);
- lockfile.scratch = Lockfile.Scratch.init(allocator);
+ var workspace_versions_list = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(Semver.Version),
+ );
+ comptime {
+ if (PackageNameHash != @TypeOf((VersionHashMap.KV{ .key = undefined, .value = undefined }).key)) {
+ @compileError("VersionHashMap must be in sync with serialization");
+ }
+ if (Semver.Version != @TypeOf((VersionHashMap.KV{ .key = undefined, .value = undefined }).value)) {
+ @compileError("VersionHashMap must be in sync with serialization");
+ }
+ }
+ defer workspace_versions_list.deinit(allocator);
+ try lockfile.workspace_versions.ensureTotalCapacity(allocator, workspace_versions_list.items.len);
+ lockfile.workspace_versions.entries.len = workspace_versions_list.items.len;
+ @memcpy(lockfile.workspace_versions.keys(), workspace_package_name_hashes.items);
+ @memcpy(lockfile.workspace_versions.values(), workspace_versions_list.items);
+ try lockfile.workspace_versions.reIndex(allocator);
+ }
+
+ {
+ var workspace_paths_hashes = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(PackageNameHash),
+ );
+ defer workspace_paths_hashes.deinit(allocator);
+ var workspace_paths_strings = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(String),
+ );
+ defer workspace_paths_strings.deinit(allocator);
+
+ try lockfile.workspace_paths.ensureTotalCapacity(allocator, workspace_paths_strings.items.len);
+
+ lockfile.workspace_paths.entries.len = workspace_paths_strings.items.len;
+ @memcpy(lockfile.workspace_paths.keys(), workspace_paths_hashes.items);
+ @memcpy(lockfile.workspace_paths.values(), workspace_paths_strings.items);
+ try lockfile.workspace_paths.reIndex(allocator);
+ }
+
+ has_workspace_name_hashes = true;
+ } else {
+ stream.pos -= 8;
+ }
+ }
+ }
{
- lockfile.package_index = PackageIndex.Map.initContext(allocator, .{});
- lockfile.string_pool = StringPool.initContext(allocator, .{});
- try lockfile.package_index.ensureTotalCapacity(@as(u32, @truncate(lockfile.packages.len)));
+ const remaining_in_buffer = total_buffer_size -| stream.pos;
+
+ if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) {
+ const next_num = try reader.readIntLittle(u64);
+ if (next_num == has_trusted_dependencies_tag) {
+ var trusted_dependencies_hashes = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(u32),
+ );
+ defer trusted_dependencies_hashes.deinit(allocator);
+
+ try lockfile.trusted_dependencies.ensureTotalCapacity(allocator, trusted_dependencies_hashes.items.len);
+
+ lockfile.trusted_dependencies.entries.len = trusted_dependencies_hashes.items.len;
+ @memcpy(lockfile.trusted_dependencies.keys(), trusted_dependencies_hashes.items);
+ try lockfile.trusted_dependencies.reIndex(allocator);
+ } else {
+ stream.pos -= 8;
+ }
+ }
+ }
+
+ {
+ const remaining_in_buffer = total_buffer_size -| stream.pos;
+
+ if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) {
+ const next_num = try reader.readIntLittle(u64);
+ if (next_num == has_overrides_tag) {
+ var overrides_name_hashes = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(PackageNameHash),
+ );
+ defer overrides_name_hashes.deinit(allocator);
+
+ var map = lockfile.overrides.map;
+ defer lockfile.overrides.map = map;
+
+ try map.ensureTotalCapacity(allocator, overrides_name_hashes.items.len);
+ var override_versions_external = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(Dependency.External),
+ );
+ const context: Dependency.Context = .{
+ .allocator = allocator,
+ .log = log,
+ .buffer = lockfile.buffers.string_bytes.items,
+ };
+ for (overrides_name_hashes.items, override_versions_external.items) |name, value| {
+ map.putAssumeCapacity(name, Dependency.toDependency(value, context));
+ }
+ } else {
+ stream.pos -= 8;
+ }
+ }
+ }
+
+ lockfile.scratch = Lockfile.Scratch.init(allocator);
+ lockfile.package_index = PackageIndex.Map.initContext(allocator, .{});
+ lockfile.string_pool = StringPool.initContext(allocator, .{});
+ try lockfile.package_index.ensureTotalCapacity(@as(u32, @truncate(lockfile.packages.len)));
+
+ if (!has_workspace_name_hashes) {
const slice = lockfile.packages.slice();
const name_hashes = slice.items(.name_hash);
const resolutions = slice.items(.resolution);
for (name_hashes, resolutions, 0..) |name_hash, resolution, id| {
try lockfile.getOrPutID(@as(PackageID, @truncate(id)), name_hash);
+ // compatibility with < Bun v1.0.4
switch (resolution.tag) {
.workspace => {
- try lockfile.workspace_paths.put(allocator, @as(u32, @truncate(name_hash)), resolution.value.workspace);
+ try lockfile.workspace_paths.put(allocator, name_hash, resolution.value.workspace);
},
else => {},
}
}
+ } else {
+ const slice = lockfile.packages.slice();
+ const name_hashes = slice.items(.name_hash);
+ for (name_hashes, 0..) |name_hash, id| {
+ try lockfile.getOrPutID(@as(PackageID, @truncate(id)), name_hash);
+ }
}
+ if (comptime Environment.allow_assert) std.debug.assert(stream.pos == total_buffer_size);
+
// const end = try reader.readIntLittle(u64);
}
};
@@ -4242,7 +4946,7 @@ pub fn hasMetaHashChanged(this: *Lockfile, print_name_version_string: bool) !boo
this.meta_hash = try this.generateMetaHash(print_name_version_string);
return !strings.eqlLong(&previous_meta_hash, &this.meta_hash, false);
}
-fn generateMetaHash(this: *Lockfile, print_name_version_string: bool) !MetaHash {
+pub fn generateMetaHash(this: *Lockfile, print_name_version_string: bool) !MetaHash {
if (this.packages.len <= 1)
return zero_hash;
@@ -4402,3 +5106,294 @@ pub fn hasTrustedDependency(this: *Lockfile, name: []const u8) bool {
return default_trusted_dependencies.has(name);
}
}
+
+pub fn jsonStringifyDependency(this: *const Lockfile, w: anytype, dep: Dependency, res: ?PackageID) !void {
+ const sb = this.buffers.string_bytes.items;
+ var buf: [2048]u8 = undefined;
+
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("literal");
+ try w.write(dep.version.literal.slice(sb));
+
+ try w.objectField(@tagName(dep.version.tag));
+ switch (dep.version.tag) {
+ .uninitialized => try w.write(null),
+ .npm => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Dependency.Version.NpmInfo = dep.version.value.npm;
+
+ try w.objectField("name");
+ try w.write(info.name.slice(sb));
+
+ try w.objectField("version");
+ try w.write(try std.fmt.bufPrint(&buf, "{}", .{info.version}));
+ },
+ .dist_tag => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Dependency.Version.TagInfo = dep.version.value.dist_tag;
+
+ try w.objectField("name");
+ try w.write(info.name.slice(sb));
+
+ try w.objectField("tag");
+ try w.write(info.tag.slice(sb));
+ },
+ .tarball => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Dependency.Version.TarballInfo = dep.version.value.tarball;
+ try w.objectField(@tagName(info.uri));
+ try w.write(switch (info.uri) {
+ inline else => |s| s.slice(sb),
+ });
+
+ try w.objectField("package_name");
+ try w.write(info.package_name.slice(sb));
+ },
+ .folder => {
+ try w.write(dep.version.value.folder.slice(sb));
+ },
+ .symlink => {
+ try w.write(dep.version.value.symlink.slice(sb));
+ },
+ .workspace => {
+ try w.write(dep.version.value.workspace.slice(sb));
+ },
+ .git => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Repository = dep.version.value.git;
+
+ try w.objectField("owner");
+ try w.write(info.owner.slice(sb));
+ try w.objectField("repo");
+ try w.write(info.repo.slice(sb));
+ try w.objectField("committish");
+ try w.write(info.committish.slice(sb));
+ try w.objectField("resolved");
+ try w.write(info.resolved.slice(sb));
+ try w.objectField("package_name");
+ try w.write(info.package_name.slice(sb));
+ },
+ .github => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Repository = dep.version.value.github;
+
+ try w.objectField("owner");
+ try w.write(info.owner.slice(sb));
+ try w.objectField("repo");
+ try w.write(info.repo.slice(sb));
+ try w.objectField("committish");
+ try w.write(info.committish.slice(sb));
+ try w.objectField("resolved");
+ try w.write(info.resolved.slice(sb));
+ try w.objectField("package_name");
+ try w.write(info.package_name.slice(sb));
+ },
+ }
+
+ try w.objectField("resolved_id");
+ try w.write(if (res) |r| if (r == invalid_package_id) null else r else null);
+
+ const behavior = try std.fmt.bufPrint(&buf, "{}", .{dep.behavior});
+ try w.objectField("behavior");
+ try w.write(behavior);
+}
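For an ordinary registry dependency, the emitted object looks roughly like this (illustrative values; "behavior" is the formatted behavior flag set):

    {
      "literal": "^7.5.4",
      "npm": { "name": "semver", "version": ">=7.5.4 <8.0.0" },
      "resolved_id": 12,
      "behavior": "..."
    }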
+
+pub fn jsonStringify(this: *const Lockfile, w: anytype) !void {
+ var buf: [2048]u8 = undefined;
+ const sb = this.buffers.string_bytes.items;
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("format");
+ try w.write(@tagName(this.format));
+ try w.objectField("meta_hash");
+ try w.write(std.fmt.bytesToHex(this.meta_hash, .lower));
+
+ {
+ try w.objectField("package_index");
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ var iter = this.package_index.iterator();
+ while (iter.next()) |it| {
+ const entry: PackageIndex.Entry = it.value_ptr.*;
+ const first_id = switch (entry) {
+ .PackageID => |id| id,
+ .PackageIDMultiple => |ids| ids.items[0],
+ };
+ const name = this.packages.items(.name)[first_id].slice(sb);
+ try w.objectField(name);
+ switch (entry) {
+ .PackageID => |id| try w.write(id),
+ .PackageIDMultiple => |ids| {
+ try w.beginArray();
+ for (ids.items) |id| {
+ try w.write(id);
+ }
+ try w.endArray();
+ },
+ }
+ }
+ }
+ {
+ try w.objectField("packages");
+ try w.beginArray();
+ defer w.endArray() catch {};
+
+ for (0..this.packages.len) |i| {
+ const pkg: Package = this.packages.get(i);
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("id");
+ try w.write(i);
+
+ try w.objectField("name");
+ try w.write(pkg.name.slice(sb));
+
+ try w.objectField("name_hash");
+ try w.write(pkg.name_hash);
+
+ try w.objectField("resolution");
+ if (pkg.resolution.tag == .uninitialized) {
+ try w.write(null);
+ } else {
+ const b = try std.fmt.bufPrint(&buf, "{s} {s}", .{ @tagName(pkg.resolution.tag), pkg.resolution.fmt(sb) });
+ try w.write(b);
+ }
+
+ try w.objectField("dependencies");
+ {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ for (pkg.dependencies.get(this.buffers.dependencies.items), pkg.resolutions.get(this.buffers.resolutions.items)) |dep_, res| {
+ const dep: Dependency = dep_;
+ try w.objectField(dep.name.slice(sb));
+ try this.jsonStringifyDependency(w, dep, res);
+ }
+ }
+
+ if (@as(u16, @intFromEnum(pkg.meta.arch)) != Npm.Architecture.all_value) {
+ try w.objectField("arch");
+ try w.beginArray();
+ defer w.endArray() catch {};
+
+ for (Npm.Architecture.NameMap.kvs) |kv| {
+ if (pkg.meta.arch.has(kv.value)) {
+ try w.write(kv.key);
+ }
+ }
+ }
+
+ if (@as(u16, @intFromEnum(pkg.meta.os)) != Npm.OperatingSystem.all_value) {
+ try w.objectField("os");
+ try w.beginArray();
+ defer w.endArray() catch {};
+
+ for (Npm.OperatingSystem.NameMap.kvs) |kv| {
+ if (pkg.meta.os.has(kv.value)) {
+ try w.write(kv.key);
+ }
+ }
+ }
+
+ try w.objectField("integrity");
+ if (pkg.meta.integrity.tag != .unknown) {
+ try w.write(try std.fmt.bufPrint(&buf, "{}", .{pkg.meta.integrity}));
+ } else {
+ try w.write(null);
+ }
+
+ try w.objectField("man_dir");
+ try w.write(pkg.meta.man_dir.slice(sb));
+
+ try w.objectField("origin");
+ try w.write(@tagName(pkg.meta.origin));
+
+ try w.objectField("bin");
+ switch (pkg.bin.tag) {
+ .none => try w.write(null),
+ .file => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("file");
+ try w.write(pkg.bin.value.file.slice(sb));
+ },
+ .named_file => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("name");
+ try w.write(pkg.bin.value.named_file[0].slice(sb));
+
+ try w.objectField("file");
+ try w.write(pkg.bin.value.named_file[1].slice(sb));
+ },
+ .dir => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("dir");
+ try w.write(pkg.bin.value.dir.slice(sb));
+ },
+ .map => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const data: []const ExternalString = pkg.bin.value.map.get(this.buffers.extern_strings.items);
+ var bin_i: usize = 0;
+ while (bin_i < data.len) : (bin_i += 2) {
+ try w.objectField(data[bin_i].slice(sb));
+ try w.write(data[bin_i + 1].slice(sb));
+ }
+ },
+ }
+
+ {
+ try w.objectField("scripts");
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ inline for (comptime std.meta.fieldNames(Lockfile.Scripts)) |field_name| {
+ var script = @field(pkg.scripts, field_name).slice(sb);
+ if (script.len > 0) {
+ try w.objectField(field_name);
+ try w.write(script);
+ }
+ }
+ }
+ }
+ }
+
+ try w.objectField("workspace_paths");
+ {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ for (this.workspace_paths.keys(), this.workspace_paths.values()) |k, v| {
+ try w.objectField(try std.fmt.bufPrint(&buf, "{d}", .{k}));
+ try w.write(v.slice(sb));
+ }
+ }
+ try w.objectField("workspace_versions");
+ {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ for (this.workspace_versions.keys(), this.workspace_versions.values()) |k, v| {
+ try w.objectField(try std.fmt.bufPrint(&buf, "{d}", .{k}));
+ try w.write(try std.fmt.bufPrint(&buf, "{}", .{v.fmt(sb)}));
+ }
+ }
+}
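Because this follows std.json's `jsonStringify` hook convention, dumping a lockfile for debugging is a single call (a sketch; assumes a populated `lockfile` and an `allocator`, matching the commented-out dump code in migration.zig below):

    var out = std.ArrayList(u8).init(allocator);
    defer out.deinit();
    try std.json.stringify(lockfile, .{ .whitespace = .indent_2 }, out.writer());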
diff --git a/src/install/migration.zig b/src/install/migration.zig
new file mode 100644
index 000000000..d74be7265
--- /dev/null
+++ b/src/install/migration.zig
@@ -0,0 +1,947 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+
+const bun = @import("root").bun;
+const string = bun.string;
+const Output = bun.Output;
+const Global = bun.Global;
+const Environment = bun.Environment;
+const strings = bun.strings;
+const MutableString = bun.MutableString;
+const stringZ = bun.stringZ;
+const logger = bun.logger;
+
+const Install = @import("./install.zig");
+const Resolution = @import("./resolution.zig").Resolution;
+const Dependency = @import("./dependency.zig");
+const VersionedURL = @import("./versioned_url.zig");
+const Npm = @import("./npm.zig");
+const Integrity = @import("./integrity.zig").Integrity;
+const Bin = @import("./bin.zig").Bin;
+
+const Semver = @import("./semver.zig");
+const String = Semver.String;
+const ExternalString = Semver.ExternalString;
+const stringHash = String.Builder.stringHash;
+
+const Lockfile = @import("./lockfile.zig");
+const LoadFromDiskResult = Lockfile.LoadFromDiskResult;
+
+const JSAst = bun.JSAst;
+const Expr = JSAst.Expr;
+const B = JSAst.B;
+const E = JSAst.E;
+const G = JSAst.G;
+const S = JSAst.S;
+
+const debug = Output.scoped(.migrate, false);
+
+pub fn detectAndLoadOtherLockfile(this: *Lockfile, allocator: Allocator, log: *logger.Log, bun_lockfile_path: stringZ) LoadFromDiskResult {
+ const dirname = bun_lockfile_path[0 .. strings.lastIndexOfChar(bun_lockfile_path, '/') orelse 0];
+ // check for package-lock.json, yarn.lock, etc...
+ // if it exists, do an in-memory migration
+ var buf: [bun.MAX_PATH_BYTES]u8 = undefined;
+ @memcpy(buf[0..dirname.len], dirname);
+
+ const cwd = std.fs.cwd();
+
+ npm: {
+ const npm_lockfile_name = "package-lock.json";
+ @memcpy(buf[dirname.len .. dirname.len + npm_lockfile_name.len], npm_lockfile_name);
+ buf[dirname.len + npm_lockfile_name.len] = 0;
+ const lockfile_path = buf[0 .. dirname.len + npm_lockfile_name.len :0];
+ var timer = std.time.Timer.start() catch unreachable;
+ const file = cwd.openFileZ(lockfile_path, .{ .mode = .read_only }) catch break :npm;
+ defer file.close();
+ var data = file.readToEndAlloc(allocator, std.math.maxInt(usize)) catch |err| {
+ return LoadFromDiskResult{ .err = .{ .step = .migrating, .value = err } };
+ };
+ const lockfile = migrateNPMLockfile(this, allocator, log, data, lockfile_path) catch |err| {
+ if (err == error.NPMLockfileVersionMismatch) {
+ Output.prettyErrorln(
+ \\<red><b>error<r><d>:<r> Please upgrade package-lock.json to lockfileVersion 3
+ \\
+ \\Run 'npm i --lockfile-version 3 --frozen-lockfile' to upgrade your lockfile without changing dependencies.
+ , .{});
+ Global.exit(1);
+ }
+ if (Environment.allow_assert) {
+ const maybe_trace = @errorReturnTrace();
+ Output.prettyErrorln("Error: {s}", .{@errorName(err)});
+ log.printForLogLevel(Output.errorWriter()) catch {};
+ if (maybe_trace) |trace| {
+ std.debug.dumpStackTrace(trace.*);
+ }
+ Output.prettyErrorln("Invalid NPM package-lock.json\nIn a release build, this would ignore and do a fresh install.\nAborting", .{});
+ Global.exit(1);
+ }
+ return LoadFromDiskResult{ .err = .{ .step = .migrating, .value = err } };
+ };
+
+ if (lockfile == .ok) {
+ Output.printElapsed(@as(f64, @floatFromInt(timer.read())) / std.time.ns_per_ms);
+ Output.prettyError(" ", .{});
+ Output.prettyErrorln("<d>migrated lockfile from <r><green>package-lock.json<r>", .{});
+ Output.flush();
+ }
+
+ return lockfile;
+ }
+
+ return LoadFromDiskResult{ .not_found = {} };
+}
+
+const IdMap = std.StringHashMapUnmanaged(IdMapValue);
+const IdMapValue = struct {
+ /// index into the old package-lock.json package entries.
+ old_json_index: u32,
+ /// this is the new package id for the bun lockfile
+ ///
+ /// - if this new_package_id is set to `package_id_is_link`, it means it's a link
+ /// and to get the actual package id, you need to lookup `.resolved` in the hashmap.
+ /// - if it is `package_id_is_bundled`, it means it's a bundled dependency that was not
+ /// marked by npm, which can happen to some transitive dependencies.
+ new_package_id: u32,
+};
+const package_id_is_link = std.math.maxInt(u32);
+const package_id_is_bundled = std.math.maxInt(u32) - 1;
+
+const unset_package_id = Install.invalid_package_id - 1;
+
+const dependency_keys = .{
+ .dependencies,
+ .devDependencies,
+ .peerDependencies,
+ .optionalDependencies,
+};
+
+pub fn migrateNPMLockfile(this: *Lockfile, allocator: Allocator, log: *logger.Log, data: string, path: string) !LoadFromDiskResult {
+ debug("begin lockfile migration", .{});
+
+ try this.initEmpty(allocator);
+ Install.initializeStore();
+
+ const json_src = logger.Source.initPathString(path, data);
+ const json = bun.JSON.ParseJSONUTF8(&json_src, log, allocator) catch return error.InvalidNPMLockfile;
+
+ if (json.data != .e_object) {
+ return error.InvalidNPMLockfile;
+ }
+ if (json.get("lockfileVersion")) |version| {
+ if (!(version.data == .e_number and version.data.e_number.value == 3)) {
+ return error.NPMLockfileVersionMismatch;
+ }
+ } else {
+ return error.InvalidNPMLockfile;
+ }
+
+ // Count pass
+ var builder_ = this.stringBuilder();
+ var builder = &builder_;
+ const name = (if (json.get("name")) |expr| expr.asString(allocator) else null) orelse "";
+ builder.count(name);
+
+ var root_package: *E.Object = undefined;
+ var packages_properties = brk: {
+ const obj = json.get("packages") orelse return error.InvalidNPMLockfile;
+ if (obj.data != .e_object) return error.InvalidNPMLockfile;
+ if (obj.data.e_object.properties.len == 0) return error.InvalidNPMLockfile;
+ const prop1 = obj.data.e_object.properties.at(0);
+ if (prop1.key) |k| {
+ if (k.data != .e_string) return error.InvalidNPMLockfile;
+ // the first key must be "", the package's self-reference
+ if (k.data.e_string.data.len != 0) return error.InvalidNPMLockfile;
+ if (prop1.value.?.data != .e_object) return error.InvalidNPMLockfile;
+ root_package = prop1.value.?.data.e_object;
+ } else return error.InvalidNPMLockfile;
+ break :brk obj.data.e_object.properties;
+ };
+
+ var num_deps: u32 = 0;
+
+ const workspace_map: ?Lockfile.Package.WorkspaceMap = workspace_map: {
+ if (root_package.get("workspaces")) |wksp| {
+ var workspaces = Lockfile.Package.WorkspaceMap.init(allocator);
+
+ const json_array = switch (wksp.data) {
+ .e_array => |arr| arr,
+ .e_object => |obj| if (obj.get("packages")) |packages| switch (packages.data) {
+ .e_array => |arr| arr,
+ else => return error.InvalidNPMLockfile,
+ } else return error.InvalidNPMLockfile,
+ else => return error.InvalidNPMLockfile,
+ };
+
+ const workspace_packages_count = try Lockfile.Package.processWorkspaceNamesArray(
+ &workspaces,
+ allocator,
+ log,
+ json_array,
+ &json_src,
+ wksp.loc,
+ builder,
+ );
+ debug("found {d} workspace packages", .{workspace_packages_count});
+ num_deps += workspace_packages_count;
+ break :workspace_map workspaces;
+ }
+ break :workspace_map null;
+ };
+
+ // Counting Phase
+ // This "IdMap" is used to make object key lookups faster for the `packages` object
+ // it also lets us resolve linked and bundled packages.
+ var id_map = IdMap{};
+ try id_map.ensureTotalCapacity(allocator, packages_properties.len);
+ var num_extern_strings: u32 = 0;
+ var package_idx: u32 = 0;
+ for (packages_properties.slice(), 0..) |entry, i| {
+ const pkg_path = entry.key.?.asString(allocator).?;
+ if (entry.value.?.data != .e_object)
+ return error.InvalidNPMLockfile;
+
+ const pkg = entry.value.?.data.e_object;
+
+ if (pkg.get("link") != null) {
+ id_map.putAssumeCapacity(
+ pkg_path,
+ IdMapValue{
+ .old_json_index = @truncate(i),
+ .new_package_id = package_id_is_link,
+ },
+ );
+ continue;
+ }
+ if (pkg.get("inBundle")) |x| if (x.data == .e_boolean and x.data.e_boolean.value) {
+ id_map.putAssumeCapacity(
+ pkg_path,
+ IdMapValue{
+ .old_json_index = @truncate(i),
+ .new_package_id = package_id_is_bundled,
+ },
+ );
+ continue;
+ };
+ if (pkg.get("extraneous")) |x| if (x.data == .e_boolean and x.data.e_boolean.value) {
+ continue;
+ };
+
+ id_map.putAssumeCapacity(
+ pkg_path,
+ IdMapValue{
+ .old_json_index = @truncate(i),
+ .new_package_id = package_idx,
+ },
+ );
+ package_idx += 1;
+
+ inline for (dependency_keys) |dep_key| {
+ if (pkg.get(@tagName(dep_key))) |deps| {
+ if (deps.data != .e_object) {
+ return error.InvalidNPMLockfile;
+ }
+ num_deps +|= @as(u32, deps.data.e_object.properties.len);
+
+ for (deps.data.e_object.properties.slice()) |dep| {
+ const dep_name = dep.key.?.asString(allocator).?;
+ const version_string = dep.value.?.asString(allocator) orelse return error.InvalidNPMLockfile;
+
+ builder.count(dep_name);
+ builder.count(version_string);
+
+ // If it's a folder or workspace, pessimistically assume we will need a maximum path
+ switch (Dependency.Version.Tag.infer(version_string)) {
+ .folder, .workspace => builder.cap += bun.MAX_PATH_BYTES,
+ else => {},
+ }
+ }
+ }
+ }
+
+ if (pkg.get("bin")) |bin| {
+ if (bin.data != .e_object) return error.InvalidNPMLockfile;
+ switch (bin.data.e_object.properties.len) {
+ 0 => return error.InvalidNPMLockfile,
+ 1 => {
+ const first_bin = bin.data.e_object.properties.at(0);
+ const key = first_bin.key.?.asString(allocator).?;
+
+ const workspace_entry = if (workspace_map) |map| map.map.get(pkg_path) else null;
+ const is_workspace = workspace_entry != null;
+
+ const pkg_name = if (is_workspace)
+ workspace_entry.?.name
+ else if (entry.value.?.get("name")) |set_name|
+ (set_name.asString(this.allocator) orelse return error.InvalidNPMLockfile)
+ else
+ packageNameFromPath(pkg_path);
+
+ if (!strings.eql(key, pkg_name)) {
+ builder.count(key);
+ }
+ builder.count(first_bin.value.?.asString(allocator) orelse return error.InvalidNPMLockfile);
+ },
+ else => {
+ for (bin.data.e_object.properties.slice()) |bin_entry| {
+ builder.count(bin_entry.key.?.asString(allocator).?);
+ builder.count(bin_entry.value.?.asString(allocator) orelse return error.InvalidNPMLockfile);
+ }
+ num_extern_strings += @truncate(bin.data.e_object.properties.len * 2);
+ },
+ }
+ }
+
+ if (pkg.get("resolved")) |resolved_expr| {
+ const resolved = resolved_expr.asString(allocator) orelse return error.InvalidNPMLockfile;
+ if (strings.hasPrefixComptime(resolved, "file:")) {
+ builder.count(resolved[5..]);
+ } else if (strings.hasPrefixComptime(resolved, "git+")) {
+ builder.count(resolved[4..]);
+ } else {
+ builder.count(resolved);
+
+ // This over-counts slightly, but determining whether this is an `npm`/`dist_tag`
+ // version (the only cases where this string is actually used) is not worth the cost here.
+ if (pkg.get("version")) |v| if (v.asString(allocator)) |s| {
+ builder.count(s);
+ };
+ }
+ } else {
+ builder.count(pkg_path);
+ }
+ }
+ if (num_deps == std.math.maxInt(u32)) return error.InvalidNPMLockfile; // the dependency counter saturated; reject the lockfile
+
+ debug("counted {d} dependencies", .{num_deps});
+ debug("counted {d} extern strings", .{num_extern_strings});
+ debug("counted {d} packages", .{package_idx});
+
+ try this.buffers.dependencies.ensureTotalCapacity(allocator, num_deps);
+ try this.buffers.resolutions.ensureTotalCapacity(allocator, num_deps);
+ try this.buffers.extern_strings.ensureTotalCapacity(allocator, num_extern_strings);
+ try this.packages.ensureTotalCapacity(allocator, package_idx);
+ // The package index is overallocated, but we know the upper bound
+ try this.package_index.ensureTotalCapacity(package_idx);
+ try builder.allocate();
+
+ if (workspace_map) |wksp| {
+ try this.workspace_paths.ensureTotalCapacity(allocator, wksp.map.unmanaged.entries.len);
+ try this.workspace_versions.ensureTotalCapacity(allocator, wksp.map.unmanaged.entries.len);
+
+ for (wksp.map.keys(), wksp.map.values()) |k, v| {
+ const name_hash = stringHash(v.name);
+ this.workspace_paths.putAssumeCapacity(name_hash, builder.append(String, k));
+ if (v.version) |version| this.workspace_versions.putAssumeCapacity(name_hash, version);
+ }
+ }
+
+ // Package Building Phase
+ // This initializes every package and sets the resolution to uninitialized
+ for (packages_properties.slice()) |entry| {
+ // this pass is allowed to make more assumptions because we already checked things during
+ // the counting pass
+ const pkg = entry.value.?.data.e_object;
+
+ if (pkg.get("link") != null or if (pkg.get("inBundle") orelse pkg.get("extraneous")) |x| x.data == .e_boolean and x.data.e_boolean.value else false) continue;
+
+ const pkg_path = entry.key.?.asString(allocator).?;
+
+ const workspace_entry = if (workspace_map) |map| map.map.get(pkg_path) else null;
+ const is_workspace = workspace_entry != null;
+
+ const pkg_name = if (is_workspace)
+ workspace_entry.?.name
+ else if (pkg.get("name")) |set_name|
+ (set_name.asString(this.allocator) orelse unreachable)
+ else
+ packageNameFromPath(pkg_path);
+
+ const name_hash = stringHash(pkg_name);
+
+ const package_id: Install.PackageID = @intCast(this.packages.len);
+ if (Environment.allow_assert) {
+ // If this is false, then it means we wrote wrong resolved ids
+ // During counting phase we assign all the packages an id.
+ std.debug.assert(package_id == id_map.get(pkg_path).?.new_package_id);
+ }
+
+ // Instead of calling this.appendPackage, manually append
+ // the other function has some checks that will fail since we have not set resolution+dependencies yet.
+ this.packages.appendAssumeCapacity(Lockfile.Package{
+ .name = builder.appendWithHash(String, pkg_name, name_hash),
+ .name_hash = name_hash,
+
+ // For non workspace packages these are set to .uninitialized, then in the third phase
+ // they are resolved. This is because the resolution uses the dependent's version
+ // specifier as a "hint" to resolve the dependency.
+ .resolution = if (is_workspace) Resolution.init(.{
+ // This string is counted by `processWorkspaceNamesArray`
+ .workspace = builder.append(String, pkg_path),
+ }) else Resolution{},
+
+ // we fill this data in later
+ .dependencies = undefined,
+ .resolutions = undefined,
+
+ .meta = .{
+ .id = package_id,
+
+ .origin = if (package_id == 0) .local else .npm,
+
+ .arch = if (pkg.get("cpu")) |cpu_array| arch: {
+ if (cpu_array.data != .e_array) return error.InvalidNPMLockfile;
+ var arch: Npm.Architecture = .none;
+ for (cpu_array.data.e_array.items.slice()) |item| {
+ if (item.data != .e_string) return error.InvalidNPMLockfile;
+ arch = arch.apply(item.data.e_string.data);
+ }
+ break :arch arch;
+ } else .all,
+
+ .os = if (pkg.get("os")) |cpu_array| arch: {
+ if (cpu_array.data != .e_array) return error.InvalidNPMLockfile;
+ var os: Npm.OperatingSystem = .none;
+ for (cpu_array.data.e_array.items.slice()) |item| {
+ if (item.data != .e_string) return error.InvalidNPMLockfile;
+ os = os.apply(item.data.e_string.data);
+ }
+ break :arch os;
+ } else .all,
+
+ .man_dir = String{},
+
+ .integrity = if (pkg.get("integrity")) |integrity|
+ try Integrity.parse(
+ integrity.asString(this.allocator) orelse
+ return error.InvalidNPMLockfile,
+ )
+ else
+ Integrity{},
+ },
+ .bin = if (pkg.get("bin")) |bin| bin: {
+ // we already check these conditions during counting
+ std.debug.assert(bin.data == .e_object);
+ std.debug.assert(bin.data.e_object.properties.len > 0);
+
+ // in npm lockfile, the bin is always an object, even if it is only a single one
+ // we need to detect if it's a single entry and lower it to a file.
+ if (bin.data.e_object.properties.len == 1) {
+ const prop = bin.data.e_object.properties.at(0);
+ const key = prop.key.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ const script_value = prop.value.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+
+ if (strings.eql(key, pkg_name)) {
+ break :bin .{
+ .tag = .file,
+ .value = Bin.Value.init(.{
+ .file = builder.append(String, script_value),
+ }),
+ };
+ }
+
+ break :bin .{
+ .tag = .named_file,
+ .value = Bin.Value.init(.{
+ .named_file = .{
+ builder.append(String, key),
+ builder.append(String, script_value),
+ },
+ }),
+ };
+ }
+
+ const view: Install.ExternalStringList = .{
+ .off = @truncate(this.buffers.extern_strings.items.len),
+ .len = @intCast(bin.data.e_object.properties.len * 2),
+ };
+
+ for (bin.data.e_object.properties.slice()) |bin_entry| {
+ const key = bin_entry.key.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ const script_value = bin_entry.value.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ this.buffers.extern_strings.appendAssumeCapacity(builder.append(ExternalString, key));
+ this.buffers.extern_strings.appendAssumeCapacity(builder.append(ExternalString, script_value));
+ }
+
+ if (Environment.allow_assert) {
+ std.debug.assert(this.buffers.extern_strings.items.len == view.off + view.len);
+ std.debug.assert(this.buffers.extern_strings.items.len <= this.buffers.extern_strings.capacity);
+ }
+
+ break :bin .{
+ .tag = .map,
+ .value = Bin.Value.init(.{
+ .map = view,
+ }),
+ };
+ } else Bin.init(),
+
+ .scripts = .{},
+ });
+
+ if (is_workspace) {
+ std.debug.assert(package_id != 0); // the root package should not be in its own workspace
+
+ // we defer doing getOrPutID for non-workspace packages because it depends on the resolution being set.
+ try this.getOrPutID(package_id, name_hash);
+ }
+ }
+
+ if (Environment.allow_assert) {
+ std.debug.assert(this.packages.len == package_idx);
+ }
+
+ // Ignore the lists' stored lengths because we pre-allocated them. The final length may
+ // shrink, so it is faster to write through raw pointers and assign the real length at the very end.
+ var dependencies_buf = this.buffers.dependencies.items.ptr[0..num_deps];
+ var resolutions_buf = this.buffers.resolutions.items.ptr[0..num_deps];
+
+ // In assert builds, pre-fill dependencies with empty values and resolutions with `unset_package_id` so unset entries can be detected later
+ if (Environment.allow_assert) {
+ @memset(dependencies_buf, Dependency{});
+ @memset(resolutions_buf, unset_package_id);
+ }
+
+ var resolutions = this.packages.items(.resolution);
+ var metas = this.packages.items(.meta);
+ var dependencies_list = this.packages.items(.dependencies);
+ var resolution_list = this.packages.items(.resolutions);
+
+ if (Environment.allow_assert) {
+ for (resolutions) |r| {
+ std.debug.assert(r.tag == .uninitialized or r.tag == .workspace);
+ }
+ }
+
+ // Root resolution isn't hit through dependency tracing.
+ resolutions[0] = Resolution.init(.{ .root = {} });
+ metas[0].origin = .local;
+ try this.getOrPutID(0, this.packages.items(.name_hash)[0]);
+
+ // sized to twice the maximum path length in case a constructed path overflows the usual bound
+ var name_checking_buf: [bun.MAX_PATH_BYTES * 2]u8 = undefined;
+
+ // Dependency Linking Phase
+ package_idx = 0;
+ var is_first = true;
+ for (packages_properties.slice()) |entry| {
+ // this pass is allowed to make more assumptions because we already checked things during
+ // the counting pass
+ const pkg = entry.value.?.data.e_object;
+
+ if (pkg.get("link") != null or if (pkg.get("inBundle") orelse pkg.get("extraneous")) |x| x.data == .e_boolean and x.data.e_boolean.value else false) continue;
+
+ const pkg_path = entry.key.?.asString(allocator).?;
+
+ const dependencies_start = dependencies_buf.ptr;
+ const resolutions_start = resolutions_buf.ptr;
+
+ // This runs in a defer because this loop iteration can end in two places.
+ defer {
+ if (dependencies_start == dependencies_buf.ptr) {
+ dependencies_list[package_idx] = .{ .len = 0 };
+ resolution_list[package_idx] = .{ .len = 0 };
+ } else {
+ // Calculate the offset + length by pointer arithmetic
+ const len: u32 = @truncate((@intFromPtr(resolutions_buf.ptr) - @intFromPtr(resolutions_start)) / @sizeOf(Install.PackageID));
+ if (Environment.allow_assert) {
+ std.debug.assert(len > 0);
+ std.debug.assert(len == ((@intFromPtr(dependencies_buf.ptr) - @intFromPtr(dependencies_start)) / @sizeOf(Dependency)));
+ }
+ dependencies_list[package_idx] = .{
+ .off = @truncate((@intFromPtr(dependencies_start) - @intFromPtr(this.buffers.dependencies.items.ptr)) / @sizeOf(Dependency)),
+ .len = len,
+ };
+ resolution_list[package_idx] = .{
+ .off = @truncate((@intFromPtr(resolutions_start) - @intFromPtr(this.buffers.resolutions.items.ptr)) / @sizeOf(Install.PackageID)),
+ .len = len,
+ };
+ }
+
+ package_idx += 1;
+ }
+
+ // a rarely used feature: https://docs.npmjs.com/cli/v10/configuring-npm/package-json#bundledependencies
+ const bundled_dependencies = if (pkg.get("bundleDependencies") orelse pkg.get("bundledDependencies")) |expr| deps: {
+ if (expr.data == .e_boolean) {
+ if (expr.data.e_boolean.value) continue;
+ break :deps null;
+ }
+ if (expr.data != .e_array) return error.InvalidNPMLockfile;
+ const arr: *E.Array = expr.data.e_array;
+ var map = std.StringArrayHashMapUnmanaged(void){};
+ try map.ensureTotalCapacity(allocator, arr.items.len);
+ for (arr.items.slice()) |item| {
+ map.putAssumeCapacity(item.asString(allocator) orelse return error.InvalidNPMLockfile, {});
+ }
+ break :deps map;
+ } else null;
+
+ if (is_first) {
+ is_first = false;
+ if (workspace_map) |wksp| {
+ for (wksp.keys(), wksp.values()) |key, value| {
+ const entry1 = id_map.get(key) orelse return error.InvalidNPMLockfile;
+ const name_hash = stringHash(value.name);
+ const wksp_name = builder.append(String, value.name);
+ const wksp_path = builder.append(String, key);
+ dependencies_buf[0] = Dependency{
+ .name = wksp_name,
+ .name_hash = name_hash,
+ .version = .{
+ .tag = .workspace,
+ .literal = wksp_path,
+ .value = .{
+ .workspace = wksp_path,
+ },
+ },
+ .behavior = .{
+ .workspace = true,
+ },
+ };
+ resolutions_buf[0] = entry1.new_package_id;
+
+ dependencies_buf = dependencies_buf[1..];
+ resolutions_buf = resolutions_buf[1..];
+ }
+ }
+ }
+
+ inline for (dependency_keys) |dep_key| {
+ if (pkg.get(@tagName(dep_key))) |deps| {
+ // fetch peerDependenciesMeta if it exists; it only applies to peerDependencies
+ const peer_dep_meta = if (dep_key == .peerDependencies)
+ if (pkg.get("peerDependenciesMeta")) |expr| peer_dep_meta: {
+ if (expr.data != .e_object) return error.InvalidNPMLockfile;
+ break :peer_dep_meta expr.data.e_object;
+ } else null
+ else
+ void{};
+
+ if (deps.data != .e_object) return error.InvalidNPMLockfile;
+ const properties = deps.data.e_object.properties;
+
+ dep_loop: for (properties.slice()) |prop| {
+ const name_bytes = prop.key.?.asString(this.allocator).?;
+ if (bundled_dependencies != null and bundled_dependencies.?.getIndex(name_bytes) != null) continue :dep_loop;
+
+ const version_bytes = prop.value.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ const name_hash = stringHash(name_bytes);
+ const dep_name = builder.appendWithHash(String, name_bytes, name_hash);
+
+ const dep_version = builder.append(String, version_bytes);
+ const sliced = dep_version.sliced(this.buffers.string_bytes.items);
+
+ debug("parsing {s}, {s}\n", .{ name_bytes, version_bytes });
+ const version = Dependency.parse(
+ this.allocator,
+ dep_name,
+ sliced.slice,
+ &sliced,
+ log,
+ ) orelse {
+ return error.InvalidNPMLockfile;
+ };
+ debug("-> {s}, {}\n", .{ @tagName(version.tag), version.value });
+
+ if (Environment.allow_assert) {
+ std.debug.assert(version.tag != .uninitialized);
+ }
+
+ const str_node_modules = if (pkg_path.len == 0) "node_modules/" else "/node_modules/";
+ const suffix_len = str_node_modules.len + name_bytes.len;
+
+ var buf_len: u32 = @as(u32, @intCast(pkg_path.len + suffix_len));
+ if (buf_len > name_checking_buf.len) {
+ return error.PathTooLong;
+ }
+
+ bun.copy(u8, name_checking_buf[0..pkg_path.len], pkg_path);
+ bun.copy(u8, name_checking_buf[pkg_path.len .. pkg_path.len + str_node_modules.len], str_node_modules);
+ bun.copy(u8, name_checking_buf[pkg_path.len + str_node_modules.len .. pkg_path.len + suffix_len], name_bytes);
+
+ while (true) {
+ debug("checking {s}", .{name_checking_buf[0..buf_len]});
+ if (id_map.get(name_checking_buf[0..buf_len])) |found_| {
+ var found = found_;
+ if (found.new_package_id == package_id_is_link) {
+ // it is a workspace package, resolve from the "link": true entry to the real entry.
+ const ref_pkg = packages_properties.at(found.old_json_index).value.?.data.e_object;
+ // the `else` here is technically possible to hit
+ const resolved_v = ref_pkg.get("resolved") orelse return error.LockfileWorkspaceMissingResolved;
+ const resolved = resolved_v.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ found = (id_map.get(resolved) orelse return error.InvalidNPMLockfile);
+ } else if (found.new_package_id == package_id_is_bundled) {
+ debug("skipping bundled dependency {s}", .{name_bytes});
+ continue :dep_loop;
+ }
+
+ const id = found.new_package_id;
+
+ var is_workspace = resolutions[id].tag == .workspace;
+
+ dependencies_buf[0] = Dependency{
+ .name = dep_name,
+ .name_hash = name_hash,
+ .version = version,
+ .behavior = .{
+ .normal = dep_key == .dependencies,
+ .optional = dep_key == .optionalDependencies,
+ .dev = dep_key == .devDependencies,
+ .peer = dep_key == .peerDependencies,
+ .workspace = is_workspace,
+ },
+ };
+ resolutions_buf[0] = id;
+
+ dependencies_buf = dependencies_buf[1..];
+ resolutions_buf = resolutions_buf[1..];
+
+ // If the package resolution is not set, resolve the target package
+ // using the information we have from the dependency declaration.
+ if (resolutions[id].tag == .uninitialized) {
+ debug("resolving '{s}'", .{name_bytes});
+
+ const res = resolved: {
+ const dep_pkg = packages_properties.at(found.old_json_index).value.?.data.e_object;
+ const npm_resolution = dep_pkg.get("resolved") orelse {
+ break :resolved Resolution.init(.{
+ .folder = builder.append(
+ String,
+ packages_properties.at(found.old_json_index).key.?.asString(allocator).?,
+ ),
+ });
+ };
+ const dep_resolved = npm_resolution.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+
+ break :resolved switch (version.tag) {
+ .uninitialized => std.debug.panic("Version string {s} resolved to `.uninitialized`", .{version_bytes}),
+ .npm, .dist_tag => res: {
+ // It is theoretically possible to hit this in a case where the resolved dependency is NOT
+ // an npm dependency, but that case is so convoluted that it is not worth handling.
+ //
+ // Deleting 'package-lock.json' would completely break the installation of the project.
+ //
+ // We assume that the given URL is to *some* npm registry, or the resolution is to a workspace package.
+ // If it is a workspace package, then this branch will not be hit as the resolution was already set earlier.
+ const dep_actual_version = (dep_pkg.get("version") orelse return error.InvalidNPMLockfile)
+ .asString(this.allocator) orelse return error.InvalidNPMLockfile;
+
+ const dep_actual_version_str = builder.append(String, dep_actual_version);
+ const dep_actual_version_sliced = dep_actual_version_str.sliced(this.buffers.string_bytes.items);
+
+ break :res Resolution.init(.{
+ .npm = .{
+ .url = builder.append(String, dep_resolved),
+ .version = Semver.Version.parse(dep_actual_version_sliced).version.fill(),
+ },
+ });
+ },
+ .tarball => if (strings.hasPrefixComptime(dep_resolved, "file:"))
+ Resolution.init(.{ .local_tarball = builder.append(String, dep_resolved[5..]) })
+ else
+ Resolution.init(.{ .remote_tarball = builder.append(String, dep_resolved) }),
+ .folder => Resolution.init(.{ .folder = builder.append(String, dep_resolved) }),
+ // not sure if this is possible to hit
+ .symlink => Resolution.init(.{ .folder = builder.append(String, dep_resolved) }),
+ .workspace => workspace: {
+ var input = builder.append(String, dep_resolved).sliced(this.buffers.string_bytes.items);
+ if (strings.hasPrefixComptime(input.slice, "workspace:")) {
+ input = input.sub(input.slice["workspace:".len..]);
+ }
+ break :workspace Resolution.init(.{
+ .workspace = input.value(),
+ });
+ },
+ .git => res: {
+ const str = (if (strings.hasPrefixComptime(dep_resolved, "git+"))
+ builder.append(String, dep_resolved[4..])
+ else
+ builder.append(String, dep_resolved))
+ .sliced(this.buffers.string_bytes.items);
+
+ const hash_index = strings.lastIndexOfChar(str.slice, '#') orelse return error.InvalidNPMLockfile;
+
+ const commit = str.sub(str.slice[hash_index + 1 ..]).value();
+ break :res Resolution.init(.{
+ .git = .{
+ .owner = version.value.git.owner,
+ .repo = str.sub(str.slice[0..hash_index]).value(),
+ .committish = commit,
+ .resolved = commit,
+ .package_name = dep_name,
+ },
+ });
+ },
+ .github => res: {
+ const str = (if (strings.hasPrefixComptime(dep_resolved, "git+"))
+ builder.append(String, dep_resolved[4..])
+ else
+ builder.append(String, dep_resolved))
+ .sliced(this.buffers.string_bytes.items);
+
+ const hash_index = strings.lastIndexOfChar(str.slice, '#') orelse return error.InvalidNPMLockfile;
+
+ const commit = str.sub(str.slice[hash_index + 1 ..]).value();
+ break :res Resolution.init(.{
+ .git = .{
+ .owner = version.value.github.owner,
+ .repo = str.sub(str.slice[0..hash_index]).value(),
+ .committish = commit,
+ .resolved = commit,
+ .package_name = dep_name,
+ },
+ });
+ },
+ };
+ };
+ debug("-> {}", .{res.fmtForDebug(this.buffers.string_bytes.items)});
+
+ resolutions[id] = res;
+ metas[id].origin = switch (res.tag) {
+ // This works?
+ .root => .local,
+ else => .npm,
+ };
+
+ try this.getOrPutID(id, this.packages.items(.name_hash)[id]);
+ }
+
+ continue :dep_loop;
+ }
+ // step: walk up to the parent node_modules directory and try again
+ if (strings.lastIndexOf(name_checking_buf[0..buf_len -| ("node_modules/".len + name_bytes.len)], "node_modules/")) |idx| {
+ debug("found 'node_modules/' at {d}", .{idx});
+ buf_len = @intCast(idx + "node_modules/".len + name_bytes.len);
+ bun.copy(u8, name_checking_buf[idx + "node_modules/".len .. idx + "node_modules/".len + name_bytes.len], name_bytes);
+ } else if (!strings.hasPrefixComptime(name_checking_buf[0..buf_len], "node_modules/")) {
+ // this is hit if you start from `packages/etc`, from `packages/etc/node_modules/xyz`
+ // we need to hit the root node_modules
+ buf_len = @intCast("node_modules/".len + name_bytes.len);
+ bun.copy(u8, name_checking_buf[0..buf_len], "node_modules/");
+ bun.copy(u8, name_checking_buf[buf_len - name_bytes.len .. buf_len], name_bytes);
+ } else {
+ // optional peer dependencies are allowed to be missing
+ if (dep_key == .peerDependencies) {
+ if (peer_dep_meta) |o| if (o.get(name_bytes)) |meta| {
+ if (meta.data != .e_object) return error.InvalidNPMLockfile;
+ if (meta.data.e_object.get("optional")) |optional| {
+ if (optional.data != .e_boolean) return error.InvalidNPMLockfile;
+ if (optional.data.e_boolean.value) {
+ dependencies_buf[0] = Dependency{
+ .name = dep_name,
+ .name_hash = name_hash,
+ .version = version,
+ .behavior = .{
+ .normal = dep_key == .dependencies,
+ .optional = true,
+ .dev = dep_key == .devDependencies,
+ .peer = dep_key == .peerDependencies,
+ .workspace = false,
+ },
+ };
+ resolutions_buf[0] = Install.invalid_package_id;
+ dependencies_buf = dependencies_buf[1..];
+ resolutions_buf = resolutions_buf[1..];
+ continue :dep_loop;
+ }
+ }
+ };
+ }
+
+ // It is technically possible (though very unlikely) for a package-lock.json to list a dependency
+ // without an entry for it. When npm sees this, it simply does not install the package and treats it
+ // as if it does not exist. In test/cli/install/migrate-fixture, you can observe this for `iconv-lite`.
+ debug("could not find package '{s}' in '{s}'", .{ name_bytes, pkg_path });
+ continue :dep_loop;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ this.buffers.resolutions.items.len = (@intFromPtr(resolutions_buf.ptr) - @intFromPtr(this.buffers.resolutions.items.ptr)) / @sizeOf(Install.PackageID);
+ this.buffers.dependencies.items.len = this.buffers.resolutions.items.len;
+
+ // In allow_assert builds, these buffers were pre-filled with sentinel values that we can detect here.
+ // Any failure here is our own bug, so it is safe to skip these checks in release builds.
+ if (Environment.allow_assert) {
+ std.debug.assert(this.buffers.dependencies.items.len == (@intFromPtr(dependencies_buf.ptr) - @intFromPtr(this.buffers.dependencies.items.ptr)) / @sizeOf(Dependency));
+ std.debug.assert(this.buffers.dependencies.items.len <= num_deps);
+ var crash = false;
+ for (this.buffers.dependencies.items, 0..) |r, i| {
+ // 'if behavior is uninitialized'
+ if (r.behavior.eq(.{})) {
+ debug("dependency index '{d}' was not set", .{i});
+ crash = true;
+ }
+ }
+ for (this.buffers.resolutions.items, 0..) |r, i| {
+ if (r == unset_package_id) {
+ debug("resolution index '{d}' was not set", .{i});
+ crash = true;
+ }
+ }
+ if (crash) {
+ std.debug.panic("Assertion failure, see above", .{});
+ }
+ }
+
+ // A package without a resolution, however, is not our fault. A bad lockfile with extra
+ // packages can trigger this; npm normally trims such packages out automatically.
+ var is_missing_resolutions = false;
+ for (resolutions, 0..) |r, i| {
+ if (r.tag == .uninitialized) {
+ Output.printErrorln("Could not resolve package '{s}' in lockfile.", .{this.packages.items(.name)[i].slice(this.buffers.string_bytes.items)});
+ is_missing_resolutions = true;
+ } else if (Environment.allow_assert) {
+ // Assertion from appendPackage. Run too early, it would always fail, since the resolution
+ // is not yet written; after all the data is written, there is no excuse for it to fail.
+ //
+ // If this is hit, it means getOrPutID was not called on this package id. Look for where 'resolutions[i]' is set.
+ std.debug.assert(this.getPackageID(this.packages.items(.name_hash)[i], null, &r) != null);
+ }
+ }
+ if (is_missing_resolutions) {
+ return error.NotAllPackagesGotResolved;
+ }
+
+ // if (Environment.isDebug) {
+ // const dump_file = try std.fs.cwd().createFileZ("before-clean.json", .{});
+ // defer dump_file.close();
+ // try std.json.stringify(this, .{ .whitespace = .indent_2 }, dump_file.writer());
+ // }
+
+ // This is definitely a memory leak, but it's fine because there is no install API, so it can only leak once per process.
+ // The operation is necessary because callers of `loadFromDisk` assume the data is written into the passed `this`.
+ // Skipping the clean step would cause `bun install` to install nothing at all, since the lockfile would have no hoisted trees.
+ this.* = (try this.cleanWithLogger(&[_]Install.PackageManager.UpdateRequest{}, log, false)).*;
+
+ // if (Environment.isDebug) {
+ // const dump_file = try std.fs.cwd().createFileZ("after-clean.json", .{});
+ // defer dump_file.close();
+ // try std.json.stringify(this, .{ .whitespace = .indent_2 }, dump_file.writer());
+ // }
+
+ if (Environment.allow_assert) {
+ try this.verifyData();
+ }
+
+ this.meta_hash = try this.generateMetaHash(false);
+
+ return LoadFromDiskResult{ .ok = this };
+}
+
+fn packageNameFromPath(pkg_path: []const u8) []const u8 {
+ if (pkg_path.len == 0) return "";
+
+ const pkg_name_start: usize = if (strings.lastIndexOf(pkg_path, "/node_modules/")) |last_index|
+ last_index + "/node_modules/".len
+ else if (strings.hasPrefixComptime(pkg_path, "node_modules/"))
+ "node_modules/".len
+ else
+ strings.lastIndexOf(pkg_path, "/") orelse 0;
+
+ return pkg_path[pkg_name_start..];
+}
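A few worked inputs for this helper (a sketch; same semantics as the function above):

    test "packageNameFromPath" {
        try std.testing.expectEqualStrings("lodash", packageNameFromPath("node_modules/lodash"));
        try std.testing.expectEqualStrings("@scope/pkg", packageNameFromPath("node_modules/@scope/pkg"));
        try std.testing.expectEqualStrings("bar", packageNameFromPath("node_modules/foo/node_modules/bar"));
    }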
diff --git a/src/install/npm.zig b/src/install/npm.zig
index 9f3f2952c..4cf1c2b71 100644
--- a/src/install/npm.zig
+++ b/src/install/npm.zig
@@ -68,8 +68,20 @@ pub const Registry = struct {
pub fn fromAPI(name: string, registry_: Api.NpmRegistry, allocator: std.mem.Allocator, env: *DotEnv.Loader) !Scope {
var registry = registry_;
+
+ // Support $ENV_VAR for registry URLs
+ if (strings.startsWithChar(registry_.url, '$')) {
+ // Trim slashes so a value like "$ENV_VAR/" still looks up "ENV_VAR"
+ if (env.get(strings.trim(registry_.url[1..], "/"))) |replaced_url| {
+ if (replaced_url.len > 1) {
+ registry.url = replaced_url;
+ }
+ }
+ }
+
var url = URL.parse(registry.url);
var auth: string = "";
+ var needs_normalize = false;
if (registry.token.len == 0) {
outer: {
@@ -79,10 +91,12 @@ pub const Registry = struct {
url.pathname = pathname;
url.path = pathname;
}
-
+ var needs_to_check_slash = true;
while (strings.lastIndexOfChar(pathname, ':')) |colon| {
var segment = pathname[colon + 1 ..];
pathname = pathname[0..colon];
+ needs_to_check_slash = false;
+ needs_normalize = true;
if (pathname.len > 1 and pathname[pathname.len - 1] == '/') {
pathname = pathname[0 .. pathname.len - 1];
}
@@ -113,6 +127,47 @@ pub const Registry = struct {
continue;
}
}
+
+ // No ":"-separated segments were found, so check the single trailing "key=value" segment.
+ if (needs_to_check_slash) {
+ if (strings.lastIndexOfChar(pathname, '/')) |last_slash| {
+ var remain = pathname[last_slash + 1 ..];
+ if (strings.indexOfChar(remain, '=')) |eql_i| {
+ const segment = remain[0..eql_i];
+ var value = remain[eql_i + 1 ..];
+
+ // https://github.com/yarnpkg/yarn/blob/6db39cf0ff684ce4e7de29669046afb8103fce3d/src/registries/npm-registry.js#L364
+ // Bearer Token
+ if (strings.eqlComptime(segment, "_authToken")) {
+ registry.token = value;
+ pathname = pathname[0 .. last_slash + 1];
+ needs_normalize = true;
+ break :outer;
+ }
+
+ if (strings.eqlComptime(segment, "_auth")) {
+ auth = value;
+ pathname = pathname[0 .. last_slash + 1];
+ needs_normalize = true;
+ break :outer;
+ }
+
+ if (strings.eqlComptime(segment, "username")) {
+ registry.username = value;
+ pathname = pathname[0 .. last_slash + 1];
+ needs_normalize = true;
+ break :outer;
+ }
+
+ if (strings.eqlComptime(segment, "_password")) {
+ registry.password = value;
+ pathname = pathname[0 .. last_slash + 1];
+ needs_normalize = true;
+ break :outer;
+ }
+ }
+ }
+ }
}
registry.username = env.getAuto(registry.username);
@@ -133,6 +188,16 @@ pub const Registry = struct {
registry.token = env.getAuto(registry.token);
+ if (needs_normalize) {
+ url = URL.parse(
+ try std.fmt.allocPrint(allocator, "{s}://{}/{s}/", .{
+ url.displayProtocol(),
+ url.displayHost(),
+ strings.trim(url.pathname, "/"),
+ }),
+ );
+ }
+
return Scope{ .name = name, .url = url, .token = registry.token, .auth = auth };
}
};
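The registry URL forms handled above mirror npm/yarn configuration; illustrative examples (values hypothetical):

    $NPM_REGISTRY_URL                                  # resolved from the environment
    https://registry.example.com/:_authToken=abc123    # bearer token, stripped from the URL
    https://registry.example.com/:_auth=dXNlcjpwYXNz   # legacy _auth, stripped likewise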
@@ -262,12 +327,18 @@ pub const OperatingSystem = enum(u16) {
return (@intFromEnum(this) & linux) != 0;
} else if (comptime Environment.isMac) {
return (@intFromEnum(this) & darwin) != 0;
+ } else if (comptime Environment.isWindows) {
+ return (@intFromEnum(this) & win32) != 0;
} else {
return false;
}
}
- const NameMap = ComptimeStringMap(u16, .{
+ pub inline fn has(this: OperatingSystem, other: u16) bool {
+ return (@intFromEnum(this) & other) != 0;
+ }
+
+ pub const NameMap = ComptimeStringMap(u16, .{
.{ "aix", aix },
.{ "darwin", darwin },
.{ "freebsd", freebsd },
@@ -318,7 +389,7 @@ pub const Architecture = enum(u16) {
pub const all_value: u16 = arm | arm64 | ia32 | mips | mipsel | ppc | ppc64 | s390 | s390x | x32 | x64;
- const NameMap = ComptimeStringMap(u16, .{
+ pub const NameMap = ComptimeStringMap(u16, .{
.{ "arm", arm },
.{ "arm64", arm64 },
.{ "ia32", ia32 },
@@ -332,6 +403,10 @@ pub const Architecture = enum(u16) {
.{ "x64", x64 },
});
+ pub inline fn has(this: Architecture, other: u16) bool {
+ return (@intFromEnum(this) & other) != 0;
+ }
+
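The now-public `NameMap` plus the `has` helper is what the lockfile JSON dump above uses to list individual flags; a sketch:

    for (Npm.Architecture.NameMap.kvs) |kv| {
        if (pkg.meta.arch.has(kv.value)) {
            // kv.key is "arm", "arm64", "x64", ... for every bit that is set
        }
    }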
pub fn isMatch(this: Architecture) bool {
if (comptime Environment.isAarch64) {
return (@intFromEnum(this) & arm64) != 0;
@@ -733,29 +808,39 @@ pub const PackageManifest = struct {
return this.findByVersion(left.version);
}
- const releases = this.pkg.releases.keys.get(this.versions);
+ if (this.findByDistTag("latest")) |result| {
+ if (group.satisfies(result.version)) {
+ if (group.flags.isSet(Semver.Query.Group.Flags.pre)) {
+ if (left.version.order(result.version, this.string_buf, this.string_buf) == .eq) {
+ // if prerelease, use latest if semver+tag match range exactly
+ return result;
+ }
+ } else {
+ return result;
+ }
+ }
+ }
- if (group.flags.isSet(Semver.Query.Group.Flags.pre)) {
- const prereleases = this.pkg.prereleases.keys.get(this.versions);
- var i = prereleases.len;
+ {
+ const releases = this.pkg.releases.keys.get(this.versions);
+ var i = releases.len;
+ // For now, this is the dumb way
while (i > 0) : (i -= 1) {
- const version = prereleases[i - 1];
- const packages = this.pkg.prereleases.values.get(this.package_versions);
+ const version = releases[i - 1];
+ const packages = this.pkg.releases.values.get(this.package_versions);
if (group.satisfies(version)) {
return .{ .version = version, .package = &packages[i - 1] };
}
}
- } else if (this.findByDistTag("latest")) |result| {
- if (group.satisfies(result.version)) return result;
}
- {
- var i = releases.len;
- // // For now, this is the dumb way
+ if (group.flags.isSet(Semver.Query.Group.Flags.pre)) {
+ const prereleases = this.pkg.prereleases.keys.get(this.versions);
+ var i = prereleases.len;
while (i > 0) : (i -= 1) {
- const version = releases[i - 1];
- const packages = this.pkg.releases.values.get(this.package_versions);
+ const version = prereleases[i - 1];
+ const packages = this.pkg.prereleases.values.get(this.package_versions);
if (group.satisfies(version)) {
return .{ .version = version, .package = &packages[i - 1] };
@@ -796,7 +881,7 @@ pub const PackageManifest = struct {
}
}
- var result = PackageManifest{};
+ var result: PackageManifest = bun.serializable(PackageManifest{});
var string_pool = String.Builder.StringPool.init(default_allocator);
defer string_pool.deinit();
@@ -1029,6 +1114,9 @@ pub const PackageManifest = struct {
var dependency_values = version_extern_strings;
var dependency_names = all_dependency_names_and_values;
var prev_extern_bin_group = extern_strings_bin_entries;
+ const empty_version = bun.serializable(PackageVersion{
+ .bin = Bin.init(),
+ });
for (versions) |prop| {
const version_name = prop.key.?.asString(allocator) orelse continue;
@@ -1048,7 +1136,7 @@ pub const PackageManifest = struct {
}
if (!parsed_version.valid) continue;
- var package_version = PackageVersion{};
+ var package_version: PackageVersion = empty_version;
if (prop.value.?.asProperty("cpu")) |cpu| {
package_version.cpu = Architecture.all;
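
Here bun.serializable builds one zero-filled PackageVersion template (empty_version) up front, and each parsed version starts from a copy of it, keeping padding bytes deterministic without re-zeroing per iteration. A cruder generic sketch of the template pattern (hypothetical helper; it zeroes everything, whereas the code above also sets field defaults, under the assumption that serializable zero-fills padding):

    const std = @import("std");

    fn zeroedTemplate(comptime T: type) T {
        var t: T = undefined;
        @memset(std.mem.asBytes(&t), 0); // every byte, including padding
        return t;
    }

    // const empty = zeroedTemplate(PackageVersion);
    // var version = empty; // cheap copy instead of re-zeroing each iteration
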
diff --git a/src/install/padding_checker.zig b/src/install/padding_checker.zig
index 1d9405a43..52d343b4f 100644
--- a/src/install/padding_checker.zig
+++ b/src/install/padding_checker.zig
@@ -55,7 +55,6 @@ pub fn assertNoUninitializedPadding(comptime T: type) void {
// if (info.layout != .Extern) {
// @compileError("assertNoUninitializedPadding(" ++ @typeName(T) ++ ") expects an extern struct type, got a struct of layout '" ++ @tagName(info.layout) ++ "'");
// }
- var i = 0;
for (info.fields) |field| {
const fieldInfo = @typeInfo(field.type);
switch (fieldInfo) {
@@ -69,9 +68,12 @@ pub fn assertNoUninitializedPadding(comptime T: type) void {
else => {},
}
}
+
if (info_ == .Union) {
return;
}
+
+ var i = 0;
for (info.fields, 0..) |field, j| {
const offset = @offsetOf(T, field.name);
if (offset != i) {
@@ -90,4 +92,17 @@ pub fn assertNoUninitializedPadding(comptime T: type) void {
}
i = offset + @sizeOf(field.type);
}
+
+ if (i != @sizeOf(T)) {
+ @compileError(std.fmt.comptimePrint(
\\Expected no possibly uninitialized bytes of memory in '{s}', but found a {d} byte gap at the end of the struct. This can be fixed by adding a padding field to the struct like `padding: [{d}]u8 = .{{0}} ** {d},` after the last field. For more information, look at `padding_checker.zig`
+ ,
+ .{
+ @typeName(T),
+ @sizeOf(T) - i,
+ @sizeOf(T) - i,
+ @sizeOf(T) - i,
+ },
+ ));
+ }
}
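
The added check closes a gap: holes between fields were already rejected, but trailing padding (when @sizeOf(T) exceeds the end of the last field because of alignment) was not. For example, this hypothetical struct would now fail to compile until the suggested field is added:

    const Example = extern struct {
        value: u32,
        flag: u8,
        // 3 bytes of trailing padding -> compile error from the check above;
        // fix: _padding: [3]u8 = .{0} ** 3,
    };
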
diff --git a/src/install/repository.zig b/src/install/repository.zig
index 564306733..17afec079 100644
--- a/src/install/repository.zig
+++ b/src/install/repository.zig
@@ -27,6 +27,12 @@ pub const Repository = extern struct {
resolved: GitSHA = .{},
package_name: String = .{},
+ pub const Hosts = bun.ComptimeStringMap(string, .{
+ .{ "bitbucket", ".org" },
+ .{ "github", ".com" },
+ .{ "gitlab", ".com" },
+ });
+
pub fn verify(this: *const Repository) void {
this.owner.assertDefined();
this.repo.assertDefined();
@@ -125,15 +131,31 @@ pub const Repository = extern struct {
if (strings.hasPrefixComptime(url, "ssh://")) {
final_path_buf[0.."https".len].* = "https".*;
bun.copy(u8, final_path_buf["https".len..], url["ssh".len..]);
- return final_path_buf[0..(url.len - "ssh".len + "https".len)];
+ return final_path_buf[0 .. url.len - "ssh".len + "https".len];
}
+
if (Dependency.isSCPLikePath(url)) {
final_path_buf[0.."https://".len].* = "https://".*;
var rest = final_path_buf["https://".len..];
+
+ const colon_index = strings.indexOfChar(url, ':');
+
+ if (colon_index) |colon| {
+ // expand known shorthand hosts (github, gitlab, bitbucket) to their `.com`/`.org` domains
+ if (Hosts.get(url[0..colon])) |tld| {
+ bun.copy(u8, rest, url[0..colon]);
+ bun.copy(u8, rest[colon..], tld);
+ rest[colon + tld.len] = '/';
+ bun.copy(u8, rest[colon + tld.len + 1 ..], url[colon + 1 ..]);
+ return final_path_buf[0 .. url.len + "https://".len + tld.len];
+ }
+ }
+
bun.copy(u8, rest, url);
- if (strings.indexOfChar(rest, ':')) |colon| rest[colon] = '/';
- return final_path_buf[0..(url.len + "https://".len)];
+ if (colon_index) |colon| rest[colon] = '/';
+ return final_path_buf[0 .. url.len + "https://".len];
}
+
return null;
}
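
With the Hosts table, bare shorthand hosts gain their TLD during the SCP-to-HTTPS rewrite, alongside the existing ssh:// and user@host:path forms. Illustrative inputs and outputs (hypothetical URLs):

    // "ssh://git@github.com/owner/repo.git" -> "https://git@github.com/owner/repo.git"
    // "git@github.com:owner/repo.git"       -> "https://git@github.com/owner/repo.git"
    // "github:owner/repo"                   -> "https://github.com/owner/repo"
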
diff --git a/src/install/resolution.zig b/src/install/resolution.zig
index 38f238bd4..84d43ff1a 100644
--- a/src/install/resolution.zig
+++ b/src/install/resolution.zig
@@ -8,12 +8,21 @@ const string = @import("../string_types.zig").string;
const ExtractTarball = @import("./extract_tarball.zig");
const strings = @import("../string_immutable.zig");
const VersionedURL = @import("./versioned_url.zig").VersionedURL;
+const bun = @import("root").bun;
pub const Resolution = extern struct {
tag: Tag = .uninitialized,
_padding: [7]u8 = .{0} ** 7,
value: Value = .{ .uninitialized = {} },
+ /// Use like Resolution.init(.{ .npm = VersionedURL{ ... } })
+ pub inline fn init(value: anytype) Resolution {
+ return Resolution{
+ .tag = @field(Tag, @typeInfo(@TypeOf(value)).Struct.fields[0].name),
+ .value = Value.init(value),
+ };
+ }
+
pub fn order(
lhs: *const Resolution,
rhs: *const Resolution,
@@ -73,51 +82,55 @@ pub const Resolution = extern struct {
}
pub fn clone(this: *const Resolution, buf: []const u8, comptime Builder: type, builder: Builder) Resolution {
- return Resolution{
+ return .{
.tag = this.tag,
.value = switch (this.tag) {
- .npm => .{
- .npm = this.value.npm.clone(buf, Builder, builder),
- },
- .local_tarball => .{
+ .npm => Value.init(.{ .npm = this.value.npm.clone(buf, Builder, builder) }),
+ .local_tarball => Value.init(.{
.local_tarball = builder.append(String, this.value.local_tarball.slice(buf)),
- },
- .folder => .{
+ }),
+ .folder => Value.init(.{
.folder = builder.append(String, this.value.folder.slice(buf)),
- },
- .remote_tarball => .{
+ }),
+ .remote_tarball => Value.init(.{
.remote_tarball = builder.append(String, this.value.remote_tarball.slice(buf)),
- },
- .workspace => .{
+ }),
+ .workspace => Value.init(.{
.workspace = builder.append(String, this.value.workspace.slice(buf)),
- },
- .symlink => .{
+ }),
+ .symlink => Value.init(.{
.symlink = builder.append(String, this.value.symlink.slice(buf)),
- },
- .single_file_module => .{
+ }),
+ .single_file_module => Value.init(.{
.single_file_module = builder.append(String, this.value.single_file_module.slice(buf)),
- },
- .git => .{
+ }),
+ .git => Value.init(.{
.git = this.value.git.clone(buf, Builder, builder),
- },
- .github => .{
+ }),
+ .github => Value.init(.{
.github = this.value.github.clone(buf, Builder, builder),
- },
- .gitlab => .{
+ }),
+ .gitlab => Value.init(.{
.gitlab = this.value.gitlab.clone(buf, Builder, builder),
+ }),
+ .root => Value.init(.{ .root = {} }),
+ else => {
+ std.debug.panic("Internal error: unexpected resolution tag: {}", .{this.tag});
},
- .root => .{ .root = {} },
- else => unreachable,
},
};
}
- pub fn fmt(this: *const Resolution, buf: []const u8) Formatter {
- return Formatter{ .resolution = this, .buf = buf };
+ pub fn fmt(this: *const Resolution, string_bytes: []const u8) Formatter {
+ return Formatter{ .resolution = this, .buf = string_bytes };
}
- pub fn fmtURL(this: *const Resolution, options: *const PackageManager.Options, buf: []const u8) URLFormatter {
- return URLFormatter{ .resolution = this, .buf = buf, .options = options };
+ pub fn fmtURL(this: *const Resolution, options: *const PackageManager.Options, string_bytes: []const u8) URLFormatter {
+ return URLFormatter{ .resolution = this, .buf = string_bytes, .options = options };
+ }
+
+ pub fn fmtForDebug(this: *const Resolution, string_bytes: []const u8) DebugFormatter {
+ return DebugFormatter{ .resolution = this, .buf = string_bytes };
}
pub fn eql(
@@ -224,6 +237,31 @@ pub const Resolution = extern struct {
}
};
+ pub const DebugFormatter = struct {
+ resolution: *const Resolution,
+ buf: []const u8,
+
+ pub fn format(formatter: DebugFormatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
+ try writer.writeAll("Resolution{ .");
+ try writer.writeAll(std.enums.tagName(Tag, formatter.resolution.tag) orelse "invalid");
+ try writer.writeAll(" = ");
+ switch (formatter.resolution.tag) {
+ .npm => try formatter.resolution.value.npm.version.fmt(formatter.buf).format(layout, opts, writer),
+ .local_tarball => try writer.writeAll(formatter.resolution.value.local_tarball.slice(formatter.buf)),
+ .folder => try writer.writeAll(formatter.resolution.value.folder.slice(formatter.buf)),
+ .remote_tarball => try writer.writeAll(formatter.resolution.value.remote_tarball.slice(formatter.buf)),
+ .git => try formatter.resolution.value.git.formatAs("git+", formatter.buf, layout, opts, writer),
+ .github => try formatter.resolution.value.github.formatAs("github:", formatter.buf, layout, opts, writer),
+ .gitlab => try formatter.resolution.value.gitlab.formatAs("gitlab:", formatter.buf, layout, opts, writer),
+ .workspace => try std.fmt.format(writer, "workspace:{s}", .{formatter.resolution.value.workspace.slice(formatter.buf)}),
+ .symlink => try std.fmt.format(writer, "link:{s}", .{formatter.resolution.value.symlink.slice(formatter.buf)}),
+ .single_file_module => try std.fmt.format(writer, "module:{s}", .{formatter.resolution.value.single_file_module.slice(formatter.buf)}),
+ else => try writer.writeAll("{}"),
+ }
+ try writer.writeAll(" }");
+ }
+ };
+
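+ // Usage (illustrative): std.debug.print("{}\n", .{res.fmtForDebug(string_bytes)});
+ // prints something like `Resolution{ .npm = 1.2.3 }` for an npm resolution.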
pub const Value = extern union {
uninitialized: void,
root: void,
@@ -248,6 +286,11 @@ pub const Resolution = extern struct {
symlink: String,
single_file_module: String,
+
+ /// To avoid undefined bytes when a smaller union member is written, we must zero-initialize the whole union first.
+ pub fn init(field: anytype) Value {
+ return bun.serializableInto(Value, field);
+ }
};
pub const Tag = enum(u8) {
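
Value.init goes through bun.serializableInto because an extern union is as large as its biggest member: writing a small member leaves the remaining bytes holding whatever was there before, which would make byte-wise hashing and lockfile serialization nondeterministic. A minimal sketch of the zero-first idea (hypothetical helper; bun's actual implementation may differ):

    const std = @import("std");

    fn zeroInit(comptime U: type, field: anytype) U {
        var u: U = undefined;
        @memset(std.mem.asBytes(&u), 0); // clear every byte, including the unused tail
        const name = @typeInfo(@TypeOf(field)).Struct.fields[0].name;
        @field(u, name) = @field(field, name);
        return u;
    }
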
diff --git a/src/install/semver.zig b/src/install/semver.zig
index f76238fa5..9572b85e2 100644
--- a/src/install/semver.zig
+++ b/src/install/semver.zig
@@ -576,23 +576,36 @@ pub const SlicedString = struct {
slice: string,
pub inline fn init(buf: string, slice: string) SlicedString {
+ if (Environment.allow_assert) {
+ if (@intFromPtr(buf.ptr) > @intFromPtr(slice.ptr)) {
+ @panic("SlicedString.init buf is not in front of slice");
+ }
+ }
return SlicedString{ .buf = buf, .slice = slice };
}
pub inline fn external(this: SlicedString) ExternalString {
- if (comptime Environment.allow_assert) std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ if (comptime Environment.allow_assert) {
+ std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ }
return ExternalString.init(this.buf, this.slice, bun.Wyhash.hash(0, this.slice));
}
pub inline fn value(this: SlicedString) String {
- if (comptime Environment.allow_assert) std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ if (comptime Environment.allow_assert) {
+ std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ }
return String.init(this.buf, this.slice);
}
pub inline fn sub(this: SlicedString, input: string) SlicedString {
- std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.buf.ptr) and ((@intFromPtr(input.ptr) + input.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ if (Environment.allow_assert) {
+ if (!(@intFromPtr(this.buf.ptr) <= @intFromPtr(input.ptr) and ((@intFromPtr(input.ptr) + input.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)))) {
+ @panic("SlicedString.sub input is not a substring of the slice");
+ }
+ }
return SlicedString{ .buf = this.buf, .slice = input };
}
};
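
All three tightened assertions enforce the same invariant: the slice (or input) must point into buf's memory, with buf starting at or before it. Illustrative (hypothetical values):

    const buf: []const u8 = "lodash@4.17.21";
    const name = buf[0..6]; // "lodash": same backing memory, so this is fine
    const sliced = SlicedString.init(buf, name);
    _ = sliced.sub(name[0..3]); // still inside buf, also fine
    // SlicedString.init("other buffer", name) would panic in debug builds
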