Diffstat (limited to 'src/install')
-rw-r--r--  src/install/dependency.zig        160
-rw-r--r--  src/install/install.zig           460
-rw-r--r--  src/install/integrity.zig          47
-rw-r--r--  src/install/lockfile.zig          901
-rw-r--r--  src/install/migration.zig         947
-rw-r--r--  src/install/npm.zig                23
-rw-r--r--  src/install/padding_checker.zig    17
-rw-r--r--  src/install/resolution.zig         47
-rw-r--r--  src/install/semver.zig             19
9 files changed, 2336 insertions, 285 deletions
diff --git a/src/install/dependency.zig b/src/install/dependency.zig
index cb73c04e1..ca0d702aa 100644
--- a/src/install/dependency.zig
+++ b/src/install/dependency.zig
@@ -49,10 +49,10 @@ version: Dependency.Version = .{},
/// - `peerDependencies`
/// Technically, having the same package name specified under multiple fields is invalid
/// But we don't want to allocate extra arrays for them. So we use a bitfield instead.
-behavior: Behavior = .uninitialized,
+behavior: Behavior = Behavior.uninitialized,
/// Sorting order for dependencies is:
-/// 1. [`dependencies`, `devDependencies`, `optionalDependencies`, `peerDependencies`]
+/// 1. [ `peerDependencies`, `optionalDependencies`, `devDependencies`, `dependencies` ]
/// 2. name ASC
/// "name" must be ASC so that later, when we rebuild the lockfile
/// we insert it back in reverse order without an extra sorting pass
@@ -147,7 +147,7 @@ pub fn toDependency(
return Dependency{
.name = name,
.name_hash = @as(u64, @bitCast(this[8..16].*)),
- .behavior = @as(Dependency.Behavior, @enumFromInt(this[16])),
+ .behavior = @bitCast(this[16]),
.version = Dependency.Version.toVersion(name, this[17..this.len].*, ctx),
};
}
@@ -156,7 +156,7 @@ pub fn toExternal(this: Dependency) External {
var bytes: External = undefined;
bytes[0..this.name.bytes.len].* = this.name.bytes;
bytes[8..16].* = @as([8]u8, @bitCast(this.name_hash));
- bytes[16] = @intFromEnum(this.behavior);
+ bytes[16] = @bitCast(this.behavior);
bytes[17..bytes.len].* = this.version.toExternal();
return bytes;
}
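
A note on the byte layout toExternal writes, since the hunk above changes byte 16 from @intFromEnum to @bitCast: bytes 0..8 hold the name, 8..16 the u64 name_hash, byte 16 the behavior flags, and 17 onward the version payload. A standalone sketch of the hash-and-flags portion (stand-in types, not Bun's actual External):

const std = @import("std");

// Illustration only: mirrors bytes[8..16] and bytes[16] from toExternal.
fn packTail(name_hash: u64, behavior_bits: u8) [9]u8 {
    var out: [9]u8 = undefined;
    out[0..8].* = @as([8]u8, @bitCast(name_hash)); // like bytes[8..16]
    out[8] = behavior_bits; // like bytes[16] = @bitCast(this.behavior)
    return out;
}

test "round-trips the hash and flags byte" {
    const bytes = packTail(0xDEADBEEF, 1 << 1);
    try std.testing.expectEqual(@as(u64, 0xDEADBEEF), @as(u64, @bitCast(bytes[0..8].*)));
    try std.testing.expectEqual(@as(u8, 1 << 1), bytes[8]);
}
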
@@ -221,12 +221,16 @@ pub inline fn isGitHubRepoPath(dependency: string) bool {
return hash_index != dependency.len - 1 and first_slash_index > 0 and first_slash_index != dependency.len - 1;
}
-// Github allows for the following format of URL:
-// https://github.com/<org>/<repo>/tarball/<ref>
-// This is a legacy (but still supported) method of retrieving a tarball of an
-// entire source tree at some git reference. (ref = branch, tag, etc. Note: branch
-// can have arbitrary number of slashes)
+/// GitHub allows the following URL format:
+/// https://github.com/<org>/<repo>/tarball/<ref>
+/// This is a legacy (but still supported) method of retrieving a tarball of an
+/// entire source tree at some git reference. (ref = branch, tag, etc. Note: a branch
+/// name can contain an arbitrary number of slashes.)
+///
+/// This also accepts a GitHub URL that ends with ".tar.gz".
pub inline fn isGitHubTarballPath(dependency: string) bool {
+ if (isTarball(dependency)) return true;
+
var parts = strings.split(dependency, "/");
var n_parts: usize = 0;
@@ -248,7 +252,7 @@ pub inline fn isTarball(dependency: string) bool {
}
pub const Version = struct {
- tag: Dependency.Version.Tag = .uninitialized,
+ tag: Tag = .uninitialized,
literal: String = .{},
value: Value = .{ .uninitialized = {} },
@@ -610,7 +614,7 @@ pub const Version = struct {
}
};
- const NpmInfo = struct {
+ pub const NpmInfo = struct {
name: String,
version: Semver.Query.Group,
@@ -619,7 +623,7 @@ pub const Version = struct {
}
};
- const TagInfo = struct {
+ pub const TagInfo = struct {
name: String,
tag: String,
@@ -628,7 +632,7 @@ pub const Version = struct {
}
};
- const TarballInfo = struct {
+ pub const TarballInfo = struct {
uri: URI,
package_name: String = .{},
@@ -670,7 +674,8 @@ pub inline fn parse(
sliced: *const SlicedString,
log: ?*logger.Log,
) ?Version {
- return parseWithOptionalTag(allocator, alias, dependency, null, sliced, log);
+ const dep = std.mem.trimLeft(u8, dependency, " \t\n\r");
+ return parseWithTag(allocator, alias, dep, Version.Tag.infer(dep), sliced, log);
}
pub fn parseWithOptionalTag(
@@ -888,6 +893,12 @@ pub fn parseWithTag(
.literal = sliced.value(),
.value = .{ .tarball = .{ .uri = .{ .local = sliced.sub(dependency[7..]).value() } } },
};
+ } else if (strings.hasPrefixComptime(dependency, "file:")) {
+ return .{
+ .tag = .tarball,
+ .literal = sliced.value(),
+ .value = .{ .tarball = .{ .uri = .{ .local = sliced.sub(dependency[5..]).value() } } },
+ };
} else if (strings.contains(dependency, "://")) {
if (log_) |log| log.addErrorFmt(null, logger.Loc.Empty, allocator, "invalid or unsupported dependency \"{s}\"", .{dependency}) catch unreachable;
return null;
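
The added branch accepts bare "file:" specifiers (5 bytes) alongside the earlier 7-byte prefix handled just above it (presumably "file://"), both yielding a local tarball URI. A hedged sketch of just the prefix stripping, without Bun's SlicedString machinery; note the longer prefix must be tested first:

const std = @import("std");

// Stand-in for the two branches: returns the local path portion,
// or null if the specifier carries neither prefix.
fn localTarballPath(dep: []const u8) ?[]const u8 {
    if (std.mem.startsWith(u8, dep, "file://")) return dep["file://".len..];
    if (std.mem.startsWith(u8, dep, "file:")) return dep["file:".len..];
    return null;
}

test {
    try std.testing.expectEqualStrings("./a.tgz", localTarballPath("file:./a.tgz").?);
    try std.testing.expectEqualStrings("/b.tgz", localTarballPath("file:///b.tgz").?);
}
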
@@ -950,78 +961,83 @@ pub fn parseWithTag(
}
}
-pub const Behavior = enum(u8) {
- uninitialized = 0,
- _,
+pub const Behavior = packed struct(u8) {
+ pub const uninitialized: Behavior = .{};
+
+    // these padding fields keep the bit layout compatible
+    // with older versions of lockfile v2
+ _unused_1: u1 = 0,
+
+ normal: bool = false,
+ optional: bool = false,
+ dev: bool = false,
+ peer: bool = false,
+ workspace: bool = false,
+
+ _unused_2: u2 = 0,
- pub const normal: u8 = 1 << 1;
- pub const optional: u8 = 1 << 2;
- pub const dev: u8 = 1 << 3;
- pub const peer: u8 = 1 << 4;
- pub const workspace: u8 = 1 << 5;
+ pub const normal = Behavior{ .normal = true };
+ pub const optional = Behavior{ .optional = true };
+ pub const dev = Behavior{ .dev = true };
+ pub const peer = Behavior{ .peer = true };
+ pub const workspace = Behavior{ .workspace = true };
pub inline fn isNormal(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.normal) != 0;
+ return this.normal;
}
pub inline fn isOptional(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.optional) != 0 and !this.isPeer();
+ return this.optional and !this.isPeer();
}
pub inline fn isDev(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.dev) != 0;
+ return this.dev;
}
pub inline fn isPeer(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.peer) != 0;
+ return this.peer;
}
pub inline fn isWorkspace(this: Behavior) bool {
- return (@intFromEnum(this) & Behavior.workspace) != 0;
+ return this.workspace;
}
pub inline fn setNormal(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.normal));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.normal));
- }
+ var b = this;
+ b.normal = value;
+ return b;
}
pub inline fn setOptional(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.optional));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.optional));
- }
+ var b = this;
+ b.optional = value;
+ return b;
}
pub inline fn setDev(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.dev));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.dev));
- }
+ var b = this;
+ b.dev = value;
+ return b;
}
pub inline fn setPeer(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.peer));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.peer));
- }
+ var b = this;
+ b.peer = value;
+ return b;
}
pub inline fn setWorkspace(this: Behavior, value: bool) Behavior {
- if (value) {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) | Behavior.workspace));
- } else {
- return @as(Behavior, @enumFromInt(@intFromEnum(this) & ~Behavior.workspace));
- }
+ var b = this;
+ b.workspace = value;
+ return b;
+ }
+
+ pub inline fn eq(lhs: Behavior, rhs: Behavior) bool {
+ return @as(u8, @bitCast(lhs)) == @as(u8, @bitCast(rhs));
}
pub inline fn cmp(lhs: Behavior, rhs: Behavior) std.math.Order {
- if (@intFromEnum(lhs) == @intFromEnum(rhs)) {
+ if (eq(lhs, rhs)) {
return .eq;
}
@@ -1074,4 +1090,42 @@ pub const Behavior = enum(u8) {
(features.peer_dependencies and this.isPeer()) or
this.isWorkspace();
}
+
+ pub fn format(self: Behavior, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
+ const fields = std.meta.fields(Behavior);
+ var num_fields: u8 = 0;
+ inline for (fields) |f| {
+ if (f.type == bool and @field(self, f.name)) {
+ num_fields += 1;
+ }
+ }
+ switch (num_fields) {
+ 0 => try writer.writeAll("Behavior.uninitialized"),
+ 1 => {
+ inline for (fields) |f| {
+ if (f.type == bool and @field(self, f.name)) {
+ try writer.writeAll("Behavior." ++ f.name);
+ break;
+ }
+ }
+ },
+ else => {
+ try writer.writeAll("Behavior{");
+ inline for (fields) |f| {
+ if (f.type == bool and @field(self, f.name)) {
+ try writer.writeAll(" " ++ f.name);
+ }
+ }
+ try writer.writeAll(" }");
+ },
+ }
+ }
+
+ comptime {
+ std.debug.assert(@as(u8, @bitCast(Behavior.normal)) == (1 << 1));
+ std.debug.assert(@as(u8, @bitCast(Behavior.optional)) == (1 << 2));
+ std.debug.assert(@as(u8, @bitCast(Behavior.dev)) == (1 << 3));
+ std.debug.assert(@as(u8, @bitCast(Behavior.peer)) == (1 << 4));
+ std.debug.assert(@as(u8, @bitCast(Behavior.workspace)) == (1 << 5));
+ }
};
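
The comptime asserts above pin the new packed struct to the legacy shifted constants. A self-contained sketch (a stand-in struct with the same bit positions, not Bun's Behavior) of the round-trip that toDependency/toExternal now rely on:

const std = @import("std");

const Flags = packed struct(u8) {
    _unused_1: u1 = 0, // bit 0 reserved so `normal` stays at 1 << 1
    normal: bool = false,
    optional: bool = false,
    dev: bool = false,
    peer: bool = false,
    workspace: bool = false,
    _unused_2: u2 = 0,
};

test "packed struct round-trips through the legacy byte encoding" {
    const legacy_peer: u8 = 1 << 4;
    const decoded: Flags = @bitCast(legacy_peer); // what toDependency now does
    try std.testing.expect(decoded.peer);
    try std.testing.expectEqual(legacy_peer, @as(u8, @bitCast(decoded))); // toExternal
}
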
diff --git a/src/install/install.zig b/src/install/install.zig
index abac43493..c52b4bfa8 100644
--- a/src/install/install.zig
+++ b/src/install/install.zig
@@ -123,8 +123,8 @@ pub fn ExternalSlice(comptime Type: type) type {
pub fn ExternalSliceAligned(comptime Type: type, comptime alignment_: ?u29) type {
return extern struct {
- const alignment = alignment_ orelse @alignOf(*Type);
- const Slice = @This();
+ pub const alignment = alignment_ orelse @alignOf(*Type);
+ pub const Slice = @This();
pub const Child: type = Type;
@@ -170,7 +170,7 @@ pub const ExternalStringMap = extern struct {
value: ExternalStringList = .{},
};
-pub const PackageNameHash = u64;
+pub const PackageNameHash = u64; // Use String.Builder.stringHash to compute this
pub const Aligner = struct {
pub fn write(comptime Type: type, comptime Writer: type, writer: Writer, pos: usize) !usize {
@@ -1702,6 +1702,8 @@ pub const PackageManager = struct {
onWake: WakeHandler = .{},
ci_mode: bun.LazyBool(computeIsContinuousIntegration, @This(), "ci_mode") = .{},
+ peer_dependencies: std.ArrayListUnmanaged(DependencyID) = .{},
+
const PreallocatedNetworkTasks = std.BoundedArray(NetworkTask, 1024);
const NetworkTaskQueue = std.HashMapUnmanaged(u64, void, IdentityContext(u64), 80);
pub var verbose_install = false;
@@ -1818,6 +1820,7 @@ pub const PackageManager = struct {
dep_id,
&this.lockfile.buffers.dependencies.items[dep_id],
invalid_package_id,
+ false,
assignRootResolution,
failRootResolution,
) catch |err| {
@@ -1842,6 +1845,7 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ false,
log_level,
) catch |err| {
return .{ .failure = err };
@@ -2486,13 +2490,14 @@ pub const PackageManager = struct {
behavior: Behavior,
manifest: *const Npm.PackageManifest,
find_result: Npm.PackageManifest.FindResult,
+ install_peer: bool,
comptime successFn: SuccessFn,
) !?ResolvedPackageResult {
// Was this package already allocated? Let's reuse the existing one.
if (this.lockfile.getPackageID(
name_hash,
- if (behavior.isPeer()) version else null,
+ if (behavior.isPeer() and !install_peer) version else null,
&.{
.tag = .npm,
.value = .{
@@ -2508,7 +2513,7 @@ pub const PackageManager = struct {
.package = this.lockfile.packages.get(id),
.is_first_time = false,
};
- } else if (behavior.isPeer()) {
+ } else if (behavior.isPeer() and !install_peer) {
return null;
}
@@ -2613,7 +2618,7 @@ pub const PackageManager = struct {
if (comptime Environment.allow_assert) {
std.debug.assert(dependency_id < buffers.resolutions.items.len);
std.debug.assert(package_id < this.lockfile.packages.len);
- std.debug.assert(buffers.resolutions.items[dependency_id] == invalid_package_id);
+ // std.debug.assert(buffers.resolutions.items[dependency_id] == invalid_package_id);
}
buffers.resolutions.items[dependency_id] = package_id;
const string_buf = buffers.string_bytes.items;
@@ -2648,6 +2653,7 @@ pub const PackageManager = struct {
behavior: Behavior,
dependency_id: DependencyID,
resolution: PackageID,
+ install_peer: bool,
comptime successFn: SuccessFn,
) !?ResolvedPackageResult {
name.assertDefined();
@@ -2660,7 +2666,7 @@ pub const PackageManager = struct {
.npm, .dist_tag => {
if (version.tag == .npm) {
if (this.lockfile.workspace_versions.count() > 0) resolve_from_workspace: {
- if (this.lockfile.workspace_versions.get(@truncate(name_hash))) |workspace_version| {
+ if (this.lockfile.workspace_versions.get(name_hash)) |workspace_version| {
if (version.value.npm.version.satisfies(workspace_version)) {
const root_package = this.lockfile.rootPackage() orelse break :resolve_from_workspace;
const root_dependencies = root_package.dependencies.get(this.lockfile.buffers.dependencies.items);
@@ -2668,6 +2674,8 @@ pub const PackageManager = struct {
for (root_dependencies, root_resolutions) |root_dep, workspace_package_id| {
if (workspace_package_id != invalid_package_id and root_dep.version.tag == .workspace and root_dep.name_hash == name_hash) {
+ // make sure verifyResolutions sees this resolution as a valid package id
+ this.lockfile.buffers.resolutions.items[dependency_id] = workspace_package_id;
return .{
.package = this.lockfile.packages.get(workspace_package_id),
.is_first_time = false,
@@ -2699,6 +2707,7 @@ pub const PackageManager = struct {
behavior,
manifest,
find_result,
+ install_peer,
successFn,
);
},
@@ -2972,11 +2981,13 @@ pub const PackageManager = struct {
/// This must be a *const to prevent UB
dependency: *const Dependency,
resolution: PackageID,
+ install_peer: bool,
) !void {
return this.enqueueDependencyWithMainAndSuccessFn(
id,
dependency,
resolution,
+ install_peer,
assignResolution,
null,
);
@@ -2992,19 +3003,35 @@ pub const PackageManager = struct {
/// This must be a *const to prevent UB
dependency: *const Dependency,
resolution: PackageID,
+ install_peer: bool,
comptime successFn: SuccessFn,
comptime failFn: ?FailFn,
) !void {
- const name = dependency.realname();
+ var name = dependency.realname();
- const name_hash = switch (dependency.version.tag) {
+ var name_hash = switch (dependency.version.tag) {
.dist_tag, .git, .github, .npm, .tarball, .workspace => String.Builder.stringHash(this.lockfile.str(&name)),
else => dependency.name_hash,
};
- const version = dependency.version;
+ const version = version: {
+ if (this.lockfile.overrides.get(name_hash)) |new| {
+ debug("override: {s} -> {s}", .{ this.lockfile.str(&dependency.version.literal), this.lockfile.str(&new.literal) });
+ name = switch (new.tag) {
+ .dist_tag => new.value.dist_tag.name,
+ .git => new.value.git.package_name,
+ .github => new.value.github.package_name,
+ .npm => new.value.npm.name,
+ .tarball => new.value.tarball.package_name,
+ else => name,
+ };
+ name_hash = String.Builder.stringHash(this.lockfile.str(&name));
+ break :version new;
+ }
+ break :version dependency.version;
+ };
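
The labeled block above swaps in an override, keyed by the package's name hash, before resolution begins, falling back to the dependency's own version when no override matches. The lookup in miniature (a plain std.AutoHashMap with version strings standing in for Bun's OverrideMap and Dependency.Version):

const std = @import("std");

fn effectiveVersion(
    overrides: *const std.AutoHashMap(u64, []const u8),
    name_hash: u64,
    requested: []const u8,
) []const u8 {
    // Mirrors `this.lockfile.overrides.get(name_hash)` with the
    // `break :version dependency.version` fallback.
    return overrides.get(name_hash) orelse requested;
}

test {
    var overrides = std.AutoHashMap(u64, []const u8).init(std.testing.allocator);
    defer overrides.deinit();
    try overrides.put(42, "1.2.3");
    try std.testing.expectEqualStrings("1.2.3", effectiveVersion(&overrides, 42, "^1.0.0"));
    try std.testing.expectEqualStrings("^1.0.0", effectiveVersion(&overrides, 7, "^1.0.0"));
}
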
var loaded_manifest: ?Npm.PackageManifest = null;
- switch (dependency.version.tag) {
+ switch (version.tag) {
.dist_tag, .folder, .npm => {
retry_from_manifests_ptr: while (true) {
var resolve_result_ = this.getOrPutResolvedPackage(
@@ -3014,6 +3041,7 @@ pub const PackageManager = struct {
dependency.behavior,
id,
resolution,
+ install_peer,
successFn,
);
@@ -3116,13 +3144,13 @@ pub const PackageManager = struct {
"enqueueDependency({d}, {s}, {s}, {s}) = {d}",
.{
id,
- @tagName(dependency.version.tag),
+ @tagName(version.tag),
this.lockfile.str(&name),
this.lockfile.str(&version.literal),
result.package.meta.id,
},
);
- } else if (dependency.version.tag.isNPM()) {
+ } else if (version.tag.isNPM()) {
const name_str = this.lockfile.str(&name);
const task_id = Task.Id.forManifest(name_str);
@@ -3133,14 +3161,14 @@ pub const PackageManager = struct {
"enqueueDependency({d}, {s}, {s}, {s}) = task {d}",
.{
id,
- @tagName(dependency.version.tag),
+ @tagName(version.tag),
this.lockfile.str(&name),
this.lockfile.str(&version.literal),
task_id,
},
);
- if (!dependency.behavior.isPeer()) {
+ if (!dependency.behavior.isPeer() or install_peer) {
var network_entry = try this.network_dedupe_map.getOrPutContext(this.allocator, task_id, .{});
if (!network_entry.found_existing) {
if (this.options.enable.manifest_cache) {
@@ -3154,8 +3182,8 @@ pub const PackageManager = struct {
// If it's an exact package version already living in the cache
// We can skip the network request, even if it's beyond the caching period
- if (dependency.version.tag == .npm and dependency.version.value.npm.version.isExact()) {
- if (loaded_manifest.?.findByVersion(dependency.version.value.npm.version.head.head.range.left.version)) |find_result| {
+ if (version.tag == .npm and version.value.npm.version.isExact()) {
+ if (loaded_manifest.?.findByVersion(version.value.npm.version.head.head.range.left.version)) |find_result| {
if (this.getOrPutResolvedPackageWithFindResult(
name_hash,
name,
@@ -3164,6 +3192,7 @@ pub const PackageManager = struct {
dependency.behavior,
&loaded_manifest.?,
find_result,
+ install_peer,
successFn,
) catch null) |new_resolve_result| {
resolve_result_ = new_resolve_result;
@@ -3201,6 +3230,10 @@ pub const PackageManager = struct {
);
this.enqueueNetworkTask(network_task);
}
+ } else {
+ if (this.options.do.install_peer_dependencies) {
+ try this.peer_dependencies.append(this.allocator, id);
+ }
}
var manifest_entry_parse = try this.task_queue.getOrPutContext(this.allocator, task_id, .{});
@@ -3217,7 +3250,7 @@ pub const PackageManager = struct {
return;
},
.git => {
- const dep = &dependency.version.value.git;
+ const dep = &version.value.git;
const res = Resolution{
.tag = .git,
.value = .{
@@ -3245,7 +3278,7 @@ pub const PackageManager = struct {
"enqueueDependency({d}, {s}, {s}, {s}) = {s}",
.{
id,
- @tagName(dependency.version.tag),
+ @tagName(version.tag),
this.lockfile.str(&name),
this.lockfile.str(&version.literal),
url,
@@ -3296,7 +3329,7 @@ pub const PackageManager = struct {
}
},
.github => {
- const dep = &dependency.version.value.github;
+ const dep = &version.value.github;
const res = Resolution{
.tag = .github,
.value = .{
@@ -3323,7 +3356,7 @@ pub const PackageManager = struct {
"enqueueDependency({d}, {s}, {s}, {s}) = {s}",
.{
id,
- @tagName(dependency.version.tag),
+ @tagName(version.tag),
this.lockfile.str(&name),
this.lockfile.str(&version.literal),
url,
@@ -3350,6 +3383,7 @@ pub const PackageManager = struct {
dependency.behavior,
id,
resolution,
+ install_peer,
successFn,
) catch |err| brk: {
if (err == error.MissingPackageJSON) {
@@ -3403,7 +3437,7 @@ pub const PackageManager = struct {
"enqueueDependency({d}, {s}, {s}, {s}) = {d}",
.{
id,
- @tagName(dependency.version.tag),
+ @tagName(version.tag),
this.lockfile.str(&name),
this.lockfile.str(&version.literal),
result.package.meta.id,
@@ -3458,7 +3492,7 @@ pub const PackageManager = struct {
}
},
.tarball => {
- const res: Resolution = switch (dependency.version.value.tarball.uri) {
+ const res: Resolution = switch (version.value.tarball.uri) {
.local => |path| .{
.tag = .local_tarball,
.value = .{
@@ -3479,7 +3513,7 @@ pub const PackageManager = struct {
return;
}
- const url = switch (dependency.version.value.tarball.uri) {
+ const url = switch (version.value.tarball.uri) {
.local => |path| this.lockfile.str(&path),
.remote => |url| this.lockfile.str(&url),
};
@@ -3494,7 +3528,7 @@ pub const PackageManager = struct {
"enqueueDependency({d}, {s}, {s}, {s}) = {s}",
.{
id,
- @tagName(dependency.version.tag),
+ @tagName(version.tag),
this.lockfile.str(&name),
this.lockfile.str(&version.literal),
url,
@@ -3505,7 +3539,7 @@ pub const PackageManager = struct {
try entry.value_ptr.append(this.allocator, @unionInit(TaskCallbackContext, callback_tag, id));
if (dependency.behavior.isPeer()) return;
- switch (dependency.version.value.tarball.uri) {
+ switch (version.value.tarball.uri) {
.local => {
const network_entry = try this.network_dedupe_map.getOrPutContext(this.allocator, task_id, .{});
if (network_entry.found_existing) return;
@@ -3554,6 +3588,7 @@ pub const PackageManager = struct {
i,
&dependency,
lockfile.buffers.resolutions.items[i],
+ false,
) catch {};
}
}
@@ -3593,8 +3628,36 @@ pub const PackageManager = struct {
const lockfile = this.lockfile;
// Step 1. Go through main dependencies
- var i = dependencies_list.off;
+ var begin = dependencies_list.off;
const end = dependencies_list.off +| dependencies_list.len;
+
+    // if a dependency is a peer and is also going to be installed
+    // through "dependencies", skip it
+ if (end - begin > 1 and lockfile.buffers.dependencies.items[0].behavior.isPeer()) {
+ var peer_i: usize = 0;
+ var peer = &lockfile.buffers.dependencies.items[peer_i];
+ while (peer.behavior.isPeer()) {
+ var dep_i: usize = end - 1;
+ var dep = lockfile.buffers.dependencies.items[dep_i];
+ while (!dep.behavior.isPeer()) {
+ if (!dep.behavior.isDev()) {
+ if (peer.name_hash == dep.name_hash) {
+ peer.* = lockfile.buffers.dependencies.items[begin];
+ begin += 1;
+ break;
+ }
+ }
+ dep_i -= 1;
+ dep = lockfile.buffers.dependencies.items[dep_i];
+ }
+ peer_i += 1;
+ if (peer_i == end) break;
+ peer = &lockfile.buffers.dependencies.items[peer_i];
+ }
+ }
+
+ var i = begin;
+
// we have to be very careful with pointers here
while (i < end) : (i += 1) {
const dependency = lockfile.buffers.dependencies.items[i];
@@ -3603,6 +3666,7 @@ pub const PackageManager = struct {
i,
&dependency,
resolution,
+ false,
) catch |err| {
const note = .{
.fmt = "error occured while resolving {s}",
@@ -3633,7 +3697,12 @@ pub const PackageManager = struct {
_ = this.scheduleTasks();
}
- fn processDependencyListItem(this: *PackageManager, item: TaskCallbackContext, any_root: ?*bool) !void {
+ fn processDependencyListItem(
+ this: *PackageManager,
+ item: TaskCallbackContext,
+ any_root: ?*bool,
+ install_peer: bool,
+ ) !void {
switch (item) {
.dependency => |dependency_id| {
const dependency = this.lockfile.buffers.dependencies.items[dependency_id];
@@ -3643,6 +3712,7 @@ pub const PackageManager = struct {
dependency_id,
&dependency,
resolution,
+ install_peer,
);
},
.root_dependency => |dependency_id| {
@@ -3653,6 +3723,7 @@ pub const PackageManager = struct {
dependency_id,
&dependency,
resolution,
+ install_peer,
assignRootResolution,
failRootResolution,
);
@@ -3667,18 +3738,37 @@ pub const PackageManager = struct {
}
}
+ fn processPeerDependencyList(
+ this: *PackageManager,
+ ) !void {
+ if (this.peer_dependencies.items.len > 0) {
+ for (this.peer_dependencies.items) |peer_dependency_id| {
+ try this.processDependencyListItem(.{ .dependency = peer_dependency_id }, null, true);
+ const dependency = this.lockfile.buffers.dependencies.items[peer_dependency_id];
+ const resolution = this.lockfile.buffers.resolutions.items[peer_dependency_id];
+ try this.enqueueDependencyWithMain(
+ peer_dependency_id,
+ &dependency,
+ resolution,
+ true,
+ );
+ }
+ }
+ }
+
fn processDependencyList(
this: *PackageManager,
dep_list: TaskCallbackList,
comptime Context: type,
ctx: Context,
comptime callbacks: anytype,
+ install_peer: bool,
) !void {
if (dep_list.items.len > 0) {
var dependency_list = dep_list;
var any_root = false;
for (dependency_list.items) |item| {
- try this.processDependencyListItem(item, &any_root);
+ try this.processDependencyListItem(item, &any_root, install_peer);
}
if (comptime @TypeOf(callbacks) != void and @TypeOf(callbacks.onResolve) != void) {
@@ -3877,6 +3967,7 @@ pub const PackageManager = struct {
comptime ExtractCompletionContext: type,
extract_ctx: ExtractCompletionContext,
comptime callbacks: anytype,
+ install_peer: bool,
comptime log_level: Options.LogLevel,
) anyerror!void {
var has_updated_this_run = false;
@@ -4072,7 +4163,7 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks);
+ try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer);
continue;
}
@@ -4249,7 +4340,7 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks);
+ try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer);
if (comptime log_level.showProgress()) {
if (!has_updated_this_run) {
@@ -4335,7 +4426,7 @@ pub const PackageManager = struct {
},
else => unreachable,
}
- try manager.processDependencyListItem(dep, &any_root);
+ try manager.processDependencyListItem(dep, &any_root, install_peer);
},
else => {
// if it's a node_module folder to install, handle that after we process all the dependencies within the onExtract callback.
@@ -4350,12 +4441,15 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, void, {}, {});
+ try manager.processDependencyList(dependency_list, void, {}, {}, install_peer);
}
manager.setPreinstallState(package_id, manager.lockfile, .done);
if (comptime @TypeOf(callbacks.onExtract) != void) {
+ if (ExtractCompletionContext == *PackageInstaller) {
+ extract_ctx.fixCachedLockfilePackageSlices();
+ }
callbacks.onExtract(extract_ctx, dependency_id, task.data.extract, comptime log_level);
}
@@ -4401,7 +4495,7 @@ pub const PackageManager = struct {
var dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
- try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks);
+ try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer);
if (comptime log_level.showProgress()) {
if (!has_updated_this_run) {
@@ -4461,7 +4555,7 @@ pub const PackageManager = struct {
var repo = &manager.lockfile.buffers.dependencies.items[id].version.value.git;
repo.resolved = pkg.resolution.value.git.resolved;
repo.package_name = pkg.name;
- try manager.processDependencyListItem(dep, &any_root);
+ try manager.processDependencyListItem(dep, &any_root, install_peer);
},
else => {
// if it's a node_module folder to install, handle that after we process all the dependencies within the onExtract callback.
@@ -4725,6 +4819,7 @@ pub const PackageManager = struct {
}
if (bun_install.save_peer) |save| {
+ this.do.install_peer_dependencies = save;
this.remote_package_features.peer_dependencies = save;
}
@@ -4995,6 +5090,7 @@ pub const PackageManager = struct {
print_meta_hash_string: bool = false,
verify_integrity: bool = true,
summary: bool = true,
+ install_peer_dependencies: bool = true,
};
pub const Enable = struct {
@@ -6276,7 +6372,6 @@ pub const PackageManager = struct {
request.name = allocator.dupe(u8, name) catch unreachable;
request.name_hash = String.Builder.stringHash(name);
} else if (version.tag == .github and version.value.github.committish.isEmpty()) {
- request.name = input;
request.name_hash = String.Builder.stringHash(version.literal.slice(input));
} else {
request.name_hash = String.Builder.stringHash(version.literal.slice(input));
@@ -6768,6 +6863,7 @@ pub const PackageManager = struct {
folder_path_buf: [bun.MAX_PATH_BYTES]u8 = undefined,
install_count: usize = 0,
successfully_installed: Bitset,
+ tree_iterator: *Lockfile.Tree.Iterator,
// For linking native binaries, we only want to link after we've installed the companion dependencies
// We don't want to introduce dependent callbacks like that for every single package
@@ -6779,6 +6875,16 @@ pub const PackageManager = struct {
node_modules_folder: std.fs.IterableDir,
};
+ /// Call when you mutate the length of `lockfile.packages`
+ pub fn fixCachedLockfilePackageSlices(this: *PackageInstaller) void {
+ var packages = this.lockfile.packages.slice();
+ this.metas = packages.items(.meta);
+ this.names = packages.items(.name);
+ this.bins = packages.items(.bin);
+ this.resolutions = packages.items(.resolution);
+ this.tree_iterator.reload(this.lockfile);
+ }
+
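
Why fixCachedLockfilePackageSlices exists, shown in isolation: slices returned by a MultiArrayList point into a single backing allocation, so growing the list may reallocate and strand any slice cached earlier (PackageInstaller caches several as fields). A minimal illustration with std.MultiArrayList and a toy element type:

const std = @import("std");

const Pkg = struct { id: u32, flag: bool };

test "cached MultiArrayList slices must be refreshed after growth" {
    var list = std.MultiArrayList(Pkg){};
    defer list.deinit(std.testing.allocator);
    try list.append(std.testing.allocator, .{ .id = 1, .flag = false });

    // Analogous to PackageInstaller caching `parts.items(.meta)` etc.
    const ids_before = list.items(.id);
    try std.testing.expectEqual(@as(usize, 1), ids_before.len);

    // Growth may reallocate; re-fetch instead of reusing ids_before.
    try list.append(std.testing.allocator, .{ .id = 2, .flag = true });
    const ids_after = list.items(.id);
    try std.testing.expectEqual(@as(usize, 2), ids_after.len);
}
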
/// Install versions of a package which are waiting on a network request
pub fn installEnqueuedPackages(
this: *PackageInstaller,
@@ -7387,38 +7493,38 @@ pub const PackageManager = struct {
var summary = PackageInstall.Summary{};
{
- var parts = lockfile.packages.slice();
- var metas = parts.items(.meta);
- var names = parts.items(.name);
- var dependencies = lockfile.buffers.dependencies.items;
- const resolutions_buffer: []const PackageID = lockfile.buffers.resolutions.items;
- const resolution_lists: []const Lockfile.PackageIDSlice = parts.items(.resolutions);
- var resolutions = parts.items(.resolution);
-
var iterator = Lockfile.Tree.Iterator.init(lockfile);
- var installer = PackageInstaller{
- .manager = this,
- .options = &this.options,
- .metas = metas,
- .bins = parts.items(.bin),
- .root_node_modules_folder = node_modules_folder,
- .names = names,
- .resolutions = resolutions,
- .lockfile = lockfile,
- .node = &install_node,
- .node_modules_folder = node_modules_folder,
- .progress = progress,
- .skip_verify_installed_version_number = skip_verify_installed_version_number,
- .skip_delete = skip_delete,
- .summary = &summary,
- .global_bin_dir = this.options.global_bin_dir,
- .force_install = force_install,
- .install_count = lockfile.buffers.hoisted_dependencies.items.len,
- .successfully_installed = try Bitset.initEmpty(
- this.allocator,
- lockfile.packages.len,
- ),
+ var installer: PackageInstaller = brk: {
+            // These slices potentially get resized during iteration,
+            // so we keep them out of scope for the rest of this function
+            // to make mistakes harder.
+ var parts = lockfile.packages.slice();
+
+ break :brk PackageInstaller{
+ .manager = this,
+ .options = &this.options,
+ .metas = parts.items(.meta),
+ .bins = parts.items(.bin),
+ .root_node_modules_folder = node_modules_folder,
+ .names = parts.items(.name),
+ .resolutions = parts.items(.resolution),
+ .lockfile = lockfile,
+ .node = &install_node,
+ .node_modules_folder = node_modules_folder,
+ .progress = progress,
+ .skip_verify_installed_version_number = skip_verify_installed_version_number,
+ .skip_delete = skip_delete,
+ .summary = &summary,
+ .global_bin_dir = this.options.global_bin_dir,
+ .force_install = force_install,
+ .install_count = lockfile.buffers.hoisted_dependencies.items.len,
+ .successfully_installed = try Bitset.initEmpty(
+ this.allocator,
+ lockfile.packages.len,
+ ),
+ .tree_iterator = &iterator,
+ };
};
while (iterator.nextNodeModulesFolder()) |node_modules| {
@@ -7460,6 +7566,7 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ true,
log_level,
);
if (!installer.options.do.install_packages) return error.InstallFailed;
@@ -7479,6 +7586,7 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ true,
log_level,
);
if (!installer.options.do.install_packages) return error.InstallFailed;
@@ -7494,6 +7602,7 @@ pub const PackageManager = struct {
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
+ true,
log_level,
);
@@ -7508,87 +7617,95 @@ pub const PackageManager = struct {
if (!installer.options.do.install_packages) return error.InstallFailed;
summary.successfully_installed = installer.successfully_installed;
- outer: for (installer.platform_binlinks.items) |deferred| {
- const dependency_id = deferred.dependency_id;
- const package_id = resolutions_buffer[dependency_id];
- const folder = deferred.node_modules_folder;
-
- const package_resolutions: []const PackageID = resolution_lists[package_id].get(resolutions_buffer);
- const original_bin: Bin = installer.bins[package_id];
-
- for (package_resolutions) |resolved_id| {
- if (resolved_id >= names.len) continue;
- const meta: Lockfile.Package.Meta = metas[resolved_id];
-
- // This is specifically for platform-specific binaries
- if (meta.os == .all and meta.arch == .all) continue;
-
- // Don't attempt to link incompatible binaries
- if (meta.isDisabled()) continue;
-
- const name = lockfile.str(&dependencies[dependency_id].name);
-
- if (!installer.has_created_bin) {
- if (!this.options.global) {
- if (comptime Environment.isWindows) {
- std.os.mkdiratW(node_modules_folder.dir.fd, bun.strings.w(".bin"), 0) catch {};
- } else {
- node_modules_folder.dir.makeDirZ(".bin") catch {};
+ {
+ var parts = lockfile.packages.slice();
+ var metas = parts.items(.meta);
+ var names = parts.items(.name);
+ var dependencies = lockfile.buffers.dependencies.items;
+ const resolutions_buffer: []const PackageID = lockfile.buffers.resolutions.items;
+ const resolution_lists: []const Lockfile.PackageIDSlice = parts.items(.resolutions);
+ outer: for (installer.platform_binlinks.items) |deferred| {
+ const dependency_id = deferred.dependency_id;
+ const package_id = resolutions_buffer[dependency_id];
+ const folder = deferred.node_modules_folder;
+
+ const package_resolutions: []const PackageID = resolution_lists[package_id].get(resolutions_buffer);
+ const original_bin: Bin = installer.bins[package_id];
+
+ for (package_resolutions) |resolved_id| {
+ if (resolved_id >= names.len) continue;
+ const meta: Lockfile.Package.Meta = metas[resolved_id];
+
+ // This is specifically for platform-specific binaries
+ if (meta.os == .all and meta.arch == .all) continue;
+
+ // Don't attempt to link incompatible binaries
+ if (meta.isDisabled()) continue;
+
+ const name = lockfile.str(&dependencies[dependency_id].name);
+
+ if (!installer.has_created_bin) {
+ if (!this.options.global) {
+ if (comptime Environment.isWindows) {
+ std.os.mkdiratW(node_modules_folder.dir.fd, bun.strings.w(".bin"), 0) catch {};
+ } else {
+ node_modules_folder.dir.makeDirZ(".bin") catch {};
+ }
}
+ if (comptime Environment.isPosix)
+ Bin.Linker.umask = C.umask(0);
+ installer.has_created_bin = true;
}
- if (comptime Environment.isPosix)
- Bin.Linker.umask = C.umask(0);
- installer.has_created_bin = true;
- }
- var bin_linker = Bin.Linker{
- .bin = original_bin,
- .package_installed_node_modules = bun.toFD(folder.dir.fd),
- .root_node_modules_folder = bun.toFD(node_modules_folder.dir.fd),
- .global_bin_path = this.options.bin_path,
- .global_bin_dir = this.options.global_bin_dir.dir,
+ var bin_linker = Bin.Linker{
+ .bin = original_bin,
+ .package_installed_node_modules = bun.toFD(folder.dir.fd),
+ .root_node_modules_folder = bun.toFD(node_modules_folder.dir.fd),
+ .global_bin_path = this.options.bin_path,
+ .global_bin_dir = this.options.global_bin_dir.dir,
- .package_name = strings.StringOrTinyString.init(name),
- .string_buf = lockfile.buffers.string_bytes.items,
- .extern_string_buf = lockfile.buffers.extern_strings.items,
- };
+ .package_name = strings.StringOrTinyString.init(name),
+ .string_buf = lockfile.buffers.string_bytes.items,
+ .extern_string_buf = lockfile.buffers.extern_strings.items,
+ };
- bin_linker.link(this.options.global);
+ bin_linker.link(this.options.global);
- if (bin_linker.err) |err| {
- if (comptime log_level != .silent) {
- const fmt = "\n<r><red>error:<r> linking <b>{s}<r>: {s}\n";
- const args = .{ name, @errorName(err) };
+ if (bin_linker.err) |err| {
+ if (comptime log_level != .silent) {
+ const fmt = "\n<r><red>error:<r> linking <b>{s}<r>: {s}\n";
+ const args = .{ name, @errorName(err) };
- if (comptime log_level.showProgress()) {
- switch (Output.enable_ansi_colors) {
- inline else => |enable_ansi_colors| {
- this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
- },
+ if (comptime log_level.showProgress()) {
+ switch (Output.enable_ansi_colors) {
+ inline else => |enable_ansi_colors| {
+ this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
+ },
+ }
+ } else {
+ Output.prettyErrorln(fmt, args);
}
- } else {
- Output.prettyErrorln(fmt, args);
}
+
+ if (this.options.enable.fail_early) Global.crash();
}
- if (this.options.enable.fail_early) Global.crash();
+ continue :outer;
}
- continue :outer;
- }
-
- if (comptime log_level != .silent) {
- const fmt = "\n<r><yellow>warn:<r> no compatible binaries found for <b>{s}<r>\n";
- const args = .{lockfile.str(&names[package_id])};
+ if (comptime log_level != .silent) {
+ const fmt = "\n<r><yellow>warn:<r> no compatible binaries found for <b>{s}<r>\n";
+ const args = .{lockfile.str(&names[package_id])};
- if (comptime log_level.showProgress()) {
- switch (Output.enable_ansi_colors) {
- inline else => |enable_ansi_colors| {
- this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
- },
+ if (comptime log_level.showProgress()) {
+ switch (Output.enable_ansi_colors) {
+ inline else => |enable_ansi_colors| {
+ this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args);
+ },
+ }
+ } else {
+ Output.prettyErrorln(fmt, args);
}
- } else {
- Output.prettyErrorln(fmt, args);
}
}
}
@@ -7647,15 +7764,17 @@ pub const PackageManager = struct {
)
else
.{ .not_found = {} };
+
var root = Lockfile.Package{};
- var needs_new_lockfile = load_lockfile_result != .ok or (load_lockfile_result.ok.buffers.dependencies.items.len == 0 and manager.package_json_updates.len > 0);
+ var needs_new_lockfile = load_lockfile_result != .ok or
+ (load_lockfile_result.ok.buffers.dependencies.items.len == 0 and manager.package_json_updates.len > 0);
+
// this defaults to false
// but we force allowing updates to the lockfile when you do bun add
var had_any_diffs = false;
manager.progress = .{};
// Step 2. Parse the package.json file
- //
var package_json_source = logger.Source.initPathString(package_json_cwd, package_json_contents);
switch (load_lockfile_result) {
@@ -7671,6 +7790,9 @@ pub const PackageManager = struct {
.read_file => Output.prettyError("<r><red>error<r> reading lockfile:<r> {s}\n<r>", .{
@errorName(cause.value),
}),
+ .migrating => Output.prettyError("<r><red>error<r> migrating lockfile:<r> {s}\n<r>", .{
+ @errorName(cause.value),
+ }),
}
if (manager.options.enable.fail_early) {
@@ -7749,6 +7871,8 @@ pub const PackageManager = struct {
new_dep.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder);
}
+ lockfile.overrides.count(&lockfile, builder);
+
maybe_root.scripts.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder);
const off = @as(u32, @truncate(manager.lockfile.buffers.dependencies.items.len));
@@ -7762,6 +7886,27 @@ pub const PackageManager = struct {
manager.root_dependency_list = dep_lists[0];
try builder.allocate();
+ const all_name_hashes: []PackageNameHash = brk: {
+ if (!manager.summary.overrides_changed) break :brk &.{};
+ const hashes_len = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;
+ if (hashes_len == 0) break :brk &.{};
+ var all_name_hashes = try bun.default_allocator.alloc(PackageNameHash, hashes_len);
+ @memcpy(all_name_hashes[0..manager.lockfile.overrides.map.entries.len], manager.lockfile.overrides.map.keys());
+ @memcpy(all_name_hashes[manager.lockfile.overrides.map.entries.len..], lockfile.overrides.map.keys());
+ var i = manager.lockfile.overrides.map.entries.len;
+ while (i < all_name_hashes.len) {
+ if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes[0..i], all_name_hashes[i]) != null) {
+ all_name_hashes[i] = all_name_hashes[all_name_hashes.len - 1];
+ all_name_hashes.len -= 1;
+ } else {
+ i += 1;
+ }
+ }
+ break :brk all_name_hashes;
+ };
+
+ manager.lockfile.overrides = try lockfile.overrides.clone(&lockfile, manager.lockfile, builder);
+
try manager.lockfile.buffers.dependencies.ensureUnusedCapacity(manager.lockfile.allocator, len);
try manager.lockfile.buffers.resolutions.ensureUnusedCapacity(manager.lockfile.allocator, len);
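
The while loop above de-duplicates the concatenated override key arrays by swap-remove; shrinking all_name_hashes.len merely narrows the slice, leaving the allocation intact. The same idea as a helper, generalized to scan from index 0 (the code above only checks the appended half against what precedes it):

const std = @import("std");

// Swap-remove de-duplication over a slice of hashes.
fn dedupe(items: []u64) []u64 {
    var slice = items;
    var i: usize = 0;
    while (i < slice.len) {
        if (std.mem.indexOfScalar(u64, slice[0..i], slice[i]) != null) {
            slice[i] = slice[slice.len - 1];
            slice.len -= 1; // narrows the slice; no free, no shuffle
        } else {
            i += 1;
        }
    }
    return slice;
}

test {
    var data = [_]u64{ 1, 2, 1, 3 };
    try std.testing.expectEqual(@as(usize, 3), dedupe(&data).len);
}
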
@@ -7784,6 +7929,20 @@ pub const PackageManager = struct {
}
}
+ if (manager.summary.overrides_changed and all_name_hashes.len > 0) {
+ for (manager.lockfile.buffers.dependencies.items, 0..) |*dependency, dependency_i| {
+ if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes, dependency.name_hash)) |_| {
+ manager.lockfile.buffers.resolutions.items[dependency_i] = invalid_package_id;
+ try manager.enqueueDependencyWithMain(
+ @truncate(dependency_i),
+ dependency,
+ manager.lockfile.buffers.resolutions.items[dependency_i],
+ false,
+ );
+ }
+ }
+ }
+
manager.lockfile.packages.items(.scripts)[0] = maybe_root.scripts.clone(
lockfile.buffers.string_bytes.items,
*Lockfile.StringBuilder,
@@ -7808,6 +7967,7 @@ pub const PackageManager = struct {
dependency_i,
&dependency,
manager.lockfile.buffers.resolutions.items[dependency_i],
+ false,
);
}
}
@@ -7861,7 +8021,7 @@ pub const PackageManager = struct {
manager.drainDependencyList();
}
- if (manager.pending_tasks > 0) {
+ if (manager.pending_tasks > 0 or manager.peer_dependencies.items.len > 0) {
if (root.dependencies.len > 0) {
_ = manager.getCacheDirectory();
_ = manager.getTemporaryDirectory();
@@ -7885,6 +8045,7 @@ pub const PackageManager = struct {
.onPackageDownloadError = {},
.progress_bar = true,
},
+ false,
log_level,
);
@@ -7896,6 +8057,35 @@ pub const PackageManager = struct {
manager.sleep();
}
+ if (manager.options.do.install_peer_dependencies) {
+ try manager.processPeerDependencyList();
+
+ manager.drainDependencyList();
+
+ while (manager.pending_tasks > 0) {
+ try manager.runTasks(
+ *PackageManager,
+ manager,
+ .{
+ .onExtract = {},
+ .onResolve = {},
+ .onPackageManifestError = {},
+ .onPackageDownloadError = {},
+ .progress_bar = true,
+ },
+ true,
+ log_level,
+ );
+
+ if (PackageManager.verbose_install and manager.pending_tasks > 0) {
+ Output.prettyErrorln("<d>[PackageManager]<r> waiting for {d} tasks\n", .{manager.pending_tasks});
+ }
+
+ if (manager.pending_tasks > 0)
+ manager.sleep();
+ }
+ }
+
if (comptime log_level.showProgress()) {
manager.endProgressBar();
} else if (comptime log_level != .silent) {
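
Taken together, the changes in this file set up a two-phase resolution: during the first pass peers are only recorded (the peer_dependencies.append earlier), and once pending_tasks drains, processPeerDependencyList re-enqueues them with install_peer = true for the second loop above. A toy model of that two-phase drain, with ID queues standing in for the real task machinery:

const std = @import("std");

fn drain(queue: *std.ArrayList(u32), handled: *usize) void {
    while (queue.popOrNull()) |_| {
        handled.* += 1;
    }
}

test "peers resolve in a second pass" {
    const a = std.testing.allocator;
    var regular = std.ArrayList(u32).init(a);
    defer regular.deinit();
    var peers = std.ArrayList(u32).init(a);
    defer peers.deinit();

    try regular.append(1);
    try peers.append(2); // recorded during pass one instead of being resolved

    var handled: usize = 0;
    drain(&regular, &handled); // pass one: everything but peers
    drain(&peers, &handled); // pass two: processPeerDependencyList equivalent
    try std.testing.expectEqual(@as(usize, 2), handled);
}
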
diff --git a/src/install/integrity.zig b/src/install/integrity.zig
index dd11140de..c0b02d4bf 100644
--- a/src/install/integrity.zig
+++ b/src/install/integrity.zig
@@ -3,34 +3,26 @@ const strings = @import("../string_immutable.zig");
const Crypto = @import("../sha.zig").Hashers;
pub const Integrity = extern struct {
+ const empty_digest_buf: [Integrity.digest_buf_len]u8 = [_]u8{0} ** Integrity.digest_buf_len;
+
tag: Tag = Tag.unknown,
/// Possibly a [Subresource Integrity](https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity) value initially
/// We transform it though.
- value: [digest_buf_len]u8 = undefined,
+ value: [digest_buf_len]u8 = empty_digest_buf,
const Base64 = std.base64.standard_no_pad;
- pub const digest_buf_len: usize = brk: {
- const values = [_]usize{
- std.crypto.hash.Sha1.digest_length,
- std.crypto.hash.sha2.Sha512.digest_length,
- std.crypto.hash.sha2.Sha256.digest_length,
- std.crypto.hash.sha2.Sha384.digest_length,
- };
-
- var value: usize = 0;
- for (values) |val| {
- value = @max(val, value);
- }
-
- break :brk value;
- };
+ pub const digest_buf_len: usize = @max(
+ std.crypto.hash.Sha1.digest_length,
+ std.crypto.hash.sha2.Sha512.digest_length,
+ std.crypto.hash.sha2.Sha256.digest_length,
+ std.crypto.hash.sha2.Sha384.digest_length,
+ );
pub fn parseSHASum(buf: []const u8) !Integrity {
if (buf.len == 0) {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
}
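
For reference, the rewritten digest_buf_len collapses the old loop into a single comptime-evaluated @max and comes out to SHA-512's digest size. A self-contained check of that claim:

const std = @import("std");

const digest_buf_len: usize = @max(
    std.crypto.hash.Sha1.digest_length, // 20
    std.crypto.hash.sha2.Sha512.digest_length, // 64
    std.crypto.hash.sha2.Sha256.digest_length, // 32
    std.crypto.hash.sha2.Sha384.digest_length, // 48
);

test "SHA-512 has the largest digest" {
    try std.testing.expectEqual(@as(usize, 64), digest_buf_len);
}
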
@@ -40,8 +32,11 @@ pub const Integrity = extern struct {
var out_i: usize = 0;
var i: usize = 0;
- {
- @memset(&integrity.value, 0);
+ // initializer should zero it out
+ if (comptime @import("root").bun.Environment.allow_assert) {
+ for (integrity.value) |c| {
+ std.debug.assert(c == 0);
+ }
}
while (i < end) {
@@ -74,23 +69,20 @@ pub const Integrity = extern struct {
if (buf.len < "sha256-".len) {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
}
- var out: [digest_buf_len]u8 = undefined;
+ var out: [digest_buf_len]u8 = empty_digest_buf;
const tag = Tag.parse(buf);
if (tag == Tag.unknown) {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
}
Base64.Decoder.decode(&out, std.mem.trimRight(u8, buf["sha256-".len..], "=")) catch {
return Integrity{
.tag = Tag.unknown,
- .value = undefined,
};
};
@@ -203,4 +195,13 @@ pub const Integrity = extern struct {
unreachable;
}
+
+ comptime {
+ var integrity = Integrity{ .tag = Tag.sha1 };
+ for (integrity.value) |c| {
+ if (c != 0) {
+ @compileError("Integrity buffer is not zeroed");
+ }
+ }
+ }
};
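
The trailing comptime block added above is a build-time guard: it constructs an Integrity with only a tag set and scans value, so the build fails if the zero-initializing default is ever removed. The pattern in isolation, with a hypothetical stand-in struct:

const std = @import("std");

const Blob = extern struct {
    tag: u8 = 0,
    value: [4]u8 = [_]u8{0} ** 4,
};

comptime {
    // Build fails, rather than leaving undefined bytes at runtime,
    // if `value` ever stops being zeroed by default.
    const b = Blob{ .tag = 1 };
    for (b.value) |c| {
        if (c != 0) @compileError("Blob.value is not zeroed by default");
    }
}

test {
    try std.testing.expectEqual(@as(u8, 0), (Blob{}).value[0]);
}
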
diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig
index dc0a64475..834846768 100644
--- a/src/install/lockfile.zig
+++ b/src/install/lockfile.zig
@@ -21,6 +21,7 @@ const json_parser = bun.JSON;
const JSPrinter = bun.js_printer;
const linker = @import("../linker.zig");
+const migration = @import("./migration.zig");
const sync = @import("../sync.zig");
const Api = @import("../api/schema.zig").Api;
@@ -92,7 +93,7 @@ const assertNoUninitializedPadding = @import("./padding_checker.zig").assertNoUn
// Serialized data
/// The version of the lockfile format, intended to prevent data corruption for format changes.
-format: FormatVersion = .v1,
+format: FormatVersion = FormatVersion.current,
meta_hash: MetaHash = zero_hash,
@@ -111,6 +112,8 @@ trusted_dependencies: NameHashSet = .{},
workspace_paths: NameHashMap = .{},
workspace_versions: VersionHashMap = .{},
+overrides: OverrideMap = .{},
+
const Stream = std.io.FixedBufferStream([]u8);
pub const default_filename = "bun.lockb";
@@ -159,7 +162,7 @@ pub fn isEmpty(this: *const Lockfile) bool {
return this.packages.len == 0 or this.packages.len == 1 or this.packages.get(0).resolutions.len == 0;
}
-pub const LoadFromDiskResult = union(Tag) {
+pub const LoadFromDiskResult = union(enum) {
not_found: void,
err: struct {
step: Step,
@@ -167,26 +170,30 @@ pub const LoadFromDiskResult = union(Tag) {
},
ok: *Lockfile,
- pub const Step = enum { open_file, read_file, parse_file };
-
- pub const Tag = enum {
- not_found,
- err,
- ok,
- };
+ pub const Step = enum { open_file, read_file, parse_file, migrating };
};
pub fn loadFromDisk(this: *Lockfile, allocator: Allocator, log: *logger.Log, filename: stringZ) LoadFromDiskResult {
if (comptime Environment.allow_assert) std.debug.assert(FileSystem.instance_loaded);
- var file = std.io.getStdIn();
- if (filename.len > 0)
- file = std.fs.cwd().openFileZ(filename, .{ .mode = .read_only }) catch |err| {
+ var file = if (filename.len > 0)
+ std.fs.cwd().openFileZ(filename, .{ .mode = .read_only }) catch |err| {
return switch (err) {
- error.FileNotFound, error.AccessDenied, error.BadPathName => LoadFromDiskResult{ .not_found = {} },
+ error.FileNotFound => {
+ // Attempt to load from "package-lock.json", "yarn.lock", etc.
+ return migration.detectAndLoadOtherLockfile(
+ this,
+ allocator,
+ log,
+ filename,
+ );
+ },
+ error.AccessDenied, error.BadPathName => LoadFromDiskResult{ .not_found = {} },
else => LoadFromDiskResult{ .err = .{ .step = .open_file, .value = err } },
};
- };
+ }
+ else
+ std.io.getStdIn();
defer file.close();
var buf = file.readToEndAlloc(allocator, std.math.maxInt(usize)) catch |err| {
@@ -204,11 +211,16 @@ pub fn loadFromBytes(this: *Lockfile, buf: []u8, allocator: Allocator, log: *log
this.trusted_dependencies = .{};
this.workspace_paths = .{};
this.workspace_versions = .{};
+ this.overrides = .{};
Lockfile.Serializer.load(this, &stream, allocator, log) catch |err| {
return LoadFromDiskResult{ .err = .{ .step = .parse_file, .value = err } };
};
+ if (Environment.allow_assert) {
+ this.verifyData() catch @panic("lockfile data is corrupt");
+ }
+
return LoadFromDiskResult{ .ok = this };
}
@@ -289,6 +301,14 @@ pub const Tree = struct {
};
}
+ pub fn reload(this: *Iterator, lockfile: *const Lockfile) void {
+ this.trees = lockfile.buffers.trees.items;
+ this.dependency_ids = lockfile.buffers.hoisted_dependencies.items;
+ this.dependencies = lockfile.buffers.dependencies.items;
+ this.resolutions = lockfile.buffers.resolutions.items;
+ this.string_buf = lockfile.buffers.string_bytes.items;
+ }
+
pub fn nextNodeModulesFolder(this: *Iterator) ?NodeModulesFolder {
if (this.tree_id >= this.trees.len) return null;
@@ -714,6 +734,13 @@ pub fn cleanWithLogger(
old.scratch.dependency_list_queue.head = 0;
+ {
+ var builder = new.stringBuilder();
+ old.overrides.count(old, &builder);
+ try builder.allocate();
+ new.overrides = try old.overrides.clone(old, new, &builder);
+ }
+
// Step 1. Recreate the lockfile with only the packages that are still alive
const root = old.rootPackage() orelse return error.NoPackage;
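
The new block follows the lockfile's two-pass string interning protocol: count every string that will survive into the new lockfile, allocate once, then clone copies into the reserved buffer. The discipline in miniature (a hypothetical MiniBuilder, not Bun's Lockfile.StringBuilder):

const std = @import("std");

const MiniBuilder = struct {
    needed: usize = 0,
    buf: std.ArrayList(u8),

    fn count(this: *MiniBuilder, s: []const u8) void {
        this.needed += s.len; // pass one: tally bytes
    }
    fn allocate(this: *MiniBuilder) !void {
        try this.buf.ensureTotalCapacity(this.needed); // single reservation
    }
    fn append(this: *MiniBuilder, s: []const u8) []const u8 {
        const start = this.buf.items.len; // pass two: copy, no reallocation
        this.buf.appendSliceAssumeCapacity(s);
        return this.buf.items[start..];
    }
};

test {
    var b = MiniBuilder{ .buf = std.ArrayList(u8).init(std.testing.allocator) };
    defer b.buf.deinit();
    b.count("foo");
    b.count("bar");
    try b.allocate();
    try std.testing.expectEqualStrings("foo", b.append("foo"));
}
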
@@ -826,6 +853,7 @@ pub fn cleanWithLogger(
}
new.trusted_dependencies = old_trusted_dependencies;
new.scripts = old_scripts;
+
return new;
}
@@ -997,6 +1025,9 @@ pub const Printer = struct {
.read_file => Output.prettyErrorln("<r><red>error<r> reading lockfile:<r> {s}", .{
@errorName(cause.value),
}),
+ .migrating => Output.prettyErrorln("<r><red>error<r> while migrating lockfile:<r> {s}", .{
+ @errorName(cause.value),
+ }),
}
if (log.errors > 0) {
switch (Output.enable_ansi_colors) {
@@ -1260,6 +1291,24 @@ pub const Printer = struct {
comptime Writer: type,
writer: Writer,
) !void {
+        // Internal debugging aid: print the lockfile as custom JSON.
+        // Limited to debug builds because we don't want people to rely on this format.
+ if (Environment.isDebug) {
+ if (std.os.getenv("JSON")) |_| {
+ try std.json.stringify(
+ this.lockfile,
+ .{
+ .whitespace = .indent_2,
+ .emit_null_optional_fields = true,
+ .emit_nonportable_numbers_as_strings = true,
+ },
+ writer,
+ );
+ try writer.writeAll("\n");
+ return;
+ }
+ }
+
try writer.writeAll(
\\# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
\\# yarn lockfile v1
@@ -1416,7 +1465,7 @@ pub const Printer = struct {
var behavior = Behavior.uninitialized;
var dependency_behavior_change_count: u8 = 0;
for (dependencies) |dep| {
- if (dep.behavior != behavior) {
+ if (!dep.behavior.eq(behavior)) {
if (dep.behavior.isOptional()) {
try writer.writeAll(" optionalDependencies:\n");
if (comptime Environment.allow_assert) dependency_behavior_change_count += 1;
@@ -1458,20 +1507,18 @@ pub const Printer = struct {
pub fn verifyData(this: *Lockfile) !void {
std.debug.assert(this.format == Lockfile.FormatVersion.current);
- {
- var i: usize = 0;
- while (i < this.packages.len) : (i += 1) {
- const package: Lockfile.Package = this.packages.get(i);
- std.debug.assert(this.str(&package.name).len == @as(usize, package.name.len()));
- std.debug.assert(String.Builder.stringHash(this.str(&package.name)) == @as(usize, package.name_hash));
- std.debug.assert(package.dependencies.get(this.buffers.dependencies.items).len == @as(usize, package.dependencies.len));
- std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.resolutions.len));
- std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.dependencies.len));
- const dependencies = package.dependencies.get(this.buffers.dependencies.items);
- for (dependencies) |dependency| {
- std.debug.assert(this.str(&dependency.name).len == @as(usize, dependency.name.len()));
- std.debug.assert(String.Builder.stringHash(this.str(&dependency.name)) == dependency.name_hash);
- }
+ var i: usize = 0;
+ while (i < this.packages.len) : (i += 1) {
+ const package: Lockfile.Package = this.packages.get(i);
+ std.debug.assert(this.str(&package.name).len == @as(usize, package.name.len()));
+ std.debug.assert(String.Builder.stringHash(this.str(&package.name)) == @as(usize, package.name_hash));
+ std.debug.assert(package.dependencies.get(this.buffers.dependencies.items).len == @as(usize, package.dependencies.len));
+ std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.resolutions.len));
+ std.debug.assert(package.resolutions.get(this.buffers.resolutions.items).len == @as(usize, package.dependencies.len));
+ const dependencies = package.dependencies.get(this.buffers.dependencies.items);
+ for (dependencies) |dependency| {
+ std.debug.assert(this.str(&dependency.name).len == @as(usize, dependency.name.len()));
+ std.debug.assert(String.Builder.stringHash(this.str(&dependency.name)) == dependency.name_hash);
}
}
}
@@ -1491,7 +1538,7 @@ pub fn verifyResolutions(this: *Lockfile, local_features: Features, remote_featu
for (resolution_list.get(resolutions_buffer), dependency_list.get(dependencies_buffer)) |package_id, failed_dep| {
if (package_id < end) continue;
if (failed_dep.behavior.isPeer() or !failed_dep.behavior.isEnabled(
- if (root_list.contains(@as(PackageID, @truncate(parent_id))))
+ if (root_list.contains(@truncate(parent_id)))
local_features
else
remote_features,
@@ -1688,7 +1735,7 @@ pub fn appendPackage(this: *Lockfile, package_: Lockfile.Package) !Lockfile.Pack
fn appendPackageWithID(this: *Lockfile, package_: Lockfile.Package, id: PackageID) !Lockfile.Package {
defer {
- if (comptime Environment.isDebug) {
+ if (comptime Environment.allow_assert) {
std.debug.assert(this.getPackageID(package_.name_hash, null, &package_.resolution) != null);
}
}
@@ -1849,13 +1896,307 @@ pub const PackageIndex = struct {
};
};
+pub const OverrideMap = struct {
+ const debug = Output.scoped(.OverrideMap, false);
+
+ map: std.ArrayHashMapUnmanaged(PackageNameHash, Dependency, ArrayIdentityContext.U64, false) = .{},
+
+    /// In the future, this `get` function should handle multi-level resolutions. This is difficult right
+    /// now because, given a Dependency ID, there is no fast way to trace it to its package.
+    ///
+    /// A potential approach is to add another buffer to the lockfile that maps Dependency ID to Package ID,
+    /// and from there `OverrideMap.map` can have a union as the value, where the union is between "override all"
+    /// and "here is a list of overrides depending on the package that imported it", similar to PackageIndex above.
+ pub fn get(this: *const OverrideMap, name_hash: PackageNameHash) ?Dependency.Version {
+ debug("looking up override for {x}", .{name_hash});
+ return if (this.map.get(name_hash)) |dep|
+ dep.version
+ else
+ null;
+ }
+
+ pub fn deinit(this: *OverrideMap, allocator: Allocator) void {
+ this.map.deinit(allocator);
+ }
+
+ pub fn count(this: *OverrideMap, lockfile: *Lockfile, builder: *Lockfile.StringBuilder) void {
+ for (this.map.values()) |dep| {
+ dep.count(lockfile.buffers.string_bytes.items, @TypeOf(builder), builder);
+ }
+ }
+
+ pub fn clone(this: *OverrideMap, old_lockfile: *Lockfile, new_lockfile: *Lockfile, new_builder: *Lockfile.StringBuilder) !OverrideMap {
+ var new = OverrideMap{};
+ try new.map.ensureTotalCapacity(new_lockfile.allocator, this.map.entries.len);
+
+ for (this.map.keys(), this.map.values()) |k, v| {
+ new.map.putAssumeCapacity(
+ k,
+ try v.clone(old_lockfile.buffers.string_bytes.items, @TypeOf(new_builder), new_builder),
+ );
+ }
+
+ return new;
+ }
+
+ // the rest of this struct is expression parsing code:
+
+ pub fn parseCount(
+ _: *OverrideMap,
+ lockfile: *Lockfile,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) void {
+ if (expr.asProperty("overrides")) |overrides| {
+ if (overrides.expr.data != .e_object)
+ return;
+
+ for (overrides.expr.data.e_object.properties.slice()) |entry| {
+ builder.count(entry.key.?.asString(lockfile.allocator).?);
+ switch (entry.value.?.data) {
+ .e_string => |s| {
+ builder.count(s.slice(lockfile.allocator));
+ },
+ .e_object => {
+ if (entry.value.?.asProperty(".")) |dot| {
+ if (dot.expr.asString(lockfile.allocator)) |s| {
+ builder.count(s);
+ }
+ }
+ },
+ else => {},
+ }
+ }
+ } else if (expr.asProperty("resolutions")) |resolutions| {
+ if (resolutions.expr.data != .e_object)
+ return;
+
+ for (resolutions.expr.data.e_object.properties.slice()) |entry| {
+ builder.count(entry.key.?.asString(lockfile.allocator).?);
+ builder.count(entry.value.?.asString(lockfile.allocator) orelse continue);
+ }
+ }
+ }
+
+ /// Given a package json expression, detect and parse override configuration into the given override map.
+ /// It is assumed the input map is uninitialized (zero entries)
+ pub fn parseAppend(
+ this: *OverrideMap,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ log: *logger.Log,
+ json_source: logger.Source,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) !void {
+ if (Environment.allow_assert) {
+ std.debug.assert(this.map.entries.len == 0); // only call parse once
+ }
+ if (expr.asProperty("overrides")) |overrides| {
+ try this.parseFromOverrides(lockfile, root_package, json_source, log, overrides.expr, builder);
+ } else if (expr.asProperty("resolutions")) |resolutions| {
+ try this.parseFromResolutions(lockfile, root_package, json_source, log, resolutions.expr, builder);
+ }
+ debug("parsed {d} overrides", .{this.map.entries.len});
+ }
+
+ /// https://docs.npmjs.com/cli/v9/configuring-npm/package-json#overrides
+ pub fn parseFromOverrides(
+ this: *OverrideMap,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ source: logger.Source,
+ log: *logger.Log,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) !void {
+ if (expr.data != .e_object) {
+ try log.addWarningFmt(&source, expr.loc, lockfile.allocator, "\"overrides\" must be an object", .{});
+ return error.Invalid;
+ }
+
+ try this.map.ensureUnusedCapacity(lockfile.allocator, expr.data.e_object.properties.len);
+
+ for (expr.data.e_object.properties.slice()) |prop| {
+ const key = prop.key.?;
+ const k = key.asString(lockfile.allocator).?;
+ if (k.len == 0) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Missing overridden package name", .{});
+ continue;
+ }
+
+ const name_hash = String.Builder.stringHash(k);
+
+ const value = value: {
+ // for one level deep, we will only support a string and { ".": value }
+ const value_expr = prop.value.?;
+ if (value_expr.data == .e_string) {
+ break :value value_expr;
+ } else if (value_expr.data == .e_object) {
+ if (value_expr.asProperty(".")) |dot| {
+ if (dot.expr.data == .e_string) {
+ if (value_expr.data.e_object.properties.len > 1) {
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Bun currently does not support nested \"overrides\"", .{});
+ }
+ break :value dot.expr;
+ } else {
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Invalid override value for \"{s}\"", .{k});
+ continue;
+ }
+ } else {
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Bun currently does not support nested \"overrides\"", .{});
+ continue;
+ }
+ }
+ try log.addWarningFmt(&source, value_expr.loc, lockfile.allocator, "Invalid override value for \"{s}\"", .{k});
+ continue;
+ };
+
+ if (try parseOverrideValue(
+ "override",
+ lockfile,
+ root_package,
+ source,
+ value.loc,
+ log,
+ k,
+ value.data.e_string.slice(lockfile.allocator),
+ builder,
+ )) |version| {
+ this.map.putAssumeCapacity(name_hash, version);
+ }
+ }
+ }
+
+ /// yarn classic: https://classic.yarnpkg.com/lang/en/docs/selective-version-resolutions/
+ /// yarn berry: https://yarnpkg.com/configuration/manifest#resolutions
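+ ///
+ /// Loosely, the accepted shapes (an optional "**/" prefix is stripped, and only
+ /// string values are supported):
+ ///
+ ///     "resolutions": {
+ ///         "foo": "1.0.0",
+ ///         "**/bar": "2.0.0"
+ ///     }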
+ pub fn parseFromResolutions(
+ this: *OverrideMap,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ source: logger.Source,
+ log: *logger.Log,
+ expr: Expr,
+ builder: *Lockfile.StringBuilder,
+ ) !void {
+ if (expr.data != .e_object) {
+ try log.addWarningFmt(&source, expr.loc, lockfile.allocator, "\"resolutions\" must be an object with string values", .{});
+ return;
+ }
+ try this.map.ensureUnusedCapacity(lockfile.allocator, expr.data.e_object.properties.len);
+ for (expr.data.e_object.properties.slice()) |prop| {
+ const key = prop.key.?;
+ var k = key.asString(lockfile.allocator).?;
+ if (strings.hasPrefixComptime(k, "**/"))
+ k = k[3..];
+ if (k.len == 0) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Missing resolution package name", .{});
+ continue;
+ }
+ const value = prop.value.?;
+ if (value.data != .e_string) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Expected string value for resolution \"{s}\"", .{k});
+ continue;
+ }
+ // currently we only support one level deep, so we should error if there are more than one
+ // - "foo/bar":
+ // - "@namespace/hello/world"
+ if (k[0] == '@') {
+ const first_slash = strings.indexOfChar(k, '/') orelse {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Invalid package name \"{s}\"", .{k});
+ continue;
+ };
+ if (strings.indexOfChar(k[first_slash + 1 ..], '/') != null) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Bun currently does not support nested \"resolutions\"", .{});
+ continue;
+ }
+ } else if (strings.indexOfChar(k, '/') != null) {
+ try log.addWarningFmt(&source, key.loc, lockfile.allocator, "Bun currently does not support nested \"resolutions\"", .{});
+ continue;
+ }
+
+ if (try parseOverrideValue(
+ "resolution",
+ lockfile,
+ root_package,
+ source,
+ value.loc,
+ log,
+ k,
+ value.data.e_string.data,
+ builder,
+ )) |version| {
+ const name_hash = String.Builder.stringHash(k);
+ this.map.putAssumeCapacity(name_hash, version);
+ }
+ }
+ }
+
+ pub fn parseOverrideValue(
+ comptime field: []const u8,
+ lockfile: *Lockfile,
+ root_package: *Lockfile.Package,
+ source: logger.Source,
+ loc: logger.Loc,
+ log: *logger.Log,
+ key: []const u8,
+ value: []const u8,
+ builder: *Lockfile.StringBuilder,
+ ) !?Dependency {
+ if (value.len == 0) {
+ try log.addWarningFmt(&source, loc, lockfile.allocator, "Missing " ++ field ++ " value", .{});
+ return null;
+ }
+
+ // "Overrides may also be defined as a reference to a spec for a direct dependency
+ // by prefixing the name of the package you wish the version to match with a `$`"
+ // https://docs.npmjs.com/cli/v9/configuring-npm/package-json#overrides
+ // This is why a `*Lockfile.Package` is needed here.
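+ // For example (illustrative): { "overrides": { "qux": "$foo" } } makes "qux"
+ // resolve to whatever version of "foo" the root package already depends on.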
+ if (value[0] == '$') {
+ const ref_name = value[1..];
+ // This is fine for this string to not share the string pool, because it's only used for .eql()
+ const ref_name_str = String.init(ref_name, ref_name);
+ const pkg_deps: []const Dependency = root_package.dependencies.get(lockfile.buffers.dependencies.items);
+ for (pkg_deps) |dep| {
+ if (dep.name.eql(ref_name_str, lockfile.buffers.string_bytes.items, ref_name)) {
+ return dep;
+ }
+ }
+ try log.addWarningFmt(&source, loc, lockfile.allocator, "Could not resolve " ++ field ++ " \"{s}\" (you need \"{s}\" in your dependencies)", .{ value, ref_name });
+ return null;
+ }
+
+ const literal_string = builder.append(String, value);
+ const literal_sliced = literal_string.sliced(lockfile.buffers.string_bytes.items);
+
+ const name_hash = String.Builder.stringHash(key);
+ const name = builder.appendWithHash(String, key, name_hash);
+
+ return Dependency{
+ .name = name,
+ .name_hash = name_hash,
+ .version = Dependency.parse(
+ lockfile.allocator,
+ name,
+ literal_sliced.slice,
+ &literal_sliced,
+ log,
+ ) orelse {
+ try log.addWarningFmt(&source, loc, lockfile.allocator, "Invalid " ++ field ++ " value \"{s}\"", .{value});
+ return null;
+ },
+ };
+ }
+};
+
pub const FormatVersion = enum(u32) {
- v0,
+ v0 = 0,
// bun v0.0.x - bun v0.1.6
- v1,
+ v1 = 1,
// bun v0.1.7+
// This change added tarball URLs to npm-resolved packages
- v2,
+ v2 = 2,
+
_,
pub const current = FormatVersion.v2;
};
@@ -1875,7 +2216,7 @@ pub const Package = extern struct {
name: String = .{},
name_hash: PackageNameHash = 0,
- /// How a package has been resolved
+ /// How this package has been resolved
/// When .tag is uninitialized, that means the package is not resolved yet.
resolution: Resolution = .{},
@@ -1884,8 +2225,18 @@ pub const Package = extern struct {
/// if resolutions[i] is an invalid package ID, then dependencies[i] is not resolved
dependencies: DependencySlice = .{},
- /// The resolved package IDs for the dependencies
- resolutions: DependencyIDSlice = .{},
+ /// The resolved package IDs for this package's dependencies. Instead of storing this
+ /// on the `Dependency` struct within `.dependencies`, it is stored on the package itself
+ /// so we can access it faster.
+ ///
+ /// Each index in this array corresponds to the same index in dependencies.
+ /// Each value in this array corresponds to the resolved package ID for that dependency.
+ ///
+ /// So this is how you say "what package ID for lodash does this package actually resolve to?"
+ ///
+ /// By default, the underlying buffer is filled with "invalid_id" to indicate this package ID
+ /// was not resolved
+ resolutions: PackageIDSlice = .{},
meta: Meta = .{},
bin: Bin = .{},
@@ -2023,11 +2374,11 @@ pub const Package = extern struct {
field: string,
behavior: Behavior,
- pub const dependencies = DependencyGroup{ .prop = "dependencies", .field = "dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.normal)) };
- pub const dev = DependencyGroup{ .prop = "devDependencies", .field = "dev_dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.dev)) };
- pub const optional = DependencyGroup{ .prop = "optionalDependencies", .field = "optional_dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.optional)) };
- pub const peer = DependencyGroup{ .prop = "peerDependencies", .field = "peer_dependencies", .behavior = @as(Behavior, @enumFromInt(Behavior.peer)) };
- pub const workspaces = DependencyGroup{ .prop = "workspaces", .field = "workspaces", .behavior = @as(Behavior, @enumFromInt(Behavior.workspace)) };
+ pub const dependencies = DependencyGroup{ .prop = "dependencies", .field = "dependencies", .behavior = Behavior.normal };
+ pub const dev = DependencyGroup{ .prop = "devDependencies", .field = "dev_dependencies", .behavior = Behavior.dev };
+ pub const optional = DependencyGroup{ .prop = "optionalDependencies", .field = "optional_dependencies", .behavior = Behavior.optional };
+ pub const peer = DependencyGroup{ .prop = "peerDependencies", .field = "peer_dependencies", .behavior = Behavior.peer };
+ pub const workspaces = DependencyGroup{ .prop = "workspaces", .field = "workspaces", .behavior = Behavior.workspace };
};
pub inline fn isDisabled(this: *const Lockfile.Package) bool {
@@ -2461,6 +2812,7 @@ pub const Package = extern struct {
add: u32 = 0,
remove: u32 = 0,
update: u32 = 0,
+ overrides_changed: bool = false,
pub inline fn sum(this: *Summary, that: Summary) void {
this.add += that.add;
@@ -2469,7 +2821,7 @@ pub const Package = extern struct {
}
pub inline fn hasDiffs(this: Summary) bool {
- return this.add > 0 or this.remove > 0 or this.update > 0;
+ return this.add > 0 or this.remove > 0 or this.update > 0 or this.overrides_changed;
}
};
@@ -2490,6 +2842,22 @@ pub const Package = extern struct {
var to_i: usize = 0;
var skipped_workspaces: usize = 0;
+ if (from_lockfile.overrides.map.count() != to_lockfile.overrides.map.count()) {
+ summary.overrides_changed = true;
+ } else {
+ for (
+ from_lockfile.overrides.map.keys(),
+ from_lockfile.overrides.map.values(),
+ to_lockfile.overrides.map.keys(),
+ to_lockfile.overrides.map.values(),
+ ) |from_k, *from_override, to_k, *to_override| {
+ if ((from_k != to_k) or (!from_override.eql(to_override, from_lockfile.buffers.string_bytes.items, to_lockfile.buffers.string_bytes.items))) {
+ summary.overrides_changed = true;
+ break;
+ }
+ }
+ }
+
for (from_deps, 0..) |*from_dep, i| {
found: {
const prev_i = to_i;
@@ -3425,15 +3793,7 @@ pub const Package = extern struct {
return error.InvalidPackageJSON;
}
for (obj.properties.slice()) |item| {
- const key = item.key.?.asString(allocator) orelse {
- log.addErrorFmt(&source, item.key.?.loc, allocator,
- \\{0s} expects a map of specifiers, e.g.
- \\"{0s}": {{
- \\ "bun": "latest"
- \\}}
- , .{group.prop}) catch {};
- return error.InvalidPackageJSON;
- };
+ const key = item.key.?.asString(allocator).?;
const value = item.value.?.asString(allocator) orelse {
log.addErrorFmt(&source, item.value.?.loc, allocator,
\\{0s} expects a map of specifiers, e.g.
@@ -3508,6 +3868,10 @@ pub const Package = extern struct {
}
}
+ if (comptime features.is_main) {
+ lockfile.overrides.parseCount(lockfile, json, &string_builder);
+ }
+
try string_builder.allocate();
try lockfile.buffers.dependencies.ensureUnusedCapacity(lockfile.allocator, total_dependencies_count);
try lockfile.buffers.resolutions.ensureUnusedCapacity(lockfile.allocator, total_dependencies_count);
@@ -3729,24 +4093,34 @@ pub const Package = extern struct {
lockfile.buffers.dependencies.items = lockfile.buffers.dependencies.items.ptr[0..new_len];
lockfile.buffers.resolutions.items = lockfile.buffers.resolutions.items.ptr[0..new_len];
+ // This function depends on package.dependencies being set, so it is done at the very end.
+ if (comptime features.is_main) {
+ try lockfile.overrides.parseAppend(lockfile, package, log, source, json, &string_builder);
+ }
+
string_builder.clamp();
}
- pub const List = std.MultiArrayList(Lockfile.Package);
+ pub const List = bun.MultiArrayList(Lockfile.Package);
pub const Meta = extern struct {
+ // TODO: when we bump the lockfile version, we should reorder this to:
+ // id(32), arch(16), os(16), origin(8), man_dir(8), integrity(72 align 8)
+ // should allow us to remove padding bytes
+
+ // TODO: remove origin. it doesn't do anything and can be inferred from the resolution
origin: Origin = Origin.npm,
_padding_origin: u8 = 0,
arch: Npm.Architecture = Npm.Architecture.all,
os: Npm.OperatingSystem = Npm.OperatingSystem.all,
-
_padding_os: u16 = 0,
id: PackageID = invalid_package_id,
man_dir: String = String{},
integrity: Integrity = Integrity{},
+ _padding_integrity: [3]u8 = .{0} ** 3,
/// Does the `cpu` arch and `os` match the requirements listed in the package?
/// This is completely unrelated to "devDependencies", "peerDependencies", "optionalDependencies" etc
@@ -3759,11 +4133,14 @@ pub const Package = extern struct {
}
pub fn clone(this: *const Meta, id: PackageID, buf: []const u8, comptime StringBuilderType: type, builder: StringBuilderType) Meta {
- var new = this.*;
- new.id = id;
- new.man_dir = builder.append(String, this.man_dir.slice(buf));
-
- return new;
+ return Meta{
+ .id = id,
+ .man_dir = builder.append(String, this.man_dir.slice(buf)),
+ .integrity = this.integrity,
+ .arch = this.arch,
+ .os = this.os,
+ .origin = this.origin,
+ };
}
};
@@ -3840,6 +4217,8 @@ pub const Package = extern struct {
inline for (FieldsEnum.fields) |field| {
const value = sliced.items(@field(Lockfile.Package.List.Field, field.name));
+ if (comptime Environment.allow_assert)
+ debug("save(\"{s}\") = {d} bytes", .{ field.name, std.mem.sliceAsBytes(value).len });
comptime assertNoUninitializedPadding(@TypeOf(value));
try writer.writeAll(std.mem.sliceAsBytes(value));
@@ -3921,16 +4300,20 @@ pub fn deinit(this: *Lockfile) void {
this.trusted_dependencies.deinit(this.allocator);
this.workspace_paths.deinit(this.allocator);
this.workspace_versions.deinit(this.allocator);
+ this.overrides.deinit(this.allocator);
}
const Buffers = struct {
trees: Tree.List = .{},
hoisted_dependencies: DependencyIDList = .{},
+ /// This is the underlying buffer used for the `resolutions` external slices inside of `Package`
+ /// Should be the same length as `dependencies`
resolutions: PackageIDList = .{},
+ /// This is the underlying buffer used for the `dependencies` external slices inside of `Package`
dependencies: DependencyList = .{},
+ /// This is the underlying buffer used for any `Semver.ExternalString` instance in the lockfile
extern_strings: ExternalStringBuffer = .{},
- // node_modules_folders: NodeModulesFolderList = NodeModulesFolderList{},
- // node_modules_package_ids: PackageIDList = PackageIDList{},
+ /// This is where all non-inlinable `Semver.String`s are stored.
string_bytes: StringBuffer = .{},
pub fn deinit(this: *Buffers, allocator: Allocator) void {
@@ -4221,6 +4604,8 @@ pub const Serializer = struct {
const header_bytes: string = "#!/usr/bin/env bun\n" ++ version;
const has_workspace_package_ids_tag: u64 = @bitCast([_]u8{ 'w', 'O', 'r', 'K', 's', 'P', 'a', 'C' });
+ const has_trusted_dependencies_tag: u64 = @bitCast([_]u8{ 't', 'R', 'u', 'S', 't', 'E', 'D', 'd' });
+ const has_overrides_tag: u64 = @bitCast([_]u8{ 'o', 'V', 'e', 'R', 'r', 'i', 'D', 's' });
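+ // Each optional section is written as an 8-byte ASCII tag (bit-cast to u64) followed by a
+ // length-prefixed array via `writeArray`. On load, the next u64 is peeked and, when the tag
+ // does not match, the stream is rewound 8 bytes (see the read path below), so lockfiles
+ // without these sections still parse.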
pub fn save(this: *Lockfile, comptime StreamType: type, stream: StreamType) !void {
var old_package_list = this.packages;
@@ -4282,6 +4667,47 @@ pub const Serializer = struct {
);
}
+ if (this.trusted_dependencies.count() > 0) {
+ try writer.writeAll(std.mem.asBytes(&has_trusted_dependencies_tag));
+
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []u32,
+ this.trusted_dependencies.keys(),
+ );
+ }
+
+ if (this.overrides.map.count() > 0) {
+ try writer.writeAll(std.mem.asBytes(&has_overrides_tag));
+
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []PackageNameHash,
+ this.overrides.map.keys(),
+ );
+ var external_overrides = try std.ArrayListUnmanaged(Dependency.External).initCapacity(z_allocator, this.overrides.map.count());
+ defer external_overrides.deinit(z_allocator);
+ external_overrides.items.len = this.overrides.map.count();
+ for (external_overrides.items, this.overrides.map.values()) |*dest, src| {
+ dest.* = src.toExternal();
+ }
+
+ try Lockfile.Buffers.writeArray(
+ StreamType,
+ stream,
+ @TypeOf(&writer),
+ &writer,
+ []Dependency.External,
+ external_overrides.items,
+ );
+ }
+
const end = try stream.getPos();
try writer.writeAll(&alignment_bytes_to_repeat_buffer);
@@ -4393,6 +4819,66 @@ pub const Serializer = struct {
}
}
+ {
+ const remaining_in_buffer = total_buffer_size -| stream.pos;
+
+ if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) {
+ const next_num = try reader.readIntLittle(u64);
+ if (next_num == has_trusted_dependencies_tag) {
+ var trusted_dependencies_hashes = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(u32),
+ );
+ defer trusted_dependencies_hashes.deinit(allocator);
+
+ try lockfile.trusted_dependencies.ensureTotalCapacity(allocator, trusted_dependencies_hashes.items.len);
+
+ lockfile.trusted_dependencies.entries.len = trusted_dependencies_hashes.items.len;
+ @memcpy(lockfile.trusted_dependencies.keys(), trusted_dependencies_hashes.items);
+ try lockfile.trusted_dependencies.reIndex(allocator);
+ } else {
+ stream.pos -= 8;
+ }
+ }
+ }
+
+ {
+ const remaining_in_buffer = total_buffer_size -| stream.pos;
+
+ if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) {
+ const next_num = try reader.readIntLittle(u64);
+ if (next_num == has_overrides_tag) {
+ var overrides_name_hashes = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(PackageNameHash),
+ );
+ defer overrides_name_hashes.deinit(allocator);
+
+ var map = lockfile.overrides.map;
+ defer lockfile.overrides.map = map;
+
+ try map.ensureTotalCapacity(allocator, overrides_name_hashes.items.len);
+ var override_versions_external = try Lockfile.Buffers.readArray(
+ stream,
+ allocator,
+ std.ArrayListUnmanaged(Dependency.External),
+ );
+ defer override_versions_external.deinit(allocator);
+ const context: Dependency.Context = .{
+ .allocator = allocator,
+ .log = log,
+ .buffer = lockfile.buffers.string_bytes.items,
+ };
+ for (overrides_name_hashes.items, override_versions_external.items) |name, value| {
+ map.putAssumeCapacity(name, Dependency.toDependency(value, context));
+ }
+ } else {
+ stream.pos -= 8;
+ }
+ }
+ }
+
lockfile.scratch = Lockfile.Scratch.init(allocator);
lockfile.package_index = PackageIndex.Map.initContext(allocator, .{});
lockfile.string_pool = StringPool.initContext(allocator, .{});
@@ -4432,7 +4918,7 @@ pub fn hasMetaHashChanged(this: *Lockfile, print_name_version_string: bool) !boo
this.meta_hash = try this.generateMetaHash(print_name_version_string);
return !strings.eqlLong(&previous_meta_hash, &this.meta_hash, false);
}
-fn generateMetaHash(this: *Lockfile, print_name_version_string: bool) !MetaHash {
+pub fn generateMetaHash(this: *Lockfile, print_name_version_string: bool) !MetaHash {
if (this.packages.len <= 1)
return zero_hash;
@@ -4562,3 +5048,294 @@ pub fn resolve(this: *Lockfile, package_name: []const u8, version: Dependency.Ve
return null;
}
+
+pub fn jsonStringifyDependency(this: *const Lockfile, w: anytype, dep: Dependency, res: ?PackageID) !void {
+ const sb = this.buffers.string_bytes.items;
+ var buf: [2048]u8 = undefined;
+
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("literal");
+ try w.write(dep.version.literal.slice(sb));
+
+ try w.objectField(@tagName(dep.version.tag));
+ switch (dep.version.tag) {
+ .uninitialized => try w.write(null),
+ .npm => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Dependency.Version.NpmInfo = dep.version.value.npm;
+
+ try w.objectField("name");
+ try w.write(info.name.slice(sb));
+
+ try w.objectField("version");
+ try w.write(try std.fmt.bufPrint(&buf, "{}", .{info.version}));
+ },
+ .dist_tag => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Dependency.Version.TagInfo = dep.version.value.dist_tag;
+
+ try w.objectField("name");
+ try w.write(info.name.slice(sb));
+
+ try w.objectField("tag");
+ try w.write(info.name.slice(sb));
+ },
+ .tarball => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Dependency.Version.TarballInfo = dep.version.value.tarball;
+ try w.objectField(@tagName(info.uri));
+ try w.write(switch (info.uri) {
+ inline else => |s| s.slice(sb),
+ });
+
+ try w.objectField("package_name");
+ try w.write(info.package_name.slice(sb));
+ },
+ .folder => {
+ try w.write(dep.version.value.folder.slice(sb));
+ },
+ .symlink => {
+ try w.write(dep.version.value.symlink.slice(sb));
+ },
+ .workspace => {
+ try w.write(dep.version.value.workspace.slice(sb));
+ },
+ .git => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Repository = dep.version.value.git;
+
+ try w.objectField("owner");
+ try w.write(info.owner.slice(sb));
+ try w.objectField("repo");
+ try w.write(info.repo.slice(sb));
+ try w.objectField("committish");
+ try w.write(info.committish.slice(sb));
+ try w.objectField("resolved");
+ try w.write(info.resolved.slice(sb));
+ try w.objectField("package_name");
+ try w.write(info.package_name.slice(sb));
+ },
+ .github => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const info: Repository = dep.version.value.github;
+
+ try w.objectField("owner");
+ try w.write(info.owner.slice(sb));
+ try w.objectField("repo");
+ try w.write(info.repo.slice(sb));
+ try w.objectField("committish");
+ try w.write(info.committish.slice(sb));
+ try w.objectField("resolved");
+ try w.write(info.resolved.slice(sb));
+ try w.objectField("package_name");
+ try w.write(info.package_name.slice(sb));
+ },
+ }
+
+ try w.objectField("resolved_id");
+ try w.write(if (res) |r| if (r == invalid_package_id) null else r else null);
+
+ const behavior = try std.fmt.bufPrint(&buf, "{}", .{dep.behavior});
+ try w.objectField("behavior");
+ try w.write(behavior);
+}
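+// For reference, the emitted JSON for an npm dependency looks roughly like this
+// (illustrative values, not taken from a real lockfile):
+//
+//   {
+//     "literal": "^4.17.21",
+//     "npm": { "name": "lodash", "version": ">=4.17.21 <5.0.0" },
+//     "resolved_id": 3,
+//     "behavior": "..."
+//   }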
+
+pub fn jsonStringify(this: *const Lockfile, w: anytype) !void {
+ var buf: [2048]u8 = undefined;
+ const sb = this.buffers.string_bytes.items;
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("format");
+ try w.write(@tagName(this.format));
+ try w.objectField("meta_hash");
+ try w.write(std.fmt.bytesToHex(this.meta_hash, .lower));
+
+ {
+ try w.objectField("package_index");
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ var iter = this.package_index.iterator();
+ while (iter.next()) |it| {
+ const entry: PackageIndex.Entry = it.value_ptr.*;
+ const first_id = switch (entry) {
+ .PackageID => |id| id,
+ .PackageIDMultiple => |ids| ids.items[0],
+ };
+ const name = this.packages.items(.name)[first_id].slice(sb);
+ try w.objectField(name);
+ switch (entry) {
+ .PackageID => |id| try w.write(id),
+ .PackageIDMultiple => |ids| {
+ try w.beginArray();
+ for (ids.items) |id| {
+ try w.write(id);
+ }
+ try w.endArray();
+ },
+ }
+ }
+ }
+ {
+ try w.objectField("packages");
+ try w.beginArray();
+ defer w.endArray() catch {};
+
+ for (0..this.packages.len) |i| {
+ const pkg: Package = this.packages.get(i);
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("id");
+ try w.write(i);
+
+ try w.objectField("name");
+ try w.write(pkg.name.slice(sb));
+
+ try w.objectField("name_hash");
+ try w.write(pkg.name_hash);
+
+ try w.objectField("resolution");
+ if (pkg.resolution.tag == .uninitialized) {
+ try w.write(null);
+ } else {
+ const b = try std.fmt.bufPrint(&buf, "{s} {s}", .{ @tagName(pkg.resolution.tag), pkg.resolution.fmt(sb) });
+ try w.write(b);
+ }
+
+ try w.objectField("dependencies");
+ {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ for (pkg.dependencies.get(this.buffers.dependencies.items), pkg.resolutions.get(this.buffers.resolutions.items)) |dep_, res| {
+ const dep: Dependency = dep_;
+ try w.objectField(dep.name.slice(sb));
+ try this.jsonStringifyDependency(w, dep, res);
+ }
+ }
+
+ if (@as(u16, @intFromEnum(pkg.meta.arch)) != Npm.Architecture.all_value) {
+ try w.objectField("arch");
+ try w.beginArray();
+ defer w.endArray() catch {};
+
+ for (Npm.Architecture.NameMap.kvs) |kv| {
+ if (pkg.meta.arch.has(kv.value)) {
+ try w.write(kv.key);
+ }
+ }
+ }
+
+ if (@as(u16, @intFromEnum(pkg.meta.os)) != Npm.OperatingSystem.all_value) {
+ try w.objectField("os");
+ try w.beginArray();
+ defer w.endArray() catch {};
+
+ for (Npm.OperatingSystem.NameMap.kvs) |kv| {
+ if (pkg.meta.os.has(kv.value)) {
+ try w.write(kv.key);
+ }
+ }
+ }
+
+ try w.objectField("integrity");
+ if (pkg.meta.integrity.tag != .unknown) {
+ try w.write(try std.fmt.bufPrint(&buf, "{}", .{pkg.meta.integrity}));
+ } else {
+ try w.write(null);
+ }
+
+ try w.objectField("man_dir");
+ try w.write(pkg.meta.man_dir.slice(sb));
+
+ try w.objectField("origin");
+ try w.write(@tagName(pkg.meta.origin));
+
+ try w.objectField("bin");
+ switch (pkg.bin.tag) {
+ .none => try w.write(null),
+ .file => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("file");
+ try w.write(pkg.bin.value.file.slice(sb));
+ },
+ .named_file => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("name");
+ try w.write(pkg.bin.value.named_file[0].slice(sb));
+
+ try w.objectField("file");
+ try w.write(pkg.bin.value.named_file[1].slice(sb));
+ },
+ .dir => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ try w.objectField("dir");
+ try w.write(pkg.bin.value.dir.slice(sb));
+ },
+ .map => {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ const data: []const ExternalString = pkg.bin.value.map.get(this.buffers.extern_strings.items);
+ var bin_i: usize = 0;
+ while (bin_i < data.len) : (bin_i += 2) {
+ try w.objectField(data[bin_i].slice(sb));
+ try w.write(data[bin_i + 1].slice(sb));
+ }
+ },
+ }
+
+ {
+ try w.objectField("scripts");
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ inline for (comptime std.meta.fieldNames(Lockfile.Scripts)) |field_name| {
+ const script = @field(pkg.scripts, field_name).slice(sb);
+ if (script.len > 0) {
+ try w.objectField(field_name);
+ try w.write(script);
+ }
+ }
+ }
+ }
+ }
+
+ try w.objectField("workspace_paths");
+ {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ for (this.workspace_paths.keys(), this.workspace_paths.values()) |k, v| {
+ try w.objectField(try std.fmt.bufPrint(&buf, "{d}", .{k}));
+ try w.write(v.slice(sb));
+ }
+ }
+ try w.objectField("workspace_versions");
+ {
+ try w.beginObject();
+ defer w.endObject() catch {};
+
+ for (this.workspace_versions.keys(), this.workspace_versions.values()) |k, v| {
+ try w.objectField(try std.fmt.bufPrint(&buf, "{d}", .{k}));
+ try w.write(try std.fmt.bufPrint(&buf, "{}", .{v.fmt(sb)}));
+ }
+ }
+}
diff --git a/src/install/migration.zig b/src/install/migration.zig
new file mode 100644
index 000000000..d74be7265
--- /dev/null
+++ b/src/install/migration.zig
@@ -0,0 +1,947 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+
+const bun = @import("root").bun;
+const string = bun.string;
+const Output = bun.Output;
+const Global = bun.Global;
+const Environment = bun.Environment;
+const strings = bun.strings;
+const MutableString = bun.MutableString;
+const stringZ = bun.stringZ;
+const logger = bun.logger;
+
+const Install = @import("./install.zig");
+const Resolution = @import("./resolution.zig").Resolution;
+const Dependency = @import("./dependency.zig");
+const VersionedURL = @import("./versioned_url.zig");
+const Npm = @import("./npm.zig");
+const Integrity = @import("./integrity.zig").Integrity;
+const Bin = @import("./bin.zig").Bin;
+
+const Semver = @import("./semver.zig");
+const String = Semver.String;
+const ExternalString = Semver.ExternalString;
+const stringHash = String.Builder.stringHash;
+
+const Lockfile = @import("./lockfile.zig");
+const LoadFromDiskResult = Lockfile.LoadFromDiskResult;
+
+const JSAst = bun.JSAst;
+const Expr = JSAst.Expr;
+const B = JSAst.B;
+const E = JSAst.E;
+const G = JSAst.G;
+const S = JSAst.S;
+
+const debug = Output.scoped(.migrate, false);
+
+pub fn detectAndLoadOtherLockfile(this: *Lockfile, allocator: Allocator, log: *logger.Log, bun_lockfile_path: stringZ) LoadFromDiskResult {
+ const dirname = bun_lockfile_path[0 .. strings.lastIndexOfChar(bun_lockfile_path, '/') orelse 0];
+ // check for package-lock.json, yarn.lock, etc...
+ // if it exists, do an in-memory migration
+ var buf: [bun.MAX_PATH_BYTES]u8 = undefined;
+ @memcpy(buf[0..dirname.len], dirname);
+
+ const cwd = std.fs.cwd();
+
+ npm: {
+ const npm_lockfile_name = "package-lock.json";
+ @memcpy(buf[dirname.len .. dirname.len + npm_lockfile_name.len], npm_lockfile_name);
+ buf[dirname.len + npm_lockfile_name.len] = 0;
+ const lockfile_path = buf[0 .. dirname.len + npm_lockfile_name.len :0];
+ var timer = std.time.Timer.start() catch unreachable;
+ const file = cwd.openFileZ(lockfile_path, .{ .mode = .read_only }) catch break :npm;
+ defer file.close();
+ var data = file.readToEndAlloc(allocator, std.math.maxInt(usize)) catch |err| {
+ return LoadFromDiskResult{ .err = .{ .step = .migrating, .value = err } };
+ };
+ const lockfile = migrateNPMLockfile(this, allocator, log, data, lockfile_path) catch |err| {
+ if (err == error.NPMLockfileVersionMismatch) {
+ Output.prettyErrorln(
+ \\<red><b>error<r><d>:<r> Please upgrade package-lock.json to lockfileVersion 3
+ \\
+ \\Run 'npm i --lockfile-version 3 --frozen-lockfile' to upgrade your lockfile without changing dependencies.
+ , .{});
+ Global.exit(1);
+ }
+ if (Environment.allow_assert) {
+ const maybe_trace = @errorReturnTrace();
+ Output.prettyErrorln("Error: {s}", .{@errorName(err)});
+ log.printForLogLevel(Output.errorWriter()) catch {};
+ if (maybe_trace) |trace| {
+ std.debug.dumpStackTrace(trace.*);
+ }
+ Output.prettyErrorln("Invalid NPM package-lock.json\nIn a release build, this would ignore and do a fresh install.\nAborting", .{});
+ Global.exit(1);
+ }
+ return LoadFromDiskResult{ .err = .{ .step = .migrating, .value = err } };
+ };
+
+ if (lockfile == .ok) {
+ Output.printElapsed(@as(f64, @floatFromInt(timer.read())) / std.time.ns_per_ms);
+ Output.prettyError(" ", .{});
+ Output.prettyErrorln("<d>migrated lockfile from <r><green>package-lock.json<r>", .{});
+ Output.flush();
+ }
+
+ return lockfile;
+ }
+
+ return LoadFromDiskResult{ .not_found = {} };
+}
+
+const IdMap = std.StringHashMapUnmanaged(IdMapValue);
+const IdMapValue = struct {
+ /// index into the old package-lock.json package entries.
+ old_json_index: u32,
+ /// this is the new package id for the bun lockfile
+ ///
+ /// - if this new_package_id is set to `package_id_is_link`, it means it's a link
+ /// and to get the actual package id, you need to lookup `.resolved` in the hashmap.
+ /// - if it is `package_id_is_bundled`, it means it's a bundled dependency that was not
+ /// marked by npm, which can happen to some transitive dependencies.
+ new_package_id: u32,
+};
+const package_id_is_link = std.math.maxInt(u32);
+const package_id_is_bundled = std.math.maxInt(u32) - 1;
+
+const unset_package_id = Install.invalid_package_id - 1;
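+// Illustrative IdMap entries (hypothetical values):
+//   "node_modules/foo"                 -> { .old_json_index = 4, .new_package_id = 2 }
+//   "node_modules/app" ("link": true)  -> { .old_json_index = 9, .new_package_id = package_id_is_link }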
+
+const dependency_keys = .{
+ .dependencies,
+ .devDependencies,
+ .peerDependencies,
+ .optionalDependencies,
+};
+
+pub fn migrateNPMLockfile(this: *Lockfile, allocator: Allocator, log: *logger.Log, data: string, path: string) !LoadFromDiskResult {
+ debug("begin lockfile migration", .{});
+
+ try this.initEmpty(allocator);
+ Install.initializeStore();
+
+ const json_src = logger.Source.initPathString(path, data);
+ const json = bun.JSON.ParseJSONUTF8(&json_src, log, allocator) catch return error.InvalidNPMLockfile;
+
+ if (json.data != .e_object) {
+ return error.InvalidNPMLockfile;
+ }
+ if (json.get("lockfileVersion")) |version| {
+ if (!(version.data == .e_number and version.data.e_number.value == 3)) {
+ return error.NPMLockfileVersionMismatch;
+ }
+ } else {
+ return error.InvalidNPMLockfile;
+ }
+
+ // Count pass
+ var builder_ = this.stringBuilder();
+ var builder = &builder_;
+ const name = (if (json.get("name")) |expr| expr.asString(allocator) else null) orelse "";
+ builder.count(name);
+
+ var root_package: *E.Object = undefined;
+ var packages_properties = brk: {
+ const obj = json.get("packages") orelse return error.InvalidNPMLockfile;
+ if (obj.data != .e_object) return error.InvalidNPMLockfile;
+ if (obj.data.e_object.properties.len == 0) return error.InvalidNPMLockfile;
+ const prop1 = obj.data.e_object.properties.at(0);
+ if (prop1.key) |k| {
+ if (k.data != .e_string) return error.InvalidNPMLockfile;
+ // the first key must be "", the root package's self-reference
+ if (k.data.e_string.data.len != 0) return error.InvalidNPMLockfile;
+ if (prop1.value.?.data != .e_object) return error.InvalidNPMLockfile;
+ root_package = prop1.value.?.data.e_object;
+ } else return error.InvalidNPMLockfile;
+ break :brk obj.data.e_object.properties;
+ };
+
+ var num_deps: u32 = 0;
+
+ const workspace_map: ?Lockfile.Package.WorkspaceMap = workspace_map: {
+ if (root_package.get("workspaces")) |wksp| {
+ var workspaces = Lockfile.Package.WorkspaceMap.init(allocator);
+
+ const json_array = switch (wksp.data) {
+ .e_array => |arr| arr,
+ .e_object => |obj| if (obj.get("packages")) |packages| switch (packages.data) {
+ .e_array => |arr| arr,
+ else => return error.InvalidNPMLockfile,
+ } else return error.InvalidNPMLockfile,
+ else => return error.InvalidNPMLockfile,
+ };
+
+ const workspace_packages_count = try Lockfile.Package.processWorkspaceNamesArray(
+ &workspaces,
+ allocator,
+ log,
+ json_array,
+ &json_src,
+ wksp.loc,
+ builder,
+ );
+ debug("found {d} workspace packages", .{workspace_packages_count});
+ num_deps += workspace_packages_count;
+ break :workspace_map workspaces;
+ }
+ break :workspace_map null;
+ };
+
+ // Counting Phase
+ // This "IdMap" is used to make object key lookups faster for the `packages` object
+ // it also lets us resolve linked and bundled packages.
+ var id_map = IdMap{};
+ try id_map.ensureTotalCapacity(allocator, packages_properties.len);
+ var num_extern_strings: u32 = 0;
+ var package_idx: u32 = 0;
+ for (packages_properties.slice(), 0..) |entry, i| {
+ const pkg_path = entry.key.?.asString(allocator).?;
+ if (entry.value.?.data != .e_object)
+ return error.InvalidNPMLockfile;
+
+ const pkg = entry.value.?.data.e_object;
+
+ if (pkg.get("link") != null) {
+ id_map.putAssumeCapacity(
+ pkg_path,
+ IdMapValue{
+ .old_json_index = @truncate(i),
+ .new_package_id = package_id_is_link,
+ },
+ );
+ continue;
+ }
+ if (pkg.get("inBundle")) |x| if (x.data == .e_boolean and x.data.e_boolean.value) {
+ id_map.putAssumeCapacity(
+ pkg_path,
+ IdMapValue{
+ .old_json_index = @truncate(i),
+ .new_package_id = package_id_is_bundled,
+ },
+ );
+ continue;
+ };
+ if (pkg.get("extraneous")) |x| if (x.data == .e_boolean and x.data.e_boolean.value) {
+ continue;
+ };
+
+ id_map.putAssumeCapacity(
+ pkg_path,
+ IdMapValue{
+ .old_json_index = @truncate(i),
+ .new_package_id = package_idx,
+ },
+ );
+ package_idx += 1;
+
+ inline for (dependency_keys) |dep_key| {
+ if (pkg.get(@tagName(dep_key))) |deps| {
+ if (deps.data != .e_object) {
+ return error.InvalidNPMLockfile;
+ }
+ num_deps +|= @as(u32, deps.data.e_object.properties.len);
+
+ for (deps.data.e_object.properties.slice()) |dep| {
+ const dep_name = dep.key.?.asString(allocator).?;
+ const version_string = dep.value.?.asString(allocator) orelse return error.InvalidNPMLockfile;
+
+ builder.count(dep_name);
+ builder.count(version_string);
+
+ // If it's a folder or workspace, pessimistically assume we will need a maximum path
+ switch (Dependency.Version.Tag.infer(version_string)) {
+ .folder, .workspace => builder.cap += bun.MAX_PATH_BYTES,
+ else => {},
+ }
+ }
+ }
+ }
+
+ if (pkg.get("bin")) |bin| {
+ if (bin.data != .e_object) return error.InvalidNPMLockfile;
+ switch (bin.data.e_object.properties.len) {
+ 0 => return error.InvalidNPMLockfile,
+ 1 => {
+ const first_bin = bin.data.e_object.properties.at(0);
+ const key = first_bin.key.?.asString(allocator).?;
+
+ const workspace_entry = if (workspace_map) |map| map.map.get(pkg_path) else null;
+ const is_workspace = workspace_entry != null;
+
+ const pkg_name = if (is_workspace)
+ workspace_entry.?.name
+ else if (entry.value.?.get("name")) |set_name|
+ (set_name.asString(this.allocator) orelse return error.InvalidNPMLockfile)
+ else
+ packageNameFromPath(pkg_path);
+
+ if (!strings.eql(key, pkg_name)) {
+ builder.count(key);
+ }
+ builder.count(first_bin.value.?.asString(allocator) orelse return error.InvalidNPMLockfile);
+ },
+ else => {
+ for (bin.data.e_object.properties.slice()) |bin_entry| {
+ builder.count(bin_entry.key.?.asString(allocator).?);
+ builder.count(bin_entry.value.?.asString(allocator) orelse return error.InvalidNPMLockfile);
+ }
+ num_extern_strings += @truncate(bin.data.e_object.properties.len * 2);
+ },
+ }
+ }
+
+ if (pkg.get("resolved")) |resolved_expr| {
+ const resolved = resolved_expr.asString(allocator) orelse return error.InvalidNPMLockfile;
+ if (strings.hasPrefixComptime(resolved, "file:")) {
+ builder.count(resolved[5..]);
+ } else if (strings.hasPrefixComptime(resolved, "git+")) {
+ builder.count(resolved[4..]);
+ } else {
+ builder.count(resolved);
+
+ // this over-counts slightly, but it would be too hard to determine whether this is
+ // an `npm`/`dist_tag` version (the only times this string is actually used)
+ if (pkg.get("version")) |v| if (v.asString(allocator)) |s| {
+ builder.count(s);
+ };
+ }
+ } else {
+ builder.count(pkg_path);
+ }
+ }
+ if (num_deps == std.math.maxInt(u32)) return error.InvalidNPMLockfile; // lol
+
+ debug("counted {d} dependencies", .{num_deps});
+ debug("counted {d} extern strings", .{num_extern_strings});
+ debug("counted {d} packages", .{package_idx});
+
+ try this.buffers.dependencies.ensureTotalCapacity(allocator, num_deps);
+ try this.buffers.resolutions.ensureTotalCapacity(allocator, num_deps);
+ try this.buffers.extern_strings.ensureTotalCapacity(allocator, num_extern_strings);
+ try this.packages.ensureTotalCapacity(allocator, package_idx);
+ // The package index is overallocated, but we know the upper bound
+ try this.package_index.ensureTotalCapacity(package_idx);
+ try builder.allocate();
+
+ if (workspace_map) |wksp| {
+ try this.workspace_paths.ensureTotalCapacity(allocator, wksp.map.unmanaged.entries.len);
+ try this.workspace_versions.ensureTotalCapacity(allocator, wksp.map.unmanaged.entries.len);
+
+ for (wksp.map.keys(), wksp.map.values()) |k, v| {
+ const name_hash = stringHash(v.name);
+ this.workspace_paths.putAssumeCapacity(name_hash, builder.append(String, k));
+ if (v.version) |version| this.workspace_versions.putAssumeCapacity(name_hash, version);
+ }
+ }
+
+ // Package Building Phase
+ // This initializes every package and sets the resolution to uninitialized
+ for (packages_properties.slice()) |entry| {
+ // this pass is allowed to make more assumptions because we already checked things during
+ // the counting pass
+ const pkg = entry.value.?.data.e_object;
+
+ if (pkg.get("link") != null or if (pkg.get("inBundle") orelse pkg.get("extraneous")) |x| x.data == .e_boolean and x.data.e_boolean.value else false) continue;
+
+ const pkg_path = entry.key.?.asString(allocator).?;
+
+ const workspace_entry = if (workspace_map) |map| map.map.get(pkg_path) else null;
+ const is_workspace = workspace_entry != null;
+
+ const pkg_name = if (is_workspace)
+ workspace_entry.?.name
+ else if (pkg.get("name")) |set_name|
+ (set_name.asString(this.allocator) orelse unreachable)
+ else
+ packageNameFromPath(pkg_path);
+
+ const name_hash = stringHash(pkg_name);
+
+ const package_id: Install.PackageID = @intCast(this.packages.len);
+ if (Environment.allow_assert) {
+ // If this is false, it means we assigned wrong package ids during the
+ // counting phase, where every package is given its id.
+ std.debug.assert(package_id == id_map.get(pkg_path).?.new_package_id);
+ }
+
+ // Instead of calling this.appendPackage, manually append
+ // the other function has some checks that will fail since we have not set resolution+dependencies yet.
+ this.packages.appendAssumeCapacity(Lockfile.Package{
+ .name = builder.appendWithHash(String, pkg_name, name_hash),
+ .name_hash = name_hash,
+
+ // For non-workspace packages these are set to .uninitialized, then resolved in the
+ // third phase. This is because the resolution uses the dependent's version
+ // specifier as a "hint" to resolve the dependency.
+ .resolution = if (is_workspace) Resolution.init(.{
+ // This string is counted by `processWorkspaceNamesArray`
+ .workspace = builder.append(String, pkg_path),
+ }) else Resolution{},
+
+ // we fill this data in later
+ .dependencies = undefined,
+ .resolutions = undefined,
+
+ .meta = .{
+ .id = package_id,
+
+ .origin = if (package_id == 0) .local else .npm,
+
+ .arch = if (pkg.get("cpu")) |cpu_array| arch: {
+ if (cpu_array.data != .e_array) return error.InvalidNPMLockfile;
+ var arch: Npm.Architecture = .none;
+ for (cpu_array.data.e_array.items.slice()) |item| {
+ if (item.data != .e_string) return error.InvalidNPMLockfile;
+ arch = arch.apply(item.data.e_string.data);
+ }
+ break :arch arch;
+ } else .all,
+
+ .os = if (pkg.get("os")) |cpu_array| arch: {
+ if (cpu_array.data != .e_array) return error.InvalidNPMLockfile;
+ var os: Npm.OperatingSystem = .none;
+ for (cpu_array.data.e_array.items.slice()) |item| {
+ if (item.data != .e_string) return error.InvalidNPMLockfile;
+ os = os.apply(item.data.e_string.data);
+ }
+ break :arch os;
+ } else .all,
+
+ .man_dir = String{},
+
+ .integrity = if (pkg.get("integrity")) |integrity|
+ try Integrity.parse(
+ integrity.asString(this.allocator) orelse
+ return error.InvalidNPMLockfile,
+ )
+ else
+ Integrity{},
+ },
+ .bin = if (pkg.get("bin")) |bin| bin: {
+ // we already check these conditions during counting
+ std.debug.assert(bin.data == .e_object);
+ std.debug.assert(bin.data.e_object.properties.len > 0);
+
+ // in npm lockfile, the bin is always an object, even if it is only a single one
+ // we need to detect if it's a single entry and lower it to a file.
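+ // For example (illustrative): "bin": { "foo": "cli.js" } lowers to `.file`
+ // when the package itself is named "foo", and to `.named_file` otherwise.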
+ if (bin.data.e_object.properties.len == 1) {
+ const prop = bin.data.e_object.properties.at(0);
+ const key = prop.key.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ const script_value = prop.value.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+
+ if (strings.eql(key, pkg_name)) {
+ break :bin .{
+ .tag = .file,
+ .value = Bin.Value.init(.{
+ .file = builder.append(String, script_value),
+ }),
+ };
+ }
+
+ break :bin .{
+ .tag = .named_file,
+ .value = Bin.Value.init(.{
+ .named_file = .{
+ builder.append(String, key),
+ builder.append(String, script_value),
+ },
+ }),
+ };
+ }
+
+ const view: Install.ExternalStringList = .{
+ .off = @truncate(this.buffers.extern_strings.items.len),
+ .len = @intCast(bin.data.e_object.properties.len * 2),
+ };
+
+ for (bin.data.e_object.properties.slice()) |bin_entry| {
+ const key = bin_entry.key.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ const script_value = bin_entry.value.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ this.buffers.extern_strings.appendAssumeCapacity(builder.append(ExternalString, key));
+ this.buffers.extern_strings.appendAssumeCapacity(builder.append(ExternalString, script_value));
+ }
+
+ if (Environment.allow_assert) {
+ std.debug.assert(this.buffers.extern_strings.items.len == view.off + view.len);
+ std.debug.assert(this.buffers.extern_strings.items.len <= this.buffers.extern_strings.capacity);
+ }
+
+ break :bin .{
+ .tag = .map,
+ .value = Bin.Value.init(.{
+ .map = view,
+ }),
+ };
+ } else Bin.init(),
+
+ .scripts = .{},
+ });
+
+ if (is_workspace) {
+ std.debug.assert(package_id != 0); // root package should not be in its own workspace
+
+ // we defer doing getOrPutID for non-workspace packages because it depends on the resolution being set.
+ try this.getOrPutID(package_id, name_hash);
+ }
+ }
+
+ if (Environment.allow_assert) {
+ std.debug.assert(this.packages.len == package_idx);
+ }
+
+ // We skip the length bookkeeping because capacity was pre-allocated above. The length may
+ // shrink later, so it's faster to write through raw pointers and assign the final length once at the very end.
+ var dependencies_buf = this.buffers.dependencies.items.ptr[0..num_deps];
+ var resolutions_buf = this.buffers.resolutions.items.ptr[0..num_deps];
+
+ // In allow_assert builds, pre-fill dependencies with empty values and resolutions
+ // with `unset_package_id` so that entries left unset can be detected later.
+ if (Environment.allow_assert) {
+ @memset(dependencies_buf, Dependency{});
+ @memset(resolutions_buf, unset_package_id);
+ }
+
+ var resolutions = this.packages.items(.resolution);
+ var metas = this.packages.items(.meta);
+ var dependencies_list = this.packages.items(.dependencies);
+ var resolution_list = this.packages.items(.resolutions);
+
+ if (Environment.allow_assert) {
+ for (resolutions) |r| {
+ std.debug.assert(r.tag == .uninitialized or r.tag == .workspace);
+ }
+ }
+
+ // Root resolution isn't hit through dependency tracing.
+ resolutions[0] = Resolution.init(.{ .root = {} });
+ metas[0].origin = .local;
+ try this.getOrPutID(0, this.packages.items(.name_hash)[0]);
+
+ // sized well past the maximum path length, in case a malformed lockfile produces an oversized path
+ var name_checking_buf: [bun.MAX_PATH_BYTES * 2]u8 = undefined;
+
+ // Dependency Linking Phase
+ package_idx = 0;
+ var is_first = true;
+ for (packages_properties.slice()) |entry| {
+ // this pass is allowed to make more assumptions because we already checked things during
+ // the counting pass
+ const pkg = entry.value.?.data.e_object;
+
+ if (pkg.get("link") != null or if (pkg.get("inBundle") orelse pkg.get("extraneous")) |x| x.data == .e_boolean and x.data.e_boolean.value else false) continue;
+
+ const pkg_path = entry.key.?.asString(allocator).?;
+
+ const dependencies_start = dependencies_buf.ptr;
+ const resolutions_start = resolutions_buf.ptr;
+
+ // this is in a defer because this loop iteration can end in two places.
+ defer {
+ if (dependencies_start == dependencies_buf.ptr) {
+ dependencies_list[package_idx] = .{ .len = 0 };
+ resolution_list[package_idx] = .{ .len = 0 };
+ } else {
+ // Calculate the offset + length by pointer arithmetic
+ const len: u32 = @truncate((@intFromPtr(resolutions_buf.ptr) - @intFromPtr(resolutions_start)) / @sizeOf(Install.PackageID));
+ if (Environment.allow_assert) {
+ std.debug.assert(len > 0);
+ std.debug.assert(len == ((@intFromPtr(dependencies_buf.ptr) - @intFromPtr(dependencies_start)) / @sizeOf(Dependency)));
+ }
+ dependencies_list[package_idx] = .{
+ .off = @truncate((@intFromPtr(dependencies_start) - @intFromPtr(this.buffers.dependencies.items.ptr)) / @sizeOf(Dependency)),
+ .len = len,
+ };
+ resolution_list[package_idx] = .{
+ .off = @truncate((@intFromPtr(resolutions_start) - @intFromPtr(this.buffers.resolutions.items.ptr)) / @sizeOf(Install.PackageID)),
+ .len = len,
+ };
+ }
+
+ package_idx += 1;
+ }
+
+ // a feature no one has heard about: https://docs.npmjs.com/cli/v10/configuring-npm/package-json#bundledependencies
+ const bundled_dependencies = if (pkg.get("bundleDependencies") orelse pkg.get("bundledDependencies")) |expr| deps: {
+ if (expr.data == .e_boolean) {
+ if (expr.data.e_boolean.value) continue;
+ break :deps null;
+ }
+ if (expr.data != .e_array) return error.InvalidNPMLockfile;
+ const arr: *E.Array = expr.data.e_array;
+ var map = std.StringArrayHashMapUnmanaged(void){};
+ try map.ensureTotalCapacity(allocator, arr.items.len);
+ for (arr.items.slice()) |item| {
+ map.putAssumeCapacity(item.asString(allocator) orelse return error.InvalidNPMLockfile, {});
+ }
+ break :deps map;
+ } else null;
+
+ if (is_first) {
+ is_first = false;
+ if (workspace_map) |wksp| {
+ for (wksp.keys(), wksp.values()) |key, value| {
+ const entry1 = id_map.get(key) orelse return error.InvalidNPMLockfile;
+ const name_hash = stringHash(value.name);
+ const wksp_name = builder.append(String, value.name);
+ const wksp_path = builder.append(String, key);
+ dependencies_buf[0] = Dependency{
+ .name = wksp_name,
+ .name_hash = name_hash,
+ .version = .{
+ .tag = .workspace,
+ .literal = wksp_path,
+ .value = .{
+ .workspace = wksp_path,
+ },
+ },
+ .behavior = .{
+ .workspace = true,
+ },
+ };
+ resolutions_buf[0] = entry1.new_package_id;
+
+ dependencies_buf = dependencies_buf[1..];
+ resolutions_buf = resolutions_buf[1..];
+ }
+ }
+ }
+
+ inline for (dependency_keys) |dep_key| {
+ if (pkg.get(@tagName(dep_key))) |deps| {
+ // fetch the peerDependenciesMeta if it exists
+ // this is only done for peerDependencies, obviously
+ const peer_dep_meta = if (dep_key == .peerDependencies)
+ if (pkg.get("peerDependenciesMeta")) |expr| peer_dep_meta: {
+ if (expr.data != .e_object) return error.InvalidNPMLockfile;
+ break :peer_dep_meta expr.data.e_object;
+ } else null
+ else
+ void{};
+
+ if (deps.data != .e_object) return error.InvalidNPMLockfile;
+ const properties = deps.data.e_object.properties;
+
+ dep_loop: for (properties.slice()) |prop| {
+ const name_bytes = prop.key.?.asString(this.allocator).?;
+ if (bundled_dependencies != null and bundled_dependencies.?.getIndex(name_bytes) != null) continue :dep_loop;
+
+ const version_bytes = prop.value.?.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ const name_hash = stringHash(name_bytes);
+ const dep_name = builder.appendWithHash(String, name_bytes, name_hash);
+
+ const dep_version = builder.append(String, version_bytes);
+ const sliced = dep_version.sliced(this.buffers.string_bytes.items);
+
+ debug("parsing {s}, {s}\n", .{ name_bytes, version_bytes });
+ const version = Dependency.parse(
+ this.allocator,
+ dep_name,
+ sliced.slice,
+ &sliced,
+ log,
+ ) orelse {
+ return error.InvalidNPMLockfile;
+ };
+ debug("-> {s}, {}\n", .{ @tagName(version.tag), version.value });
+
+ if (Environment.allow_assert) {
+ std.debug.assert(version.tag != .uninitialized);
+ }
+
+ const str_node_modules = if (pkg_path.len == 0) "node_modules/" else "/node_modules/";
+ const suffix_len = str_node_modules.len + name_bytes.len;
+
+ var buf_len: u32 = @as(u32, @intCast(pkg_path.len + suffix_len));
+ if (buf_len > name_checking_buf.len) {
+ return error.PathTooLong;
+ }
+
+ bun.copy(u8, name_checking_buf[0..pkg_path.len], pkg_path);
+ bun.copy(u8, name_checking_buf[pkg_path.len .. pkg_path.len + str_node_modules.len], str_node_modules);
+ bun.copy(u8, name_checking_buf[pkg_path.len + str_node_modules.len .. pkg_path.len + suffix_len], name_bytes);
+
+ while (true) {
+ debug("checking {s}", .{name_checking_buf[0..buf_len]});
+ if (id_map.get(name_checking_buf[0..buf_len])) |found_| {
+ var found = found_;
+ if (found.new_package_id == package_id_is_link) {
+ // it is a workspace package, resolve from the "link": true entry to the real entry.
+ const ref_pkg = packages_properties.at(found.old_json_index).value.?.data.e_object;
+ // the `else` here is technically possible to hit
+ const resolved_v = ref_pkg.get("resolved") orelse return error.LockfileWorkspaceMissingResolved;
+ const resolved = resolved_v.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+ found = (id_map.get(resolved) orelse return error.InvalidNPMLockfile);
+ } else if (found.new_package_id == package_id_is_bundled) {
+ debug("skipping bundled dependency {s}", .{name_bytes});
+ continue :dep_loop;
+ }
+
+ const id = found.new_package_id;
+
+ var is_workspace = resolutions[id].tag == .workspace;
+
+ dependencies_buf[0] = Dependency{
+ .name = dep_name,
+ .name_hash = name_hash,
+ .version = version,
+ .behavior = .{
+ .normal = dep_key == .dependencies,
+ .optional = dep_key == .optionalDependencies,
+ .dev = dep_key == .devDependencies,
+ .peer = dep_key == .peerDependencies,
+ .workspace = is_workspace,
+ },
+ };
+ resolutions_buf[0] = id;
+
+ dependencies_buf = dependencies_buf[1..];
+ resolutions_buf = resolutions_buf[1..];
+
+ // If the package resolution is not set, resolve the target package
+ // using the information we have from the dependency declaration.
+ if (resolutions[id].tag == .uninitialized) {
+ debug("resolving '{s}'", .{name_bytes});
+
+ const res = resolved: {
+ const dep_pkg = packages_properties.at(found.old_json_index).value.?.data.e_object;
+ const npm_resolution = dep_pkg.get("resolved") orelse {
+ break :resolved Resolution.init(.{
+ .folder = builder.append(
+ String,
+ packages_properties.at(found.old_json_index).key.?.asString(allocator).?,
+ ),
+ });
+ };
+ const dep_resolved = npm_resolution.asString(this.allocator) orelse return error.InvalidNPMLockfile;
+
+ break :resolved switch (version.tag) {
+ .uninitialized => std.debug.panic("Version string {s} resolved to `.uninitialized`", .{version_bytes}),
+ .npm, .dist_tag => res: {
+ // It is theoretically possible to hit this in a case where the resolved dependency is NOT
+ // an npm dependency, but that case is so convoluted that it is not worth handling.
+ //
+ // Deleting 'package-lock.json' would completely break the installation of the project.
+ //
+ // We assume that the given URL is to *some* npm registry, or the resolution is to a workspace package.
+ // If it is a workspace package, then this branch will not be hit as the resolution was already set earlier.
+ const dep_actual_version = (dep_pkg.get("version") orelse return error.InvalidNPMLockfile)
+ .asString(this.allocator) orelse return error.InvalidNPMLockfile;
+
+ const dep_actual_version_str = builder.append(String, dep_actual_version);
+ const dep_actual_version_sliced = dep_actual_version_str.sliced(this.buffers.string_bytes.items);
+
+ break :res Resolution.init(.{
+ .npm = .{
+ .url = builder.append(String, dep_resolved),
+ .version = Semver.Version.parse(dep_actual_version_sliced).version.fill(),
+ },
+ });
+ },
+ .tarball => if (strings.hasPrefixComptime(dep_resolved, "file:"))
+ Resolution.init(.{ .local_tarball = builder.append(String, dep_resolved[5..]) })
+ else
+ Resolution.init(.{ .remote_tarball = builder.append(String, dep_resolved) }),
+ .folder => Resolution.init(.{ .folder = builder.append(String, dep_resolved) }),
+ // not sure if this is possible to hit
+ .symlink => Resolution.init(.{ .folder = builder.append(String, dep_resolved) }),
+ .workspace => workspace: {
+ var input = builder.append(String, dep_resolved).sliced(this.buffers.string_bytes.items);
+ if (strings.hasPrefixComptime(input.slice, "workspace:")) {
+ input = input.sub(input.slice["workspace:".len..]);
+ }
+ break :workspace Resolution.init(.{
+ .workspace = input.value(),
+ });
+ },
+ .git => res: {
+ const str = (if (strings.hasPrefixComptime(dep_resolved, "git+"))
+ builder.append(String, dep_resolved[4..])
+ else
+ builder.append(String, dep_resolved))
+ .sliced(this.buffers.string_bytes.items);
+
+ const hash_index = strings.lastIndexOfChar(str.slice, '#') orelse return error.InvalidNPMLockfile;
+
+ const commit = str.sub(str.slice[hash_index + 1 ..]).value();
+ break :res Resolution.init(.{
+ .git = .{
+ .owner = version.value.git.owner,
+ .repo = str.sub(str.slice[0..hash_index]).value(),
+ .committish = commit,
+ .resolved = commit,
+ .package_name = dep_name,
+ },
+ });
+ },
+ .github => res: {
+ const str = (if (strings.hasPrefixComptime(dep_resolved, "git+"))
+ builder.append(String, dep_resolved[4..])
+ else
+ builder.append(String, dep_resolved))
+ .sliced(this.buffers.string_bytes.items);
+
+ const hash_index = strings.lastIndexOfChar(str.slice, '#') orelse return error.InvalidNPMLockfile;
+
+ const commit = str.sub(str.slice[hash_index + 1 ..]).value();
+ break :res Resolution.init(.{
+ .git = .{
+ .owner = version.value.github.owner,
+ .repo = str.sub(str.slice[0..hash_index]).value(),
+ .committish = commit,
+ .resolved = commit,
+ .package_name = dep_name,
+ },
+ });
+ },
+ };
+ };
+ debug("-> {}", .{res.fmtForDebug(this.buffers.string_bytes.items)});
+
+ resolutions[id] = res;
+ metas[id].origin = switch (res.tag) {
+ // This works?
+ .root => .local,
+ else => .npm,
+ };
+
+ try this.getOrPutID(id, this.packages.items(.name_hash)[id]);
+ }
+
+ continue :dep_loop;
+ }
+ // Step up one level: strip the innermost "node_modules/<name>" segment and
+ // retry with this dependency's name appended, mirroring npm's resolution walk.
+ if (strings.lastIndexOf(name_checking_buf[0..buf_len -| ("node_modules/".len + name_bytes.len)], "node_modules/")) |idx| {
+ debug("found 'node_modules/' at {d}", .{idx});
+ buf_len = @intCast(idx + "node_modules/".len + name_bytes.len);
+ bun.copy(u8, name_checking_buf[idx + "node_modules/".len .. idx + "node_modules/".len + name_bytes.len], name_bytes);
+ } else if (!strings.hasPrefixComptime(name_checking_buf[0..buf_len], "node_modules/")) {
+ // this is hit if you start from `packages/etc`, from `packages/etc/node_modules/xyz`
+ // we need to hit the root node_modules
+ buf_len = @intCast("node_modules/".len + name_bytes.len);
+ bun.copy(u8, name_checking_buf[0..buf_len], "node_modules/");
+ bun.copy(u8, name_checking_buf[buf_len - name_bytes.len .. buf_len], name_bytes);
+ } else {
+ // peer dependencies flagged as optional in peerDependenciesMeta are allowed to be missing entirely
+ if (dep_key == .peerDependencies) {
+ if (peer_dep_meta) |o| if (o.get(name_bytes)) |meta| {
+ if (meta.data != .e_object) return error.InvalidNPMLockfile;
+ if (meta.data.e_object.get("optional")) |optional| {
+ if (optional.data != .e_boolean) return error.InvalidNPMLockfile;
+ if (optional.data.e_boolean.value) {
+ dependencies_buf[0] = Dependency{
+ .name = dep_name,
+ .name_hash = name_hash,
+ .version = version,
+ .behavior = .{
+ .normal = dep_key == .dependencies,
+ .optional = true,
+ .dev = dep_key == .devDependencies,
+ .peer = dep_key == .peerDependencies,
+ .workspace = false,
+ },
+ };
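+ // leave it unresolved; invalid_package_id marks a dependency that will not be installed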
+ resolutions_buf[0] = Install.invalid_package_id;
+ dependencies_buf = dependencies_buf[1..];
+ resolutions_buf = resolutions_buf[1..];
+ continue :dep_loop;
+ }
+ }
+ };
+ }
+
+ // It is technically possible for a package-lock.json to list a dependency that has no matching package entry.
+ // It's very unlikely, but possible. When npm sees this, it simply does not install the package and treats it as if it doesn't exist.
+ // In test/cli/install/migrate-fixture, you can observe this for `iconv-lite`.
+ debug("could not find package '{s}' in '{s}'", .{ name_bytes, pkg_path });
+ continue :dep_loop;
+ }
+ }
+ }
+ }
+ }
+ }
+
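+ // the dependency/resolution buffers were preallocated to num_deps entries; shrink them to
+ // the number actually written, computed from how far the cursor pointers advanced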
+ this.buffers.resolutions.items.len = (@intFromPtr(resolutions_buf.ptr) - @intFromPtr(this.buffers.resolutions.items.ptr)) / @sizeOf(Install.PackageID);
+ this.buffers.dependencies.items.len = this.buffers.resolutions.items.len;
+
+ // When allow_assert is enabled, these buffers were prefilled with sentinel values so unwritten entries can be detected here.
+ // Any failure here is our own bug, which makes it safe to disable this check in release builds.
+ if (Environment.allow_assert) {
+ std.debug.assert(this.buffers.dependencies.items.len == (@intFromPtr(dependencies_buf.ptr) - @intFromPtr(this.buffers.dependencies.items.ptr)) / @sizeOf(Dependency));
+ std.debug.assert(this.buffers.dependencies.items.len <= num_deps);
+ var crash = false;
+ for (this.buffers.dependencies.items, 0..) |r, i| {
+ // an all-zero behavior means this dependency slot was never written
+ if (r.behavior.eq(.{})) {
+ debug("dependency index '{d}' was not set", .{i});
+ crash = true;
+ }
+ }
+ for (this.buffers.resolutions.items, 0..) |r, i| {
+ if (r == unset_package_id) {
+ debug("resolution index '{d}' was not set", .{i});
+ crash = true;
+ }
+ }
+ if (crash) {
+ std.debug.panic("Assertion failure, see above", .{});
+ }
+ }
+
+ // A package without a resolution, however, is not our fault:
+ // it can be caused by a bad lockfile containing extra packages. npm normally trims these out automatically.
+ var is_missing_resolutions = false;
+ for (resolutions, 0..) |r, i| {
+ if (r.tag == .uninitialized) {
+ Output.printErrorln("Could not resolve package '{s}' in lockfile.", .{this.packages.items(.name)[i].slice(this.buffers.string_bytes.items)});
+ is_missing_resolutions = true;
+ } else if (Environment.allow_assert) {
+ // Assertion from appendPackage. Running it earlier would always fail because the resolution
+ // had not been written yet; now that all the data is written, there is no excuse for it to fail.
+ //
+ // If this is hit, it means getOrPutID was not called for this package id. Look for where 'resolutions[i]' is set.
+ std.debug.assert(this.getPackageID(this.packages.items(.name_hash)[i], null, &r) != null);
+ }
+ }
+ if (is_missing_resolutions) {
+ return error.NotAllPackagesGotResolved;
+ }
+
+ // if (Environment.isDebug) {
+ // const dump_file = try std.fs.cwd().createFileZ("before-clean.json", .{});
+ // defer dump_file.close();
+ // try std.json.stringify(this, .{ .whitespace = .indent_2 }, dump_file.writer());
+ // }
+
+ // This is technically a memory leak, but it's acceptable: there is no install API, so it can only leak once per process.
+ // The operation is necessary because callers of `loadFromDisk` assume the data is written into the passed `this`.
+ // Skipping the clean would cause `bun install` to install nothing, since the lockfile would have no hoisted trees.
+ this.* = (try this.cleanWithLogger(&[_]Install.PackageManager.UpdateRequest{}, log, false)).*;
+
+ // if (Environment.isDebug) {
+ // const dump_file = try std.fs.cwd().createFileZ("after-clean.json", .{});
+ // defer dump_file.close();
+ // try std.json.stringify(this, .{ .whitespace = .indent_2 }, dump_file.writer());
+ // }
+
+ if (Environment.allow_assert) {
+ try this.verifyData();
+ }
+
+ this.meta_hash = try this.generateMetaHash(false);
+
+ return LoadFromDiskResult{ .ok = this };
+}
+
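+/// Returns the package name portion of a lockfile package path, i.e. everything after the
+/// last "node_modules/" segment, e.g. "node_modules/foo/node_modules/@scope/bar" -> "@scope/bar".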
+fn packageNameFromPath(pkg_path: []const u8) []const u8 {
+ if (pkg_path.len == 0) return "";
+
+ const pkg_name_start: usize = if (strings.lastIndexOf(pkg_path, "/node_modules/")) |last_index|
+ last_index + "/node_modules/".len
+ else if (strings.hasPrefixComptime(pkg_path, "node_modules/"))
+ "node_modules/".len
+ else
+ strings.lastIndexOf(pkg_path, "/") orelse 0;
+
+ return pkg_path[pkg_name_start..];
+}
diff --git a/src/install/npm.zig b/src/install/npm.zig
index 78d0f6061..4cf1c2b71 100644
--- a/src/install/npm.zig
+++ b/src/install/npm.zig
@@ -327,12 +327,18 @@ pub const OperatingSystem = enum(u16) {
return (@intFromEnum(this) & linux) != 0;
} else if (comptime Environment.isMac) {
return (@intFromEnum(this) & darwin) != 0;
+ } else if (comptime Environment.isWindows) {
+ return (@intFromEnum(this) & win32) != 0;
} else {
return false;
}
}
- const NameMap = ComptimeStringMap(u16, .{
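+ /// Whether this OS bitfield includes `other`, a mask value from `NameMap`.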
+ pub inline fn has(this: OperatingSystem, other: u16) bool {
+ return (@intFromEnum(this) & other) != 0;
+ }
+
+ pub const NameMap = ComptimeStringMap(u16, .{
.{ "aix", aix },
.{ "darwin", darwin },
.{ "freebsd", freebsd },
@@ -383,7 +389,7 @@ pub const Architecture = enum(u16) {
pub const all_value: u16 = arm | arm64 | ia32 | mips | mipsel | ppc | ppc64 | s390 | s390x | x32 | x64;
- const NameMap = ComptimeStringMap(u16, .{
+ pub const NameMap = ComptimeStringMap(u16, .{
.{ "arm", arm },
.{ "arm64", arm64 },
.{ "ia32", ia32 },
@@ -397,6 +403,10 @@ pub const Architecture = enum(u16) {
.{ "x64", x64 },
});
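+ /// Whether this architecture bitfield includes `other`, a mask value from `NameMap`.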
+ pub inline fn has(this: Architecture, other: u16) bool {
+ return (@intFromEnum(this) & other) != 0;
+ }
+
pub fn isMatch(this: Architecture) bool {
if (comptime Environment.isAarch64) {
return (@intFromEnum(this) & arm64) != 0;
@@ -800,7 +810,14 @@ pub const PackageManifest = struct {
if (this.findByDistTag("latest")) |result| {
if (group.satisfies(result.version)) {
- return result;
+ if (group.flags.isSet(Semver.Query.Group.Flags.pre)) {
+ if (left.version.order(result.version, this.string_buf, this.string_buf) == .eq) {
+ // for a prerelease range, only use "latest" when its version and tag match the range exactly
+ return result;
+ }
+ } else {
+ return result;
+ }
}
}
diff --git a/src/install/padding_checker.zig b/src/install/padding_checker.zig
index 1d9405a43..52d343b4f 100644
--- a/src/install/padding_checker.zig
+++ b/src/install/padding_checker.zig
@@ -55,7 +55,6 @@ pub fn assertNoUninitializedPadding(comptime T: type) void {
// if (info.layout != .Extern) {
// @compileError("assertNoUninitializedPadding(" ++ @typeName(T) ++ ") expects an extern struct type, got a struct of layout '" ++ @tagName(info.layout) ++ "'");
// }
- var i = 0;
for (info.fields) |field| {
const fieldInfo = @typeInfo(field.type);
switch (fieldInfo) {
@@ -69,9 +68,12 @@ pub fn assertNoUninitializedPadding(comptime T: type) void {
else => {},
}
}
+
if (info_ == .Union) {
return;
}
+
+ var i = 0;
for (info.fields, 0..) |field, j| {
const offset = @offsetOf(T, field.name);
if (offset != i) {
@@ -90,4 +92,17 @@ pub fn assertNoUninitializedPadding(comptime T: type) void {
}
i = offset + @sizeOf(field.type);
}
+
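+ // `i` now points just past the last field; any gap between it and @sizeOf(T) is trailing padding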
+ if (i != @sizeOf(T)) {
+ @compileError(std.fmt.comptimePrint(
+ \\Expected no possibly uninitialized bytes of memory in '{s}', but found a {d}-byte gap at the end of the struct. This can be fixed by adding a trailing padding field like `padding: [{d}]u8 = .{{0}} ** {d},`. For more information, see `padding_checker.zig`
+ ,
+ .{
+ @typeName(T),
+ @sizeOf(T) - i,
+ @sizeOf(T) - i,
+ @sizeOf(T) - i,
+ },
+ ));
+ }
}
diff --git a/src/install/resolution.zig b/src/install/resolution.zig
index 23f3bc5c0..84d43ff1a 100644
--- a/src/install/resolution.zig
+++ b/src/install/resolution.zig
@@ -15,6 +15,14 @@ pub const Resolution = extern struct {
_padding: [7]u8 = .{0} ** 7,
value: Value = .{ .uninitialized = {} },
+ /// Use like Resolution.init(.{ .npm = VersionedURL{ ... } })
+ pub inline fn init(value: anytype) Resolution {
+ return Resolution{
+ .tag = @field(Tag, @typeInfo(@TypeOf(value)).Struct.fields[0].name),
+ .value = Value.init(value),
+ };
+ }
+
pub fn order(
lhs: *const Resolution,
rhs: *const Resolution,
@@ -107,18 +115,22 @@ pub const Resolution = extern struct {
}),
.root => Value.init(.{ .root = {} }),
else => {
- std.debug.panic("Internal error: unexpected resolution tag:,) {}", .{this.tag});
+ std.debug.panic("Internal error: unexpected resolution tag: {}", .{this.tag});
},
},
};
}
- pub fn fmt(this: *const Resolution, buf: []const u8) Formatter {
- return Formatter{ .resolution = this, .buf = buf };
+ pub fn fmt(this: *const Resolution, string_bytes: []const u8) Formatter {
+ return Formatter{ .resolution = this, .buf = string_bytes };
}
- pub fn fmtURL(this: *const Resolution, options: *const PackageManager.Options, buf: []const u8) URLFormatter {
- return URLFormatter{ .resolution = this, .buf = buf, .options = options };
+ pub fn fmtURL(this: *const Resolution, options: *const PackageManager.Options, string_bytes: []const u8) URLFormatter {
+ return URLFormatter{ .resolution = this, .buf = string_bytes, .options = options };
+ }
+
+ pub fn fmtForDebug(this: *const Resolution, string_bytes: []const u8) DebugFormatter {
+ return DebugFormatter{ .resolution = this, .buf = string_bytes };
}
pub fn eql(
@@ -225,6 +237,31 @@ pub const Resolution = extern struct {
}
};
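+ /// Formats a resolution for debug logging, e.g. `Resolution{ .npm = 1.2.3 }`.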
+ pub const DebugFormatter = struct {
+ resolution: *const Resolution,
+ buf: []const u8,
+
+ pub fn format(formatter: DebugFormatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
+ try writer.writeAll("Resolution{ .");
+ try writer.writeAll(std.enums.tagName(Tag, formatter.resolution.tag) orelse "invalid");
+ try writer.writeAll(" = ");
+ switch (formatter.resolution.tag) {
+ .npm => try formatter.resolution.value.npm.version.fmt(formatter.buf).format(layout, opts, writer),
+ .local_tarball => try writer.writeAll(formatter.resolution.value.local_tarball.slice(formatter.buf)),
+ .folder => try writer.writeAll(formatter.resolution.value.folder.slice(formatter.buf)),
+ .remote_tarball => try writer.writeAll(formatter.resolution.value.remote_tarball.slice(formatter.buf)),
+ .git => try formatter.resolution.value.git.formatAs("git+", formatter.buf, layout, opts, writer),
+ .github => try formatter.resolution.value.github.formatAs("github:", formatter.buf, layout, opts, writer),
+ .gitlab => try formatter.resolution.value.gitlab.formatAs("gitlab:", formatter.buf, layout, opts, writer),
+ .workspace => try std.fmt.format(writer, "workspace:{s}", .{formatter.resolution.value.workspace.slice(formatter.buf)}),
+ .symlink => try std.fmt.format(writer, "link:{s}", .{formatter.resolution.value.symlink.slice(formatter.buf)}),
+ .single_file_module => try std.fmt.format(writer, "module:{s}", .{formatter.resolution.value.single_file_module.slice(formatter.buf)}),
+ else => try writer.writeAll("{}"),
+ }
+ try writer.writeAll(" }");
+ }
+ };
+
pub const Value = extern union {
uninitialized: void,
root: void,
diff --git a/src/install/semver.zig b/src/install/semver.zig
index f76238fa5..9572b85e2 100644
--- a/src/install/semver.zig
+++ b/src/install/semver.zig
@@ -576,23 +576,36 @@ pub const SlicedString = struct {
slice: string,
pub inline fn init(buf: string, slice: string) SlicedString {
+ if (Environment.allow_assert) {
+ if (@intFromPtr(buf.ptr) > @intFromPtr(slice.ptr)) {
+ @panic("SlicedString.init buf is not in front of slice");
+ }
+ }
return SlicedString{ .buf = buf, .slice = slice };
}
pub inline fn external(this: SlicedString) ExternalString {
- if (comptime Environment.allow_assert) std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ if (comptime Environment.allow_assert) {
+ std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ }
return ExternalString.init(this.buf, this.slice, bun.Wyhash.hash(0, this.slice));
}
pub inline fn value(this: SlicedString) String {
- if (comptime Environment.allow_assert) std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ if (comptime Environment.allow_assert) {
+ std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.slice.ptr) and ((@intFromPtr(this.slice.ptr) + this.slice.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ }
return String.init(this.buf, this.slice);
}
pub inline fn sub(this: SlicedString, input: string) SlicedString {
- std.debug.assert(@intFromPtr(this.buf.ptr) <= @intFromPtr(this.buf.ptr) and ((@intFromPtr(input.ptr) + input.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)));
+ if (Environment.allow_assert) {
+ if (!(@intFromPtr(this.buf.ptr) <= @intFromPtr(input.ptr) and ((@intFromPtr(input.ptr) + input.len) <= (@intFromPtr(this.buf.ptr) + this.buf.len)))) {
+ @panic("SlicedString.sub input is not a substring of the slice");
+ }
+ }
return SlicedString{ .buf = this.buf, .slice = input };
}
};