author    | 2021-12-30 21:12:32 -0800
committer | 2021-12-30 21:12:32 -0800
commit    | e75c711c68896f5952793601f156c921c814caab (patch)
tree      | f3b30e2281c7231d480bb84503d17b2370866ff9 /src/install
parent    | 8d031f13c0e04629d431176e211a31224b7618c0 (diff)
download  | bun-e75c711c68896f5952793601f156c921c814caab.tar.gz
          | bun-e75c711c68896f5952793601f156c921c814caab.tar.zst
          | bun-e75c711c68896f5952793601f156c921c814caab.zip
Upgrade to latest Zig, remove dependency on patched version of Zig (#96)
* Prepare to upgrade zig
* zig fmt
* AllocGate
* Update data_url.zig
* wip
* few files
* just headers now?
* I think everything works?
* Update mimalloc
* Update hash_map.zig
* Perf improvements to compensate for Allocgate
* Bump
* :camera:
* Update bun.lockb
* Less branching
* [js parser] Slightly reduce memory usage
* Update js_parser.zig
* WIP remove unused
* [JS parser] WIP support for `with` keyword
* Remove more dead code
* Fix all the build errors!
* cleanup
* Move `network_thread` up
* Bump peechy
* Update README.md
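Most of the churn in this change is mechanical fallout from Zig's "Allocgate": `std.mem.Allocator` is now a small value type (data pointer plus vtable pointer) that is passed by value, where the old code passed `*std.mem.Allocator`. A minimal sketch of the new calling convention, using a hypothetical `dupeGreeting` helper purely for illustration:

```zig
const std = @import("std");

// Post-Allocgate: the allocator parameter is `std.mem.Allocator` by value.
// Pre-Allocgate signatures (the `-` lines in the diff below) took `*std.mem.Allocator`.
fn dupeGreeting(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
    // hypothetical helper, purely for illustration
    return std.fmt.allocPrint(allocator, "hello, {s}", .{name});
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator(); // `.allocator()` returns the value-type interface
    const msg = try dupeGreeting(allocator, "bun");
    defer allocator.free(msg);
    std.debug.print("{s}\n", .{msg});
}
```

The same by-value `Allocator` shows up in nearly every signature in the diff below (`Lockfile.loadFromDisk`, `Lockfile.initEmpty`, `PackageManifest.parse`, and so on), alongside the new `rng.random()` call and `_` placeholders for unused parameters.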
Diffstat (limited to 'src/install')
-rw-r--r-- | src/install/bin.zig                       |   1
-rw-r--r-- | src/install/bit_set.zig                   |  18
-rw-r--r-- | src/install/dependency.zig                |   6
-rw-r--r-- | src/install/extract_tarball.zig           |   2
-rw-r--r-- | src/install/install.zig                   | 219
-rw-r--r-- | src/install/integrity.zig                 |   4
-rw-r--r-- | src/install/npm.zig                       |  24
-rw-r--r-- | src/install/repository.zig                |   2
-rw-r--r-- | src/install/resolvers/folder_resolver.zig |   6
-rw-r--r-- | src/install/semver.zig                    |  48
10 files changed, 115 insertions, 215 deletions
diff --git a/src/install/bin.zig b/src/install/bin.zig
index fb343a16d..d40c43a04 100644
--- a/src/install/bin.zig
+++ b/src/install/bin.zig
@@ -157,7 +157,6 @@ pub const Bin = extern struct {
                remain = remain[name.len..];
                remain[0] = std.fs.path.sep;
                remain = remain[1..];
-               const base_len = @ptrToInt(remain.ptr) - @ptrToInt(&path_buf);

                if (comptime Environment.isWindows) {
                    @compileError("Bin.Linker.link() needs to be updated to generate .cmd files on Windows");

diff --git a/src/install/bit_set.zig b/src/install/bit_set.zig
index d788a2ec9..64903c9e9 100644
--- a/src/install/bit_set.zig
+++ b/src/install/bit_set.zig
@@ -476,7 +476,7 @@ pub const DynamicBitSetUnmanaged = struct {

    /// Creates a bit set with no elements present.
    /// If bit_length is not zero, deinit must eventually be called.
-   pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+   pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
        var self = Self{};
        try self.resize(bit_length, false, allocator);
        return self;
@@ -484,7 +484,7 @@
    /// Creates a bit set with all elements present.
    /// If bit_length is not zero, deinit must eventually be called.
-   pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+   pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
        var self = Self{};
        try self.resize(bit_length, true, allocator);
        return self;
@@ -493,7 +493,7 @@
    /// Resizes to a new bit_length. If the new length is larger
    /// than the old length, fills any added bits with `fill`.
    /// If new_len is not zero, deinit must eventually be called.
-   pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void {
+   pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: Allocator) !void {
        const old_len = self.bit_length;
        const old_masks = numMasks(old_len);
@@ -556,12 +556,12 @@
    /// deinitializes the array and releases its memory.
    /// The passed allocator must be the same one used for
    /// init* or resize in the past.
-   pub fn deinit(self: *Self, allocator: *Allocator) void {
+   pub fn deinit(self: *Self, allocator: Allocator) void {
        self.resize(0, false, allocator) catch unreachable;
    }

    /// Creates a duplicate of this bit set, using the new allocator.
-   pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+   pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
        const num_masks = numMasks(self.bit_length);
        var copy = Self{};
        try copy.resize(self.bit_length, false, new_allocator);
@@ -773,13 +773,13 @@ pub const DynamicBitSet = struct {
    pub const ShiftInt = std.math.Log2Int(MaskInt);

    /// The allocator used by this bit set
-   allocator: *Allocator,
+   allocator: Allocator,

    /// The number of valid items in this bit set
    unmanaged: DynamicBitSetUnmanaged = .{},

    /// Creates a bit set with no elements present.
-   pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+   pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
        return Self{
            .unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator),
            .allocator = allocator,
@@ -787,7 +787,7 @@ pub const DynamicBitSet = struct {
    }

    /// Creates a bit set with all elements present.
-   pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+   pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
        return Self{
            .unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator),
            .allocator = allocator,
@@ -808,7 +808,7 @@ pub const DynamicBitSet = struct {
    }

    /// Creates a duplicate of this bit set, using the new allocator.
-   pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+   pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
        return Self{
            .unmanaged = try self.unmanaged.clone(new_allocator),
            .allocator = new_allocator,

diff --git a/src/install/dependency.zig b/src/install/dependency.zig
index 4b027aaff..9c2180334 100644
--- a/src/install/dependency.zig
+++ b/src/install/dependency.zig
@@ -94,7 +94,7 @@ pub const External = extern struct {
    version: Dependency.Version.External,

    pub const Context = struct {
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        log: *logger.Log,
        buffer: []const u8,
    };
@@ -473,7 +473,7 @@ pub fn eqlResolved(a: Dependency, b: Dependency) bool {
    return @as(Dependency.Version.Tag, a.version) == @as(Dependency.Version.Tag, b.version) and a.resolution == b.resolution;
}

-pub fn parse(allocator: *std.mem.Allocator, dependency_: string, sliced: *const SlicedString, log: ?*logger.Log) ?Version {
+pub fn parse(allocator: std.mem.Allocator, dependency_: string, sliced: *const SlicedString, log: ?*logger.Log) ?Version {
    var dependency = std.mem.trimLeft(u8, dependency_, " \t\n\r");

    if (dependency.len == 0) return null;
@@ -500,7 +500,7 @@ pub fn parse(allocator: *std.mem.Allocator, dependency_: string, sliced: *const
}

pub fn parseWithTag(
-   allocator: *std.mem.Allocator,
+   allocator: std.mem.Allocator,
    dependency: string,
    tag: Dependency.Version.Tag,
    sliced: *const SlicedString,

diff --git a/src/install/extract_tarball.zig b/src/install/extract_tarball.zig
index 6057448e5..27c8a811b 100644
--- a/src/install/extract_tarball.zig
+++ b/src/install/extract_tarball.zig
@@ -188,7 +188,7 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !string {
        Output.flush();
        Global.crash();
    };
-   const extracted_file_count = if (PackageManager.verbose_install)
+   _ = if (PackageManager.verbose_install)
        try Archive.extractToDisk(
            zlib_pool.data.list.items,
            temp_destination,

diff --git a/src/install/install.zig b/src/install/install.zig
index 3f88f2e62..cb0f91df0 100644
--- a/src/install/install.zig
+++ b/src/install/install.zig
@@ -1,4 +1,13 @@
-usingnamespace @import("../global.zig");
+const _global = @import("../global.zig");
+const string = _global.string;
+const Output = _global.Output;
+const Global = _global.Global;
+const Environment = _global.Environment;
+const strings = _global.strings;
+const MutableString = _global.MutableString;
+const stringZ = _global.stringZ;
+const default_allocator = _global.default_allocator;
+const C = _global.C;

const std = @import("std");
const JSLexer = @import("../js_lexer.zig");
@@ -128,47 +137,10 @@ pub const ExternalStringMap = extern struct {
    name: ExternalStringList = ExternalStringList{},
    value: ExternalStringList = ExternalStringList{},

-   pub const Iterator = NewIterator(ExternalStringList);
-
    pub const Small = extern struct {
        name: SmallExternalStringList = SmallExternalStringList{},
        value: SmallExternalStringList = SmallExternalStringList{},
-
-       pub const Iterator = NewIterator(SmallExternalStringList);
-
-       pub inline fn iterator(this: Small, buf: []const String) Small.Iterator {
-           return Small.Iterator.init(buf, this.name, this.value);
-       }
    };
-
-   pub inline fn iterator(this: ExternalStringMap, buf: []const String) Iterator {
-       return Iterator.init(buf, this.name, this.value);
-   }
-
-   fn NewIterator(comptime Type: type) type {
-       return struct {
-           const ThisIterator = @This();
-
-           i: usize = 0,
-           names: []const Type.Child,
-           values: []const Type.Child,
-
-           pub fn init(all: []const Type.Child, names: Type, values: Type) ThisIterator {
-               this.names = names.get(all);
-               this.values = values.get(all);
-               return this;
-           }
-
-           pub fn next(this: *ThisIterator) ?[2]Type.Child {
-               if (this.i < this.names.len) {
-                   const ret = [2]Type.Child{ this.names[this.i], this.values[this.i] };
-                   this.i += 1;
-               }
-
-               return null;
-           }
-       };
-   }
};

pub const PackageNameHash = u64;
@@ -176,7 +148,6 @@ pub const PackageNameHash = u64;

pub const Aligner = struct {
    pub fn write(comptime Type: type, comptime Writer: type, writer: Writer, pos: usize) !usize {
        const to_write = std.mem.alignForward(pos, @alignOf(Type)) - pos;
-       var i: usize = 0;
        var remainder: string = alignment_bytes_to_repeat_buffer[0..@minimum(to_write, alignment_bytes_to_repeat_buffer.len)];
        try writer.writeAll(remainder);
@@ -193,7 +164,7 @@ const NetworkTask = struct {
    http: AsyncHTTP = undefined,
    task_id: u64,
    url_buf: []const u8 = &[_]u8{},
-   allocator: *std.mem.Allocator,
+   allocator: std.mem.Allocator,
    request_buffer: MutableString = undefined,
    response_buffer: MutableString = undefined,
    callback: union(Task.Tag) {
@@ -214,7 +185,7 @@ const NetworkTask = struct {
    pub fn forManifest(
        this: *NetworkTask,
        name: string,
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        registry_url: URL,
        loaded_manifest: ?Npm.PackageManifest,
    ) !void {
@@ -304,7 +275,7 @@ const NetworkTask = struct {
    pub fn forTarball(
        this: *NetworkTask,
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        tarball: ExtractTarball,
    ) !void {
        this.url_buf = try ExtractTarball.buildURL(
@@ -390,7 +361,7 @@ pub const Lockfile = struct {
    package_index: PackageIndex.Map,
    unique_packages: Bitset,
    string_pool: StringPool,
-   allocator: *std.mem.Allocator,
+   allocator: std.mem.Allocator,
    scratch: Scratch = Scratch{},

    const Stream = std.io.FixedBufferStream([]u8);
@@ -413,7 +384,7 @@ pub const Lockfile = struct {
        };
    };

-   pub fn loadFromDisk(this: *Lockfile, allocator: *std.mem.Allocator, log: *logger.Log, filename: stringZ) LoadFromDiskResult {
+   pub fn loadFromDisk(this: *Lockfile, allocator: std.mem.Allocator, log: *logger.Log, filename: stringZ) LoadFromDiskResult {
        std.debug.assert(FileSystem.instance_loaded);
        var file = std.fs.cwd().openFileZ(filename, .{ .read = true }) catch |err| {
            return switch (err) {
@@ -552,7 +523,7 @@ pub const Lockfile = struct {
    };

    const Builder = struct {
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        name_hashes: []const PackageNameHash,
        list: ArrayList = ArrayList{},
        resolutions: []const PackageID,
@@ -573,7 +544,6 @@ pub const Lockfile = struct {
            var i: Id = 0;
            var total_packages_count: u32 = 0;

-           var slice = this.list.slice();
            var trees = this.list.items(.tree);
            var packages = this.list.items(.packages);
@@ -653,12 +623,12 @@ pub const Lockfile = struct {
            name_hashes: []const PackageNameHash,
            lists: []Lockfile.PackageIDList,
            trees: []Tree,
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
        ) Id {
            const this_packages = this.packages.get(lists[this.id].items);
            const name_hash = name_hashes[package_id];

-           for (this_packages) |pid, slot| {
+           for (this_packages) |pid| {
                if (name_hashes[pid] == name_hash) {
                    if (pid != package_id) {
                        return dependency_loop;
@@ -691,17 +661,15 @@ pub const Lockfile = struct {
        }
    };

-   pub fn clean(old: *Lockfile, deduped: *u32, updates: []PackageManager.UpdateRequest, options: *const PackageManager.Options) !*Lockfile {
+   pub fn clean(old: *Lockfile, _: *u32, updates: []PackageManager.UpdateRequest, _: *const PackageManager.Options) !*Lockfile {
        // We will only shrink the number of packages here.
        // never grow
-       const max_package_id = old.packages.len;

        if (updates.len > 0) {
            var root_deps: []Dependency = old.packages.items(.dependencies)[0].mut(old.buffers.dependencies.items);
            const old_resolutions: []const PackageID = old.packages.items(.resolutions)[0].get(old.buffers.resolutions.items);
            const resolutions_of_yore: []const Resolution = old.packages.items(.resolution);
-           const old_names = old.packages.items(.name);
            var string_builder = old.stringBuilder();
            for (updates) |update| {
                if (update.version.tag == .uninitialized) {
@@ -721,7 +689,6 @@ pub const Lockfile = struct {
            try string_builder.allocate();
            defer string_builder.clamp();
-           var full_buf = string_builder.ptr.?[0 .. string_builder.cap + old.buffers.string_bytes.items.len];
            var temp_buf: [513]u8 = undefined;

            for (updates) |update, update_i| {
@@ -797,11 +764,6 @@ pub const Lockfile = struct {
        try new.packages.ensureTotalCapacity(old.allocator, old.packages.len);
        try new.buffers.preallocate(old.buffers, old.allocator);

-       const InstallOrder = struct {
-           parent: PackageID,
-           children: PackageIDSlice,
-       };
-
        old.scratch.dependency_list_queue.head = 0;

        // Step 1. Recreate the lockfile with only the packages that are still alive
@@ -1005,7 +967,7 @@ pub const Lockfile = struct {
    pub const Printer = struct {
        lockfile: *Lockfile,
        options: PackageManager.Options,
-       successfully_installed: ?std.DynamicBitSetUnmanaged = null,
+       successfully_installed: ?Bitset = null,

        pub const Format = enum { yarn };
@@ -1013,7 +975,7 @@
        var lockfile_path_buf2: [std.fs.MAX_PATH_BYTES]u8 = undefined;

        pub fn print(
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            log: *logger.Log,
            lockfile_path_: string,
            format: Format,
@@ -1081,7 +1043,7 @@ pub const Lockfile = struct {
        }

        pub fn printWithLockfile(
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            lockfile: *Lockfile,
            format: Format,
            comptime Writer: type,
@@ -1130,28 +1092,18 @@ pub const Lockfile = struct {
            writer: Writer,
            comptime enable_ansi_colors: bool,
        ) !void {
-           var lockfile = this.lockfile;
-
-           const IDDepthPair = struct {
-               depth: u16 = 0,
-               id: PackageID,
-           };
-
            var visited = try Bitset.initEmpty(this.lockfile.packages.len, this.lockfile.allocator);
            var slice = this.lockfile.packages.slice();
            const names: []const String = slice.items(.name);
            const resolved: []const Resolution = slice.items(.resolution);
-           const metas: []const Lockfile.Package.Meta = slice.items(.meta);
            if (names.len == 0) return;
            const dependency_lists = slice.items(.dependencies);
            const resolutions_list = slice.items(.resolutions);
            const resolutions_buffer = this.lockfile.buffers.resolutions.items;
            const dependencies_buffer = this.lockfile.buffers.dependencies.items;
-           const package_count = @truncate(PackageID, names.len);
            const string_buf = this.lockfile.buffers.string_bytes.items;
-           const root = this.lockfile.rootPackage() orelse return;

            visited.set(0);
            const end = @truncate(PackageID, names.len);
@@ -1163,8 +1115,6 @@ pub const Lockfile = struct {
                const package_name = names[package_id].slice(string_buf);

-               const dependency_list = dependency_lists[package_id];
-
                const fmt = comptime brk: {
                    if (enable_ansi_colors) {
                        break :brk Output.prettyFmt("<r> <green>+<r> <b>{s}<r><d>@{}<r>\n", enable_ansi_colors);
@@ -1185,8 +1135,6 @@ pub const Lockfile = struct {
            for (names) |name, package_id| {
                const package_name = name.slice(string_buf);

-               const dependency_list = dependency_lists[package_id];
-
                try writer.print(
                    comptime Output.prettyFmt(" <r><b>{s}<r><d>@<b>{}<r>\n", enable_ansi_colors),
                    .{
@@ -1251,7 +1199,6 @@ pub const Lockfile = struct {
            const metas: []const Lockfile.Package.Meta = slice.items(.meta);
            if (names.len == 0) return;
            const dependency_lists = slice.items(.dependencies);
-           const resolutions_list = slice.items(.resolutions);
            const resolutions_buffer = this.lockfile.buffers.resolutions.items;
            const dependencies_buffer = this.lockfile.buffers.dependencies.items;
            const RequestedVersion = std.HashMap(PackageID, []Dependency.Version, IdentityContext(PackageID), 80);
@@ -1364,7 +1311,7 @@ pub const Lockfile = struct {
                if (dependencies.len > 0) {
                    var behavior = Behavior.uninitialized;
                    var dependency_behavior_change_count: u8 = 0;
-                   for (dependencies) |dep, j| {
+                   for (dependencies) |dep| {
                        if (dep.behavior != behavior) {
                            if (dep.behavior.isOptional()) {
                                try writer.writeAll(" optionalDependencies:\n");
@@ -1441,7 +1388,7 @@ pub const Lockfile = struct {
        std.mem.writeIntNative(u64, secret[0..8], @intCast(u64, std.time.milliTimestamp()));
        var rng = std.rand.Gimli.init(secret);
        var base64_bytes: [64]u8 = undefined;
-       rng.random.bytes(&base64_bytes);
+       rng.random().bytes(&base64_bytes);
        const tmpname__ = std.fmt.bufPrint(tmpname_buf[8..], "{s}", .{std.fmt.fmtSliceHexLower(&base64_bytes)}) catch unreachable;
        tmpname_buf[tmpname__.len + 8] = 0;
@@ -1488,14 +1435,7 @@ pub const Lockfile = struct {
        return slicable.slice(this.buffers.string_bytes.items);
    }

-   pub inline fn cloneString(this: *Lockfile, slicable: anytype, from: *Lockfile) string {
-       // const slice = from.str(slicable);
-       // if (this.string_pool) {
-
-       // }
-   }
-
-   pub fn initEmpty(this: *Lockfile, allocator: *std.mem.Allocator) !void {
+   pub fn initEmpty(this: *Lockfile, allocator: std.mem.Allocator) !void {
        this.* = Lockfile{
            .format = .v0,
            .packages = Lockfile.Package.List{},
@@ -1662,7 +1602,7 @@ pub const Lockfile = struct {
        duplicate_checker_map: DuplicateCheckerMap = undefined,
        dependency_list_queue: DependencyQueue = undefined,

-       pub fn init(allocator: *std.mem.Allocator) Scratch {
+       pub fn init(allocator: std.mem.Allocator) Scratch {
            return Scratch{
                .dependency_list_queue = DependencyQueue.init(allocator),
                .duplicate_checker_map = DuplicateCheckerMap.init(allocator),
@@ -1815,7 +1755,6 @@ pub const Lockfile = struct {
    pub const DependencySlice = ExternalSlice(Dependency);
    pub const PackageIDSlice = ExternalSlice(PackageID);
-   pub const NodeModulesFolderSlice = ExternalSlice(NodeModulesFolder);

    pub const PackageIDList = std.ArrayListUnmanaged(PackageID);
    pub const DependencyList = std.ArrayListUnmanaged(Dependency);
@@ -1871,7 +1810,7 @@ pub const Lockfile = struct {
            const old_dependencies: []const Dependency = this.dependencies.get(old.buffers.dependencies.items);
            const old_resolutions: []const PackageID = this.resolutions.get(old.buffers.resolutions.items);

-           for (old_dependencies) |dependency, i| {
+           for (old_dependencies) |dependency| {
                dependency.count(old_string_buf, *Lockfile.StringBuilder, builder);
            }
@@ -1954,7 +1893,7 @@ pub const Lockfile = struct {
        }

        pub fn fromNPM(
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            lockfile: *Lockfile,
            log: *logger.Log,
            manifest: *const Npm.PackageManifest,
@@ -1963,7 +1902,6 @@ pub const Lockfile = struct {
            string_buf: []const u8,
            comptime features: Features,
        ) !Lockfile.Package {
-           var npm_count: u32 = 0;
            var package = Lockfile.Package{};

            const package_version = package_version_ptr.*;
@@ -2057,8 +1995,6 @@ pub const Lockfile = struct {
            var start_dependencies = dependencies;

-           const off = @truncate(u32, dependencies_list.items.len);
-
            inline for (dependency_groups) |group| {
                const map: ExternalStringMap = @field(package_version, group.field);
                const keys = map.name.get(manifest.external_strings);
@@ -2075,7 +2011,7 @@ pub const Lockfile = struct {
                    // Duplicate peer & dev dependencies are promoted to whichever appeared first
                    // In practice, npm validates this so it shouldn't happen
                    if (comptime group.behavior.isPeer() or group.behavior.isDev()) {
-                       for (start_dependencies[0 .. total_dependencies_count - dependencies.len]) |dependency, j| {
+                       for (start_dependencies[0 .. total_dependencies_count - dependencies.len]) |dependency| {
                            if (dependency.name_hash == key.hash) {
                                i += 1;
                                continue :list;
@@ -2171,7 +2107,7 @@ pub const Lockfile = struct {
        };

        pub fn generate(
-           allocator: *std.mem.Allocator,
+           _: std.mem.Allocator,
            from_lockfile: *Lockfile,
            to_lockfile: *Lockfile,
            from: *Lockfile.Package,
@@ -2180,8 +2116,6 @@ pub const Lockfile = struct {
        ) !Summary {
            var summary = Summary{};
            const to_deps = to.dependencies.get(to_lockfile.buffers.dependencies.items);
-           const to_res = to.resolutions.get(to_lockfile.buffers.resolutions.items);
-           const from_res = from.resolutions.get(from_lockfile.buffers.resolutions.items);
            const from_deps = from.dependencies.get(from_lockfile.buffers.dependencies.items);

            for (from_deps) |from_dep, i| {
@@ -2213,7 +2147,7 @@ pub const Lockfile = struct {
            outer: for (to_deps) |to_dep, i| {
                if (from_deps.len > i and from_deps[i].name_hash == to_dep.name_hash) continue;

-               for (from_deps) |from_dep, j| {
+               for (from_deps) |from_dep| {
                    if (from_dep.name_hash == to_dep.name_hash) continue :outer;
                }
@@ -2257,7 +2191,7 @@ pub const Lockfile = struct {
        pub fn parseMain(
            lockfile: *Lockfile,
            package: *Lockfile.Package,
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            log: *logger.Log,
            source: logger.Source,
            comptime features: Features,
@@ -2268,7 +2202,7 @@ pub const Lockfile = struct {
        pub fn parse(
            lockfile: *Lockfile,
            package: *Lockfile.Package,
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            log: *logger.Log,
            source: logger.Source,
            comptime ResolverContext: type,
@@ -2598,7 +2532,7 @@ pub const Lockfile = struct {
            pub fn load(
                stream: *Stream,
-               allocator: *std.mem.Allocator,
+               allocator: std.mem.Allocator,
            ) !Lockfile.Package.List {
                var reader = stream.reader();
@@ -2649,7 +2583,7 @@ pub const Lockfile = struct {
        // node_modules_package_ids: PackageIDList = PackageIDList{},
        string_bytes: StringBuffer = StringBuffer{},

-       pub fn preallocate(this: *Buffers, that: Buffers, allocator: *std.mem.Allocator) !void {
+       pub fn preallocate(this: *Buffers, that: Buffers, allocator: std.mem.Allocator) !void {
            try this.trees.ensureTotalCapacity(allocator, that.trees.items.len);
            try this.resolutions.ensureTotalCapacity(allocator, that.resolutions.items.len);
            try this.dependencies.ensureTotalCapacity(allocator, that.dependencies.items.len);
@@ -2736,8 +2670,8 @@ pub const Lockfile = struct {
            }
        }

-       pub fn save(this: Buffers, allocator: *std.mem.Allocator, comptime StreamType: type, stream: StreamType, comptime Writer: type, writer: Writer) !void {
-           inline for (sizes.names) |name, i| {
+       pub fn save(this: Buffers, _: std.mem.Allocator, comptime StreamType: type, stream: StreamType, comptime Writer: type, writer: Writer) !void {
+           inline for (sizes.names) |name| {
                var pos: usize = 0;
                if (comptime Environment.isDebug) {
                    pos = try stream.getPos();
@@ -2805,7 +2739,7 @@ pub const Lockfile = struct {
                }
            }

-       pub fn load(stream: *Stream, allocator: *std.mem.Allocator, log: *logger.Log) !Buffers {
+       pub fn load(stream: *Stream, allocator: std.mem.Allocator, log: *logger.Log) !Buffers {
            var this = Buffers{};
            var external_dependency_list: []Dependency.External = &[_]Dependency.External{};
            inline for (sizes.names) |name, i| {
@@ -2886,7 +2820,7 @@ pub const Lockfile = struct {
        pub fn load(
            lockfile: *Lockfile,
            stream: *Stream,
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            log: *logger.Log,
        ) !void {
            var reader = stream.reader();
@@ -2903,7 +2837,7 @@ pub const Lockfile = struct {
            }
            lockfile.format = .v0;
            lockfile.allocator = allocator;
-           const byte_len = try reader.readIntLittle(u64);
+           _ = try reader.readIntLittle(u64);

            lockfile.packages = try Lockfile.Package.Serializer.load(
                stream,
@@ -2941,7 +2875,7 @@ const Task = struct {
    /// An ID that lets us register a callback without keeping the same pointer around
    pub const Id = struct {
-       pub fn forNPMPackage(tag: Task.Tag, package_name: string, package_version: Semver.Version) u64 {
+       pub fn forNPMPackage(_: Task.Tag, package_name: string, package_version: Semver.Version) u64 {
            var hasher = std.hash.Wyhash.init(0);
            hasher.update(package_name);
            hasher.update("@");
@@ -2955,7 +2889,7 @@ const Task = struct {
        }

        pub fn forManifest(
-           tag: Task.Tag,
+           _: Task.Tag,
            name: string,
        ) u64 {
            return @as(u64, @truncate(u63, std.hash.Wyhash.hash(0, name)));
@@ -2978,7 +2912,7 @@ const Task = struct {
                &this.log,
                this.request.package_manifest.name.slice(),
                this.request.package_manifest.network.callback.package_manifest.loaded_manifest,
-           ) catch |err| {
+           ) catch {
                this.status = Status.fail;
                PackageManager.instance.resolve_tasks.writeItem(this.*) catch unreachable;
                return;
@@ -3007,7 +2941,7 @@ const Task = struct {
            .extract => {
                const result = this.request.extract.tarball.run(
                    this.request.extract.network.response_buffer.toOwnedSliceLeaky(),
-               ) catch |err| {
+               ) catch {
                    this.status = Status.fail;
                    this.data = .{ .extract = "" };
                    PackageManager.instance.resolve_tasks.writeItem(this.*) catch unreachable;
@@ -3064,7 +2998,7 @@ const PackageInstall = struct {
    destination_dir_subpath: stringZ = "",
    destination_dir_subpath_buf: []u8,

-   allocator: *std.mem.Allocator,
+   allocator: std.mem.Allocator,
    progress: *Progress,
@@ -3083,7 +3017,7 @@ const PackageInstall = struct {
        skip_verify: bool = false,
        progress: *Progress = undefined,
        cache_dir: std.fs.Dir = undefined,
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
    };

    pub const Task = struct {
@@ -3106,7 +3040,6 @@ const PackageInstall = struct {
            var destination_dir_subpath_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
            var cache_dir_subpath_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
            const name = ctx.names[this.package_id].slice(ctx.string_buf);
-           const meta = ctx.metas[this.package_id];
            const resolution = ctx.resolutions[this.package_id];
            std.mem.copy(u8, &destination_dir_subpath_buf, name);
            destination_dir_subpath_buf[name.len] = 0;
@@ -3147,7 +3080,7 @@ const PackageInstall = struct {
        fail: u32 = 0,
        success: u32 = 0,
        skipped: u32 = 0,
-       successfully_installed: ?std.DynamicBitSetUnmanaged = null,
+       successfully_installed: ?Bitset = null,
    };

    pub const Method = enum {
@@ -3652,7 +3585,7 @@ pub const PackageManager = struct {
    cache_directory: std.fs.Dir = undefined,
    root_dir: *Fs.FileSystem.DirEntry,
    env_loader: *DotEnv.Loader,
-   allocator: *std.mem.Allocator,
+   allocator: std.mem.Allocator,
    log: *logger.Log,
    resolve_tasks: TaskChannel,
    timestamp: u32 = 0,
@@ -4020,13 +3953,6 @@ pub const PackageManager = struct {
        }
    }

-   pub fn resolvePackageFromManifest(
-       this: *PackageManager,
-       semver: Semver.Version,
-       version: *const Npm.PackageVersion,
-       manifest: *const Npm.PackageManifest,
-   ) !void {}
-
    fn enqueueParseNPMPackage(
        this: *PackageManager,
        task_id: u64,
@@ -4085,9 +4011,9 @@ pub const PackageManager = struct {
        var tmpfile = FileSystem.RealFS.Tmpfile{};
        var secret: [32]u8 = undefined;
        std.mem.writeIntNative(u64, secret[0..8], @intCast(u64, std.time.milliTimestamp()));
-       var rng = std.rand.Gimli.init(secret);
+       var rng = std.rand.Gimli.init(secret).random();
        var base64_bytes: [64]u8 = undefined;
-       rng.random.bytes(&base64_bytes);
+       rng.bytes(&base64_bytes);
        const tmpname__ = std.fmt.bufPrint(tmpname_buf[8..], "{s}", .{std.fmt.fmtSliceHexLower(&base64_bytes)}) catch unreachable;
        tmpname_buf[tmpname__.len + 8] = 0;
@@ -4371,13 +4297,10 @@ pub const PackageManager = struct {
        this.network_resolve_batch = .{};
    }

-   pub fn hoist(this: *PackageManager) !void {}
-   pub fn link(this: *PackageManager) !void {}
-
    pub fn fetchCacheDirectoryPath(
-       allocator: *std.mem.Allocator,
+       _: std.mem.Allocator,
        env_loader: *DotEnv.Loader,
-       root_dir: *Fs.FileSystem.DirEntry,
+       _: *Fs.FileSystem.DirEntry,
    ) ?string {
        if (env_loader.map.get("BUN_INSTALL_CACHE_DIR")) |dir| {
            return dir;
@@ -4580,7 +4503,7 @@ pub const PackageManager = struct {
                    continue;
                }
                const manifest = task.data.package_manifest;
-               var entry = try manager.manifests.getOrPutValue(manager.allocator, @truncate(PackageNameHash, manifest.pkg.name.hash), manifest);
+               _ = try manager.manifests.getOrPutValue(manager.allocator, @truncate(PackageNameHash, manifest.pkg.name.hash), manifest);

                var dependency_list_entry = manager.task_queue.getEntry(task.id).?;
                var dependency_list = dependency_list_entry.value_ptr.*;
@@ -4637,7 +4560,6 @@ pub const PackageManager = struct {

        manager.flushDependencyQueue();

-       const prev_total = manager.total_tasks;
        {
            const count = batch.len + manager.network_resolve_batch.len + manager.network_tarball_batch.len;
            manager.pending_tasks += @truncate(u32, count);
@@ -4721,7 +4643,7 @@ pub const PackageManager = struct {
        verbose_no_progress,

        pub inline fn isVerbose(this: LogLevel) bool {
-           return return switch (this) {
+           return switch (this) {
                .verbose_no_progress, .verbose => true,
                else => false,
            };
@@ -4741,7 +4663,7 @@ pub const PackageManager = struct {

        pub fn load(
            this: *Options,
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            log: *logger.Log,
            env_loader: *DotEnv.Loader,
            cli_: ?CommandLineArguments,
@@ -4848,7 +4770,7 @@ pub const PackageManager = struct {

            if (env_loader.map.get("BUN_CONFIG_MAX_HTTP_REQUESTS")) |max_http_requests| {
                load: {
-                   AsyncHTTP.max_simultaneous_requests = std.fmt.parseInt(u16, max_http_requests, 10) catch |err| {
+                   AsyncHTTP.max_simultaneous_requests = std.fmt.parseInt(u16, max_http_requests, 10) catch {
                        log.addErrorFmt(
                            null,
                            logger.Loc.Empty,
@@ -5004,7 +4926,7 @@ pub const PackageManager = struct {

    const PackageJSONEditor = struct {
        pub fn edit(
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            updates: []UpdateRequest,
            current_package_json: *JSAst.Expr,
            dependency_list: string,
@@ -5141,7 +5063,7 @@
            dependencies_object.data.e_object.packageJSONSort();
        }

-       for (updates) |*update, j| {
+       for (updates) |*update| {
            var str = update.e_string.?;

            if (update.version.tag == .uninitialized) {
@@ -5178,7 +5100,7 @@ pub const PackageManager = struct {
            package_json_file = file;
        } else {
            // can't use orelse due to a stage1 bug
-           package_json_file = std.fs.cwd().openFileZ("package.json", .{ .read = true, .write = true }) catch |err2| brk: {
+           package_json_file = std.fs.cwd().openFileZ("package.json", .{ .read = true, .write = true }) catch brk: {
                var this_cwd = original_cwd;
                outer: while (std.fs.path.dirname(this_cwd)) |parent| {
                    cwd_buf[parent.len] = 0;
@@ -5190,7 +5112,7 @@ pub const PackageManager = struct {
                        return err;
                    };

-                   break :brk std.fs.cwd().openFileZ("package.json", .{ .read = true, .write = true }) catch |err| {
+                   break :brk std.fs.cwd().openFileZ("package.json", .{ .read = true, .write = true }) catch {
                        this_cwd = parent;
                        continue :outer;
                    };
@@ -5253,7 +5175,7 @@ pub const PackageManager = struct {
        if (env_loader.map.get("GOMAXPROCS")) |max_procs| {
            if (std.fmt.parseInt(u32, max_procs, 10)) |cpu_count_| {
                cpu_count = @minimum(cpu_count, cpu_count_);
-           } else |err| {}
+           } else |_| {}
        }

        var manager = &instance;
@@ -5405,7 +5327,7 @@ pub const PackageManager = struct {
        };

        pub fn parse(
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            comptime params: []const ParamType,
        ) !CommandLineArguments {
            var diag = clap.Diagnostic{};
@@ -5532,7 +5454,7 @@ pub const PackageManager = struct {
        pub const Array = std.BoundedArray(UpdateRequest, 64);

        pub fn parse(
-           allocator: *std.mem.Allocator,
+           allocator: std.mem.Allocator,
            log: *logger.Log,
            positionals: []const string,
            update_requests: *Array,
@@ -5968,7 +5890,7 @@ pub const PackageManager = struct {
                std.mem.copy(u8, &node_modules_buf, entry.name);
                node_modules_buf[entry.name.len] = 0;
                var buf: [:0]u8 = node_modules_buf[0..entry.name.len :0];
-               var file = node_modules_bin.openFileZ(buf, .{ .read = true }) catch |err| {
+               var file = node_modules_bin.openFileZ(buf, .{ .read = true }) catch {
                    node_modules_bin.deleteFileZ(buf) catch {};
                    continue :iterator;
                };
@@ -6033,7 +5955,7 @@ pub const PackageManager = struct {
        has_created_bin: bool = false,
        destination_dir_subpath_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined,
        install_count: usize = 0,
-       successfully_installed: std.DynamicBitSetUnmanaged,
+       successfully_installed: Bitset,

        // For linking native binaries, we only want to link after we've installed the companion dependencies
        // We don't want to introduce dependent callbacks like that for every single package
@@ -6287,7 +6209,6 @@ pub const PackageManager = struct {
                progress.* = .{};
            }
        }
-       const cache_dir = this.cache_directory;

        lockfile.unique_packages.unset(0);
@@ -6329,8 +6250,7 @@ pub const PackageManager = struct {
        const resolutions_buffer: []const PackageID = lockfile.buffers.resolutions.items;
        const resolution_lists: []const Lockfile.PackageIDSlice = parts.items(.resolutions);
        var resolutions = parts.items(.resolution);
-       const end = @truncate(PackageID, names.len);
-       const pending_task_offset = this.total_tasks;
+
        var iterator = Lockfile.Tree.Iterator.init(
            lockfile.buffers.trees.items,
            lockfile.buffers.hoisted_packages.items,
@@ -6355,7 +6275,7 @@ pub const PackageManager = struct {
            .summary = &summary,
            .force_install = force_install,
            .install_count = lockfile.buffers.hoisted_packages.items.len,
-           .successfully_installed = try std.DynamicBitSetUnmanaged.initEmpty(lockfile.packages.len, this.allocator),
+           .successfully_installed = try Bitset.initEmpty(lockfile.packages.len, this.allocator),
        };

        const cwd = std.fs.cwd();
@@ -6426,7 +6346,7 @@ pub const PackageManager = struct {
                const package_resolutions: []const PackageID = resolution_lists[package_id].get(resolutions_buffer);
                const original_bin: Bin = installer.bins[package_id];

-               for (package_dependencies) |dependency, i| {
+               for (package_dependencies) |_, i| {
                    const resolved_id = package_resolutions[i];
                    if (resolved_id >= names.len) continue;
                    const meta: Lockfile.Package.Meta = metas[resolved_id];
@@ -6617,7 +6537,7 @@ pub const PackageManager = struct {
            // ensure we use one pointer to reference it instead of creating new ones and potentially aliasing
            var builder = &builder_;

-           for (new_dependencies) |new_dep, i| {
+           for (new_dependencies) |new_dep| {
                new_dep.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder);
            }
@@ -6626,7 +6546,6 @@ pub const PackageManager = struct {
            var packages = manager.lockfile.packages.slice();
            var dep_lists = packages.items(.dependencies);
            var resolution_lists = packages.items(.resolutions);
-           const old_dependencies_list = dep_lists[0];
            const old_resolutions_list = resolution_lists[0];
            dep_lists[0] = .{ .off = off, .len = len };
            resolution_lists[0] = .{ .off = off, .len = len };

diff --git a/src/install/integrity.zig b/src/install/integrity.zig
index debe861a4..1a52b5b1a 100644
--- a/src/install/integrity.zig
+++ b/src/install/integrity.zig
@@ -66,8 +66,6 @@ pub const Integrity = extern struct {
            i += 1;
        }

-       var remainder = &integrity.value[out_i..];
-
        return integrity;
    }
@@ -146,7 +144,7 @@ pub const Integrity = extern struct {
        return this.value[0..this.tag.digestLen()];
    }

-   pub fn format(this: *const Integrity, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
+   pub fn format(this: *const Integrity, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
        switch (this.tag) {
            .sha1 => try writer.writeAll("sha1-"),
            .sha256 => try writer.writeAll("sha256-"),

diff --git a/src/install/npm.zig b/src/install/npm.zig
index a6cded5dc..fb1ff523a 100644
--- a/src/install/npm.zig
+++ b/src/install/npm.zig
@@ -24,6 +24,7 @@ const IdentityContext = @import("../identity_context.zig").IdentityContext;
const ArrayIdentityContext = @import("../identity_context.zig").ArrayIdentityContext;
const SlicedString = Semver.SlicedString;
const FileSystem = @import("../fs.zig").FileSystem;
+const Dependency = @import("./dependency.zig");
const VersionSlice = @import("./install.zig").VersionSlice;

const ObjectPool = @import("../pool.zig").ObjectPool;
@@ -69,7 +70,7 @@ pub const Registry = struct {
    const Pico = @import("picohttp");
    pub fn getPackageMetadata(
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        response: Pico.Response,
        body: []const u8,
        log: *logger.Log,
@@ -481,7 +482,7 @@ pub const PackageManifest = struct {
        try std.os.renameatZ(tmpdir.fd, tmp_path, cache_dir.fd, out_path);
    }

-   pub fn load(allocator: *std.mem.Allocator, cache_dir: std.fs.Dir, package_name: string) !?PackageManifest {
+   pub fn load(allocator: std.mem.Allocator, cache_dir: std.fs.Dir, package_name: string) !?PackageManifest {
        const file_id = std.hash.Wyhash.hash(0, package_name);
        var file_path_buf: [512 + 64]u8 = undefined;
        var file_path = try std.fmt.bufPrintZ(&file_path_buf, "{x}.npm", .{file_id});
@@ -538,11 +539,6 @@ pub const PackageManifest = struct {
    }

    pub fn reportSize(this: *const PackageManifest) void {
-       const versions = std.mem.sliceAsBytes(this.versions);
-       const external_strings = std.mem.sliceAsBytes(this.external_strings);
-       const package_versions = std.mem.sliceAsBytes(this.package_versions);
-       const string_buf = std.mem.sliceAsBytes(this.string_buf);
-
        Output.prettyErrorln(
            \\ Versions count: {d}
            \\ External Strings count: {d}
@@ -668,7 +664,7 @@ pub const PackageManifest = struct {

    /// This parses [Abbreviated metadata](https://github.com/npm/registry/blob/master/docs/responses/package-metadata.md#abbreviated-metadata-format)
    pub fn parse(
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        log: *logger.Log,
        json_buffer: []const u8,
        expected_name: []const u8,
@@ -678,9 +674,7 @@ pub const PackageManifest = struct {
    ) !?PackageManifest {
        const source = logger.Source.initPathString(expected_name, json_buffer);
        initializeStore();
-       const json = json_parser.ParseJSON(&source, log, allocator) catch |err| {
-           return null;
-       };
+       const json = json_parser.ParseJSON(&source, log, allocator) catch return null;

        if (json.asProperty("error")) |error_q| {
            if (error_q.expr.asString(allocator)) |err| {
@@ -776,7 +770,7 @@ pub const PackageManifest = struct {
                        string_builder.count(obj.properties[0].value.?.asString(allocator) orelse break :bin);
                    }
                },
-               .e_string => |str| {
+               .e_string => {
                    if (bin.expr.asString(allocator)) |str_| {
                        string_builder.count(str_);
                        break :bin;
@@ -894,7 +888,6 @@ pub const PackageManifest = struct {
            result.pkg.name = string_builder.append(ExternalString, field);
        }

-       var string_slice = SlicedString.init(string_buf, string_buf);
        get_versions: {
            if (json.asProperty("versions")) |versions_q| {
                if (versions_q.expr.data != .e_object) break :get_versions;
@@ -909,7 +902,7 @@ pub const PackageManifest = struct {
                var dependency_names = all_dependency_names_and_values;
                var version_string__: String = String{};

-               for (versions) |prop, version_i| {
+               for (versions) |prop| {
                    const version_name = prop.key.?.asString(allocator) orelse continue;

                    var sliced_string = SlicedString.init(version_name, version_name);
@@ -1259,14 +1252,13 @@ pub const PackageManifest = struct {
                var extern_strings_slice = extern_strings[0..dist_tags_count];
                var dist_tag_i: usize = 0;

-               for (tags) |tag, i| {
+               for (tags) |tag| {
                    if (tag.key.?.asString(allocator)) |key| {
                        extern_strings_slice[dist_tag_i] = string_builder.append(ExternalString, key);

                        const version_name = tag.value.?.asString(allocator) orelse continue;

                        const dist_tag_value_literal = string_builder.append(ExternalString, version_name);
-                       const dist_tag_value_literal_slice = dist_tag_value_literal.slice(string_buf);

                        const sliced_string = dist_tag_value_literal.value.sliced(string_buf);

diff --git a/src/install/repository.zig b/src/install/repository.zig
index f93402ea4..7ea9ce470 100644
--- a/src/install/repository.zig
+++ b/src/install/repository.zig
@@ -49,7 +49,7 @@ pub const Repository = extern struct {
        label: []const u8 = "",
        buf: []const u8,
        repository: Repository,
-       pub fn format(formatter: Formatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
+       pub fn format(formatter: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
            std.debug.assert(formatter.label.len > 0);
            try writer.writeAll(formatter.label);

diff --git a/src/install/resolvers/folder_resolver.zig b/src/install/resolvers/folder_resolver.zig
index 1d40058f8..b7261c9f1 100644
--- a/src/install/resolvers/folder_resolver.zig
+++ b/src/install/resolvers/folder_resolver.zig
@@ -32,7 +32,7 @@ pub const FolderResolution = union(Tag) {
    pub const Resolver = struct {
        folder_path: string,

-       pub fn resolve(this: Resolver, comptime Builder: type, builder: Builder, json: JSAst.Expr) !Resolution {
+       pub fn resolve(this: Resolver, comptime Builder: type, builder: Builder, _: JSAst.Expr) !Resolution {
            return Resolution{
                .tag = .folder,
                .value = .{
@@ -41,7 +41,7 @@ pub const FolderResolution = union(Tag) {
            };
        }

-       pub fn count(this: Resolver, comptime Builder: type, builder: Builder, json: JSAst.Expr) void {
+       pub fn count(this: Resolver, comptime Builder: type, builder: Builder, _: JSAst.Expr) void {
            builder.count(this.folder_path);
        }
    };
@@ -51,7 +51,6 @@ pub const FolderResolution = union(Tag) {
        // We consider it valid if there is a package.json in the folder
        const normalized = std.mem.trimRight(u8, normalize(non_normalized_path), std.fs.path.sep_str);
        var joined: [std.fs.MAX_PATH_BYTES]u8 = undefined;
-       var rel_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
        var abs: string = "";
        var rel: string = "";
        if (strings.startsWithChar(normalized, '.')) {
@@ -86,7 +85,6 @@ pub const FolderResolution = union(Tag) {
        var body = Npm.Registry.BodyPool.get(manager.allocator);
        defer Npm.Registry.BodyPool.release(body);

-       const initial_errors_count = manager.log.errors;
        const len = package_json.getEndPos() catch |err| {
            entry.value_ptr.* = .{ .err = err };
            return entry.value_ptr.*;

diff --git a/src/install/semver.zig b/src/install/semver.zig
index ce0208a0e..794867f13 100644
--- a/src/install/semver.zig
+++ b/src/install/semver.zig
@@ -1,4 +1,13 @@
-usingnamespace @import("../global.zig");
+const _global = @import("../global.zig");
+const string = _global.string;
+const Output = _global.Output;
+const Global = _global.Global;
+const Environment = _global.Environment;
+const strings = _global.strings;
+const MutableString = _global.MutableString;
+const stringZ = _global.stringZ;
+const default_allocator = _global.default_allocator;
+const C = _global.C;

const std = @import("std");

/// String type that stores either an offset/length into an external buffer or a string inline directly
@@ -26,7 +35,7 @@ pub const String = extern struct {
        str: *const String,
        buf: string,

-       pub fn format(formatter: Formatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
+       pub fn format(formatter: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
            const str = formatter.str;
            try writer.writeAll(str.slice(formatter.buf));
        }
@@ -216,7 +225,7 @@ pub const String = extern struct {
            else &[_]u8{};
        }

-       pub fn allocate(this: *Builder, allocator: *std.mem.Allocator) !void {
+       pub fn allocate(this: *Builder, allocator: std.mem.Allocator) !void {
            var ptr_ = try allocator.alloc(u8, this.cap);
            this.ptr = ptr_.ptr;
        }
@@ -491,7 +500,7 @@ pub const Version = extern struct {
        version: Version,
        input: string,

-       pub fn format(formatter: Formatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void {
+       pub fn format(formatter: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
            const self = formatter.version;

            try std.fmt.format(writer, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
@@ -509,25 +518,16 @@ pub const Version = extern struct {
        }
    };

-   inline fn atPart(i: u8) u32 {
-       return switch (i) {
-           0 => self.major,
-           1 => self.minor,
-           2 => self.patch,
-           else => unreachable,
-       };
-   }
-
    pub fn eql(lhs: Version, rhs: Version) bool {
        return lhs.major == rhs.major and lhs.minor == rhs.minor and lhs.patch == rhs.patch and rhs.tag.eql(lhs.tag);
    }

    pub const HashContext = struct {
-       pub fn hash(this: @This(), lhs: Version) u32 {
+       pub fn hash(_: @This(), lhs: Version) u32 {
            return @truncate(u32, lhs.hash());
        }

-       pub fn eql(this: @This(), lhs: Version, rhs: Version) bool {
+       pub fn eql(_: @This(), lhs: Version, rhs: Version) bool {
            return lhs.eql(rhs);
        }
    };
@@ -628,7 +628,7 @@ pub const Version = extern struct {
        var multi_tag_warn = false;
        // TODO: support multiple tags

-       pub fn parse(allocator: *std.mem.Allocator, sliced_string: SlicedString) TagResult {
+       pub fn parse(_: std.mem.Allocator, sliced_string: SlicedString) TagResult {
            var input = sliced_string.slice;
            var build_count: u32 = 0;
            var pre_count: u32 = 0;
@@ -658,9 +658,6 @@ pub const Version = extern struct {
            var state = State.none;
            var start: usize = 0;

-           var tag_i: usize = 0;
-           var had_content = false;
-
            var i: usize = 0;

            while (i < input.len) : (i += 1) {
@@ -743,7 +740,7 @@ pub const Version = extern struct {
        stopped_at: u32 = 0,
    };

-   pub fn parse(sliced_string: SlicedString, allocator: *std.mem.Allocator) ParseResult {
+   pub fn parse(sliced_string: SlicedString, allocator: std.mem.Allocator) ParseResult {
        var input = sliced_string.slice;
        var result = ParseResult{};
@@ -918,7 +915,7 @@ pub const Version = extern struct {

    std.debug.assert(input[0] != '.');

-   for (input) |char, i| {
+   for (input) |char| {
        switch (char) {
            'X', 'x', '*' => return 0,
            '0'...'9' => {
@@ -1132,7 +1129,7 @@ pub const Query = struct {
            return lhs_next.eql(rhs_next);
        }

-       pub fn andRange(self: *List, allocator: *std.mem.Allocator, range: Range) !void {
+       pub fn andRange(self: *List, allocator: std.mem.Allocator, range: Range) !void {
            if (!self.head.range.hasLeft() and !self.head.range.hasRight()) {
                self.head.range = range;
                return;
@@ -1153,7 +1150,7 @@ pub const Query = struct {
    pub const Group = struct {
        head: List = List{},
        tail: ?*List = null,
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        input: string = "",

        flags: FlagsBitSet = FlagsBitSet.initEmpty(),
@@ -1432,7 +1429,7 @@ pub const Query = struct {
    };

    pub fn parse(
-       allocator: *std.mem.Allocator,
+       allocator: std.mem.Allocator,
        input: string,
        sliced: SlicedString,
    ) !Group {
@@ -1448,9 +1445,6 @@ pub const Query = struct {
        var count: u8 = 0;
        var skip_round = false;
        var is_or = false;
-       var enable_hyphen = false;
-
-       var last_non_whitespace: usize = 0;

        while (i < input.len) {
            skip_round = false;