author    Jarred Sumner <jarred@jarredsumner.com>  2021-05-15 17:23:55 -0700
committer Jarred Sumner <jarred@jarredsumner.com>  2021-05-15 17:23:55 -0700
commit    e80f865974df7aae5e2f6abb966b36497da693c6 (patch)
tree      e82bcf4860c1fc43de189f13a18e8526a2966f44 /src
parent    c88625436cf866bdaa68249d10880a2271f611e0 (diff)
lots
Former-commit-id: d8b1d296562a01800248bd1148bc4778225b436e
Diffstat (limited to 'src')
-rw-r--r--  src/api/demo/pages/index.jsx (renamed from src/api/demo/pages/index.js) | 0
-rw-r--r--  src/bundler.zig | 4
-rw-r--r--  src/cli.zig | 18
-rw-r--r--  src/fs.zig | 2
-rw-r--r--  src/hash_map_v2.zig | 1818
-rw-r--r--  src/js_ast.zig | 8
-rw-r--r--  src/js_lexer.zig | 21
-rw-r--r--  src/js_parser/js_parser.zig | 295
-rw-r--r--  src/js_parser/js_parser_test.zig | 14
-rw-r--r--  src/js_printer.zig | 4
-rw-r--r--  src/json_parser.zig | 31
-rw-r--r--  src/logger.zig | 2
-rw-r--r--  src/options.zig | 12
-rw-r--r--  src/resolver/resolver.zig | 262
-rw-r--r--  src/test/fixtures/cannot-assign-to-import-bug.js | 369
-rw-r--r--  src/test/fixtures/img-bug.js | 105
-rw-r--r--  src/test/fixtures/react-dev-crash.js | 108
17 files changed, 2801 insertions, 272 deletions
diff --git a/src/api/demo/pages/index.js b/src/api/demo/pages/index.jsx
index 9523fbc8b..9523fbc8b 100644
--- a/src/api/demo/pages/index.js
+++ b/src/api/demo/pages/index.jsx
diff --git a/src/bundler.zig b/src/bundler.zig
index 6ea4d41a8..34983a9b8 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -110,7 +110,7 @@ pub const Bundler = struct {
) !void {
var allocator = bundler.allocator;
const relative_path = try std.fs.path.relative(bundler.allocator, bundler.fs.top_level_dir, result.source.path.text);
- var out_parts = [_]string{ bundler.fs.top_level_dir, relative_path };
+ var out_parts = [_]string{ bundler.options.output_dir, relative_path };
const out_path = try std.fs.path.join(bundler.allocator, &out_parts);
const ast = result.ast;
@@ -302,7 +302,7 @@ pub const Transformer = struct {
);
const cwd = opts.absolute_working_dir orelse try std.process.getCwdAlloc(allocator);
- const output_dir_parts = [_]string{ cwd, opts.output_dir orelse "out" };
+ const output_dir_parts = [_]string{ try std.process.getCwdAlloc(allocator), opts.output_dir orelse "out" };
const output_dir = try std.fs.path.join(allocator, &output_dir_parts);
var output_files = try std.ArrayList(options.OutputFile).initCapacity(allocator, opts.entry_points.len);
var loader_values = try allocator.alloc(options.Loader, opts.loader_values.len);
diff --git a/src/cli.zig b/src/cli.zig
index d26e41d65..01240482d 100644
--- a/src/cli.zig
+++ b/src/cli.zig
@@ -310,18 +310,18 @@ pub const Cli = struct {
did_write = true;
var root_dir = try std.fs.openDirAbsolute(args.absolute_working_dir.?, std.fs.Dir.OpenDirOptions{});
defer root_dir.close();
- // for (result.output_files) |f| {
- // try root_dir.makePath(std.fs.path.dirname(f.path) orelse unreachable);
+ for (result.output_files) |f| {
+ try root_dir.makePath(std.fs.path.dirname(f.path) orelse unreachable);
- // var _handle = try std.fs.createFileAbsolute(f.path, std.fs.File.CreateFlags{
- // .truncate = true,
- // });
- // try _handle.seekTo(0);
+ var _handle = try std.fs.createFileAbsolute(f.path, std.fs.File.CreateFlags{
+ .truncate = true,
+ });
+ try _handle.seekTo(0);
- // defer _handle.close();
+ defer _handle.close();
- // try _handle.writeAll(f.contents);
- // }
+ try _handle.writeAll(f.contents);
+ }
var max_path_len: usize = 0;
var max_padded_size: usize = 0;
diff --git a/src/fs.zig b/src/fs.zig
index ed541d588..86a6550c4 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -10,8 +10,6 @@ const resolvePath = @import("./resolver/resolve_path.zig").resolvePath;
// pub const FilesystemImplementation = @import("fs_impl.zig");
-//
-
threadlocal var scratch_lookup_buffer = [_]u8{0} ** 255;
pub const FileSystem = struct {
diff --git a/src/hash_map_v2.zig b/src/hash_map_v2.zig
new file mode 100644
index 000000000..cad4bd36c
--- /dev/null
+++ b/src/hash_map_v2.zig
@@ -0,0 +1,1818 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2021 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+const std = @import("std");
+const assert = debug.assert;
+const autoHash = std.hash.autoHash;
+const debug = std.debug;
+const warn = debug.warn;
+const math = std.math;
+const mem = std.mem;
+const meta = std.meta;
+const trait = meta.trait;
+const Allocator = mem.Allocator;
+const Wyhash = std.hash.Wyhash;
+
+/// Finds the max of three numbers
+pub fn math_max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
+ return std.math.max(x, std.math.max(y, z));
+}
+
+pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u64) {
+ comptime {
+ assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs to be updated
+ if (K == []const u8) {
+ @compileError("std.auto_hash.autoHash does not allow slices here (" ++
+ @typeName(K) ++
+ ") because the intent is unclear. " ++
+ "Consider using std.StringHashMap for hashing the contents of []const u8. " ++
+ "Alternatively, consider using std.auto_hash.hash or providing your own hash function instead.");
+ }
+ }
+
+ return struct {
+ fn hash(ctx: Context, key: K) u64 {
+ if (comptime trait.hasUniqueRepresentation(K)) {
+ return Wyhash.hash(0, std.mem.asBytes(&key));
+ } else {
+ var hasher = Wyhash.init(0);
+ autoHash(&hasher, key);
+ return hasher.final();
+ }
+ }
+ }.hash;
+}
+
+pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
+ return struct {
+ fn eql(ctx: Context, a: K, b: K) bool {
+ return meta.eql(a, b);
+ }
+ }.eql;
+}
+
+pub fn AutoHashMap(comptime K: type, comptime V: type) type {
+ return HashMap(K, V, AutoContext(K), default_max_load_percentage);
+}
+
+pub fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type {
+ return HashMapUnmanaged(K, V, AutoContext(K), default_max_load_percentage);
+}
+
+pub fn AutoContext(comptime K: type) type {
+ return struct {
+ pub const hash = getAutoHashFn(K, @This());
+ pub const eql = getAutoEqlFn(K, @This());
+ };
+}
+
+/// Builtin hashmap for strings as keys.
+/// Key memory is managed by the caller. Keys and values
+/// will not automatically be freed.
+pub fn StringHashMap(comptime V: type) type {
+ return HashMap([]const u8, V, StringContext, default_max_load_percentage);
+}
+
+/// Key memory is managed by the caller. Keys and values
+/// will not automatically be freed.
+pub fn StringHashMapUnmanaged(comptime V: type) type {
+ return HashMapUnmanaged([]const u8, V, StringContext, default_max_load_percentage);
+}
+
+pub const StringContext = struct {
+ pub fn hash(self: @This(), s: []const u8) u64 {
+ return hashString(s);
+ }
+ pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+ return eqlString(a, b);
+ }
+};
+
+pub fn eqlString(a: []const u8, b: []const u8) bool {
+ return mem.eql(u8, a, b);
+}
+
+pub fn hashString(s: []const u8) u64 {
+ return std.hash.Wyhash.hash(0, s);
+}
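+
+// A minimal illustration of the string context above: two slices holding the
+// same bytes hash identically (Wyhash over the contents) and compare equal.
+test "StringContext hashes and compares strings by contents" {
+ const a: []const u8 = "hello";
+ const b: []const u8 = "hel" ++ "lo";
+ try std.testing.expect(hashString(a) == hashString(b));
+ try std.testing.expect(eqlString(a, b));
+}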
+
+/// Deprecated: use `default_max_load_percentage` instead.
+pub const DefaultMaxLoadPercentage = default_max_load_percentage;
+
+pub const default_max_load_percentage = 80;
+
+/// This function issues a compile error with a helpful message if there
+/// is a problem with the provided context type. A context must have the following
+/// member functions:
+/// - hash(self, PseudoKey) Hash
+/// - eql(self, PseudoKey, Key) bool
+/// If you are passing a context to an *Adapted function, PseudoKey is the type
+/// of the key parameter. Otherwise, when creating a HashMap or HashMapUnmanaged
+/// type, PseudoKey = Key = K.
+pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, comptime Key: type, comptime Hash: type) void {
+ comptime {
+ var allow_const_ptr = false;
+ var allow_mutable_ptr = false;
+ // Context is the actual namespace type. RawContext may be a pointer to Context.
+ var Context = RawContext;
+ // Make sure the context is a namespace type which may have member functions
+ switch (@typeInfo(Context)) {
+ .Struct, .Union => {},
+ // Special-case .Opaque for a better error message
+ .Opaque => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context) ++ " because it is opaque. Use a pointer instead."),
+ .Pointer => |ptr| {
+ if (ptr.size != .One) {
+ @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context) ++ " because it is not a single pointer.");
+ }
+ Context = ptr.child;
+ allow_const_ptr = true;
+ allow_mutable_ptr = !ptr.is_const;
+ switch (@typeInfo(Context)) {
+ .Struct, .Union, .Opaque => {},
+ else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context)),
+ }
+ },
+ else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context)),
+ }
+
+ // Keep track of multiple errors so we can report them all.
+ var errors: []const u8 = "";
+
+ // Put common errors here, they will only be evaluated
+ // if the error is actually triggered.
+ const lazy = struct {
+ const prefix = "\n ";
+ const deep_prefix = prefix ++ " ";
+ const hash_signature = "fn (self, " ++ @typeName(PseudoKey) ++ ") " ++ @typeName(Hash);
+ const eql_signature = "fn (self, " ++ @typeName(PseudoKey) ++ ", " ++ @typeName(Key) ++ ") bool";
+ const err_invalid_hash_signature = prefix ++ @typeName(Context) ++ ".hash must be " ++ hash_signature ++
+ deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.hash));
+ const err_invalid_eql_signature = prefix ++ @typeName(Context) ++ ".eql must be " ++ eql_signature ++
+ deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.eql));
+ };
+
+ // Verify Context.hash(self, PseudoKey) => Hash
+ if (@hasDecl(Context, "hash")) {
+ const hash = Context.hash;
+ const info = @typeInfo(@TypeOf(hash));
+ if (info == .Fn) {
+ const func = info.Fn;
+ if (func.args.len != 2) {
+ errors = errors ++ lazy.err_invalid_hash_signature;
+ } else {
+ var emitted_signature = false;
+ if (func.args[0].arg_type) |Self| {
+ if (Self == Context) {
+ // pass, this is always fine.
+ } else if (Self == *const Context) {
+ if (!allow_const_ptr) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_hash_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self);
+ errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
+ }
+ } else if (Self == *Context) {
+ if (!allow_mutable_ptr) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_hash_signature;
+ emitted_signature = true;
+ }
+ if (!allow_const_ptr) {
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self);
+ errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
+ } else {
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ " or " ++ @typeName(*const Context) ++ ", but is " ++ @typeName(Self);
+ errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer.";
+ }
+ }
+ } else {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_hash_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context);
+ if (allow_const_ptr) {
+ errors = errors ++ " or " ++ @typeName(*const Context);
+ if (allow_mutable_ptr) {
+ errors = errors ++ " or " ++ @typeName(*Context);
+ }
+ }
+ errors = errors ++ ", but is " ++ @typeName(Self);
+ }
+ }
+ if (func.args[1].arg_type != null and func.args[1].arg_type.? != PseudoKey) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_hash_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?);
+ }
+ if (func.return_type != null and func.return_type.? != Hash) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_hash_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "Return type must be " ++ @typeName(Hash) ++ ", but was " ++ @typeName(func.return_type.?);
+ }
+ // If any of these are generic (null), we cannot verify them.
+ // The call sites check the return type, but cannot check the
+ // parameters. This may cause compile errors with generic hash/eql functions.
+ }
+ } else {
+ errors = errors ++ lazy.err_invalid_hash_signature;
+ }
+ } else {
+ errors = errors ++ lazy.prefix ++ @typeName(Context) ++ " must declare a hash function with signature " ++ lazy.hash_signature;
+ }
+
+ // Verify Context.eql(self, PseudoKey, Key) => bool
+ if (@hasDecl(Context, "eql")) {
+ const eql = Context.eql;
+ const info = @typeInfo(@TypeOf(eql));
+ if (info == .Fn) {
+ const func = info.Fn;
+ if (func.args.len != 3) {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ } else {
+ var emitted_signature = false;
+ if (func.args[0].arg_type) |Self| {
+ if (Self == Context) {
+ // pass, this is always fine.
+ } else if (Self == *const Context) {
+ if (!allow_const_ptr) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self);
+ errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
+ }
+ } else if (Self == *Context) {
+ if (!allow_mutable_ptr) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ emitted_signature = true;
+ }
+ if (!allow_const_ptr) {
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self);
+ errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
+ } else {
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ " or " ++ @typeName(*const Context) ++ ", but is " ++ @typeName(Self);
+ errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer.";
+ }
+ }
+ } else {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context);
+ if (allow_const_ptr) {
+ errors = errors ++ " or " ++ @typeName(*const Context);
+ if (allow_mutable_ptr) {
+ errors = errors ++ " or " ++ @typeName(*Context);
+ }
+ }
+ errors = errors ++ ", but is " ++ @typeName(Self);
+ }
+ }
+ if (func.args[1].arg_type.? != PseudoKey) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?);
+ }
+ if (func.args[2].arg_type.? != Key) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "Third parameter must be " ++ @typeName(Key) ++ ", but is " ++ @typeName(func.args[2].arg_type.?);
+ }
+ if (func.return_type.? != bool) {
+ if (!emitted_signature) {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ emitted_signature = true;
+ }
+ errors = errors ++ lazy.deep_prefix ++ "Return type must be bool, but was " ++ @typeName(func.return_type.?);
+ }
+ // If any of these are generic (null), we cannot verify them.
+ // The call sites check the return type, but cannot check the
+ // parameters. This may cause compile errors with generic hash/eql functions.
+ }
+ } else {
+ errors = errors ++ lazy.err_invalid_eql_signature;
+ }
+ } else {
+ errors = errors ++ lazy.prefix ++ @typeName(Context) ++ " must declare an eql function with signature " ++ lazy.eql_signature;
+ }
+
+ if (errors.len != 0) {
+ // errors begins with a newline (from lazy.prefix)
+ @compileError("Problems found with hash context type " ++ @typeName(Context) ++ ":" ++ errors);
+ }
+ }
+}
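+
+// A hand-written context satisfying the interface verifyContext checks; the
+// IdentityContext name and the identity hash are illustrative only.
+test "verifyContext accepts a minimal hand-written context" {
+ const IdentityContext = struct {
+ pub fn hash(self: @This(), key: u64) u64 {
+ return key;
+ }
+ pub fn eql(self: @This(), a: u64, b: u64) bool {
+ return a == b;
+ }
+ };
+ verifyContext(IdentityContext, u64, u64, u64);
+}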
+
+/// General purpose hash table.
+/// No order is guaranteed and any modification invalidates live iterators.
+/// It provides fast operations (lookup, insertion, deletion) with quite high
+/// load factors (up to 80% by default) for a low memory usage.
+/// For a hash map that can be initialized directly and does not store an Allocator
+/// field, see `HashMapUnmanaged`.
+/// If iterating over the table entries is a strong usecase and needs to be fast,
+/// prefer the alternative `std.ArrayHashMap`.
+/// Context must be a struct type with two member functions:
+/// hash(self, K) u64
+/// eql(self, K, K) bool
+/// Adapted variants of many functions are provided. These variants
+/// take a pseudo key instead of a key. Their context must have the functions:
+/// hash(self, PseudoKey) u64
+/// eql(self, PseudoKey, K) bool
+pub fn HashMap(
+ comptime K: type,
+ comptime V: type,
+ comptime Context: type,
+ comptime max_load_percentage: u64,
+) type {
+ comptime verifyContext(Context, K, K, u64);
+ return struct {
+ unmanaged: Unmanaged,
+ allocator: *Allocator,
+ ctx: Context,
+
+ /// The type of the unmanaged hash map underlying this wrapper
+ pub const Unmanaged = HashMapUnmanaged(K, V, Context, max_load_percentage);
+ /// An entry, containing pointers to a key and value stored in the map
+ pub const Entry = Unmanaged.Entry;
+ /// A copy of a key and value which are no longer in the map
+ pub const KV = Unmanaged.KV;
+ /// The integer type that is the result of hashing
+ pub const Hash = Unmanaged.Hash;
+ /// The iterator type returned by iterator()
+ pub const Iterator = Unmanaged.Iterator;
+ /// The integer type used to store the size of the map
+ pub const Size = Unmanaged.Size;
+ /// The type returned from getOrPut and variants
+ pub const GetOrPutResult = Unmanaged.GetOrPutResult;
+
+ const Self = @This();
+
+ /// Create a managed hash map with an empty context.
+ /// If the context is not zero-sized, you must use
+ /// initContext(allocator, ctx) instead.
+ pub fn init(allocator: *Allocator) Self {
+ if (@sizeOf(Context) != 0) {
+ @compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
+ }
+ return .{
+ .unmanaged = .{},
+ .allocator = allocator,
+ .ctx = undefined, // ctx is zero-sized so this is safe.
+ };
+ }
+
+ /// Create a managed hash map with a context
+ pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+ return .{
+ .unmanaged = .{},
+ .allocator = allocator,
+ .ctx = ctx,
+ };
+ }
+
+ /// Release the backing array and invalidate this map.
+ /// This does *not* deinit keys, values, or the context!
+ /// If your keys or values need to be released, ensure
+ /// that that is done before calling this function.
+ pub fn deinit(self: *Self) void {
+ self.unmanaged.deinit(self.allocator);
+ self.* = undefined;
+ }
+
+ /// Empty the map, but keep the backing allocation for future use.
+ /// This does *not* free keys or values! Be sure to
+ /// release them if they need deinitialization before
+ /// calling this function.
+ pub fn clearRetainingCapacity(self: *Self) void {
+ return self.unmanaged.clearRetainingCapacity();
+ }
+
+ /// Empty the map and release the backing allocation.
+ /// This does *not* free keys or values! Be sure to
+ /// release them if they need deinitialization before
+ /// calling this function.
+ pub fn clearAndFree(self: *Self) void {
+ return self.unmanaged.clearAndFree(self.allocator);
+ }
+
+ /// Return the number of items in the map.
+ pub fn count(self: Self) Size {
+ return self.unmanaged.count();
+ }
+
+ /// Create an iterator over the entries in the map.
+ /// The iterator is invalidated if the map is modified.
+ pub fn iterator(self: *const Self) Iterator {
+ return self.unmanaged.iterator();
+ }
+
+ /// If key exists this function cannot fail.
+ /// If there is an existing item with `key`, then the result
+ /// `Entry` pointers point to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined value, and
+ /// the `Entry` pointers point to it. Caller should then initialize
+ /// the value (but not the key).
+ pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
+ return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx);
+ }
+
+ /// If key exists this function cannot fail.
+ /// If there is an existing item with `key`, then the result
+ /// `Entry` pointers point to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined key and value, and
+ /// the `Entry` pointers point to it. Caller must then initialize
+ /// the key and value.
+ pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) !GetOrPutResult {
+ return self.unmanaged.getOrPutContextAdapted(self.allocator, key, ctx, self.ctx);
+ }
+
+ /// If there is an existing item with `key`, then the result
+ /// `Entry` pointers point to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined value, and
+ /// the `Entry` pointers point to it. Caller should then initialize
+ /// the value (but not the key).
+ /// If a new entry needs to be stored, this function asserts there
+ /// is enough capacity to store it.
+ pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
+ return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx);
+ }
+
+ /// If there is an existing item with `key`, then the result
+ /// `Entry` pointers point to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined value, and
+ /// the `Entry` pointers point to it. Caller must then initialize
+ /// the key and value.
+ /// If a new entry needs to be stored, this function asserts there
+ /// is enough capacity to store it.
+ pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
+ return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
+ }
+
+ pub fn getOrPutValue(self: *Self, key: K, value: V) !Entry {
+ return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx);
+ }
+
+ /// Increases capacity, guaranteeing that insertions up until the
+ /// `expected_count` will not cause an allocation, and therefore cannot fail.
+ pub fn ensureCapacity(self: *Self, expected_count: Size) !void {
+ return self.unmanaged.ensureCapacityContext(self.allocator, expected_count, self.ctx);
+ }
+
+ /// Returns the number of total elements which may be present before it is
+ /// no longer guaranteed that no allocations will be performed.
+ pub fn capacity(self: *Self) Size {
+ return self.unmanaged.capacity();
+ }
+
+ /// Clobbers any existing data. To detect if a put would clobber
+ /// existing data, see `getOrPut`.
+ pub fn put(self: *Self, key: K, value: V) !void {
+ return self.unmanaged.putContext(self.allocator, key, value, self.ctx);
+ }
+
+ /// Inserts a key-value pair into the hash map, asserting that no previous
+ /// entry with the same key is already present
+ pub fn putNoClobber(self: *Self, key: K, value: V) !void {
+ return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx);
+ }
+
+ /// Asserts there is enough capacity to store the new key-value pair.
+ /// Clobbers any existing data. To detect if a put would clobber
+ /// existing data, see `getOrPutAssumeCapacity`.
+ pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
+ return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx);
+ }
+
+ /// Asserts there is enough capacity to store the new key-value pair.
+ /// Asserts that it does not clobber any existing data.
+ /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
+ pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
+ return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx);
+ }
+
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
+ pub fn fetchPut(self: *Self, key: K, value: V) !?KV {
+ return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx);
+ }
+
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
+ /// If insertion happens, asserts there is enough capacity without allocating.
+ pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
+ return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx);
+ }
+
+ /// Removes a value from the map and returns the removed kv pair.
+ pub fn fetchRemove(self: *Self, key: K) ?KV {
+ return self.unmanaged.fetchRemoveContext(key, self.ctx);
+ }
+
+ pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
+ return self.unmanaged.fetchRemoveAdapted(key, ctx);
+ }
+
+ /// Finds the value associated with a key in the map
+ pub fn get(self: Self, key: K) ?*V {
+ return self.unmanaged.getContext(key, self.ctx);
+ }
+
+ pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
+ return self.unmanaged.getAdapted(key, ctx);
+ }
+
+ /// Finds the key and value associated with a key in the map
+ pub fn getEntry(self: Self, key: K) ?Entry {
+ return self.unmanaged.getEntryContext(key, self.ctx);
+ }
+
+ pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
+ return self.unmanaged.getEntryAdapted(key, ctx);
+ }
+
+ /// Check if the map contains a key
+ pub fn contains(self: Self, key: K) bool {
+ return self.unmanaged.containsContext(key, self.ctx);
+ }
+
+ pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
+ return self.unmanaged.containsAdapted(key, ctx);
+ }
+
+ /// If there is an `Entry` with a matching key, it is deleted from
+ /// the hash map, and then returned from this function.
+ pub fn remove(self: *Self, key: K) bool {
+ return self.unmanaged.removeContext(key, self.ctx);
+ }
+
+ pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
+ return self.unmanaged.removeAdapted(key, ctx);
+ }
+
+ /// Creates a copy of this map, using the same allocator
+ pub fn clone(self: Self) !Self {
+ var other = try self.unmanaged.cloneContext(self.allocator, self.ctx);
+ return other.promoteContext(self.allocator, self.ctx);
+ }
+
+ /// Creates a copy of this map, using a specified allocator
+ pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self {
+ var other = try self.unmanaged.cloneContext(new_allocator, self.ctx);
+ return other.promoteContext(new_allocator, self.ctx);
+ }
+
+ /// Creates a copy of this map, using a specified context
+ pub fn cloneWithContext(self: Self, new_ctx: anytype) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
+ var other = try self.unmanaged.cloneContext(self.allocator, new_ctx);
+ return other.promoteContext(self.allocator, new_ctx);
+ }
+
+ /// Creates a copy of this map, using a specified allocator and context
+ pub fn cloneWithAllocatorAndContext(new_allocator: *Allocator, new_ctx: anytype) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
+ var other = try self.unmanaged.cloneContext(new_allocator, new_ctx);
+ return other.promoteContext(new_allocator, new_ctx);
+ }
+ };
+}
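+
+// Sketch of a map whose context carries runtime state (a hash seed), so it must
+// be created with initContext rather than init; SeededContext and the seed value
+// below are illustrative, not part of the API above.
+test "HashMap with a stateful context uses initContext" {
+ const SeededContext = struct {
+ seed: u64,
+ pub fn hash(self: @This(), key: u32) u64 {
+ return Wyhash.hash(self.seed, std.mem.asBytes(&key));
+ }
+ pub fn eql(self: @This(), a: u32, b: u32) bool {
+ return a == b;
+ }
+ };
+ var map = HashMap(u32, u32, SeededContext, default_max_load_percentage).initContext(std.testing.allocator, .{ .seed = 0xdead });
+ defer map.deinit();
+ try map.put(1, 100);
+ try std.testing.expectEqual(map.get(1).?.*, 100);
+}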
+
+/// A HashMap based on open addressing and linear probing.
+/// A lookup or modification typically incurs only 2 cache misses.
+/// No order is guaranteed and any modification invalidates live iterators.
+/// It achieves good performance with quite high load factors (by default,
+/// grow is triggered at 80% full) and only one byte of overhead per element.
+/// The struct itself is only 16 bytes for a small footprint. This comes at
+/// the price of handling size with u32, which should be reasonable enough
+/// for almost all uses.
+/// Deletions are achieved with tombstones.
+pub fn HashMapUnmanaged(
+ comptime K: type,
+ comptime V: type,
+ comptime Context: type,
+ comptime max_load_percentage: u64,
+) type {
+ if (max_load_percentage <= 0 or max_load_percentage >= 100)
+ @compileError("max_load_percentage must be between 0 and 100.");
+ comptime verifyContext(Context, K, K, u64);
+
+ return struct {
+ const Self = @This();
+
+ // This is actually a midway pointer to the single buffer containing
+ // a `Header` field, the `Metadata`s and `Entry`s.
+ // At `-@sizeOf(Header)` is the Header field.
+ // At `sizeOf(Metadata) * capacity + offset`, which is pointed to by
+ // self.header().entries, is the array of entries.
+ // This means that the hashmap only holds one live allocation, to
+ // reduce memory fragmentation and struct size.
+ /// Pointer to the metadata.
+ metadata: ?[*]Metadata = null,
+
+ /// Current number of elements in the hashmap.
+ size: Size = 0,
+
+ // Having a countdown to grow reduces the number of instructions to
+ // execute when determining if the hashmap has enough capacity already.
+ /// Number of available slots before a grow is needed to satisfy the
+ /// `max_load_percentage`.
+ available: Size = 0,
+
+ // This is purely empirical and not a /very smart magic constant™/.
+ /// Capacity of the first grow when bootstrapping the hashmap.
+ const minimal_capacity = 8;
+
+ // This hashmap is specially designed for sizes that fit in a u32.
+ pub const Size = u32;
+
+ // u64 hashes guarantee us that the fingerprint bits will never be used
+ // to compute the index of a slot, maximizing the use of entropy.
+ pub const Hash = u64;
+
+ pub const Entry = struct {
+ key: *K,
+ value: *V,
+ };
+
+ pub const KV = struct {
+ key: K,
+ value: V,
+ };
+
+ const Header = packed struct {
+ values: [*]V,
+ keys: [*]K,
+ capacity: Size,
+ };
+
+ /// Metadata for a slot. It can be in three states: empty, used or
+ /// tombstone. Tombstones indicate that an entry was previously used,
+ /// they are a simple way to handle removal.
+ /// To this state, we add 6 bits from the slot's key hash. These are
+ /// used as a fast way to disambiguate between entries without
+ /// having to use the equality function. If two fingerprints are
+ /// different, we know that we don't have to compare the keys at all.
+ /// The 6 bits are the highest ones from a 64 bit hash. This way, not
+ /// only we use the `log2(capacity)` lowest bits from the hash to determine
+ /// a slot index, but we use 6 more bits to quickly resolve collisions
+ /// when multiple elements with different hashes end up wanting to be in the same slot.
+ /// Not using the equality function means we don't have to read into
+ /// the entries array, avoiding a likely cache miss.
+ const Metadata = packed struct {
+ const FingerPrint = u6;
+
+ used: u1 = 0,
+ tombstone: u1 = 0,
+ fingerprint: FingerPrint = 0,
+
+ pub fn isUsed(self: Metadata) bool {
+ return self.used == 1;
+ }
+
+ pub fn isTombstone(self: Metadata) bool {
+ return self.tombstone == 1;
+ }
+
+ pub fn takeFingerprint(hash: Hash) FingerPrint {
+ const hash_bits = @typeInfo(Hash).Int.bits;
+ const fp_bits = @typeInfo(FingerPrint).Int.bits;
+ return @truncate(FingerPrint, hash >> (hash_bits - fp_bits));
+ }
+
+ pub fn fill(self: *Metadata, fp: FingerPrint) void {
+ self.used = 1;
+ self.tombstone = 0;
+ self.fingerprint = fp;
+ }
+
+ pub fn remove(self: *Metadata) void {
+ self.used = 0;
+ self.tombstone = 1;
+ self.fingerprint = 0;
+ }
+ };
+
+ comptime {
+ assert(@sizeOf(Metadata) == 1);
+ assert(@alignOf(Metadata) == 1);
+ }
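+
+ // Worked example of the fingerprint scheme documented above: the fingerprint
+ // is the top 6 bits of the 64-bit hash, checked here with an illustrative value.
+ comptime {
+ const example_hash: Hash = 0b101101 << 58;
+ assert(Metadata.takeFingerprint(example_hash) == 0b101101);
+ }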
+
+ pub const Iterator = struct {
+ hm: *const Self,
+ index: Size = 0,
+
+ pub fn next(it: *Iterator) ?Entry {
+ assert(it.index <= it.hm.capacity());
+ if (it.hm.size == 0) return null;
+
+ const cap = it.hm.capacity();
+ const end = it.hm.metadata.? + cap;
+ var metadata = it.hm.metadata.? + it.index;
+
+ while (metadata != end) : ({
+ metadata += 1;
+ it.index += 1;
+ }) {
+ if (metadata[0].isUsed()) {
+ const key = &it.hm.keys()[it.index];
+ const value = &it.hm.values()[it.index];
+ it.index += 1;
+ return Entry{ .key = key, .value = value };
+ }
+ }
+
+ return null;
+ }
+ };
+
+ pub const GetOrPutResult = struct {
+ entry: Entry,
+ found_existing: bool,
+ };
+
+ pub const Managed = HashMap(K, V, Context, max_load_percentage);
+
+ pub fn promote(self: Self, allocator: *Allocator) Managed {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
+ return promoteContext(self, allocator, undefined);
+ }
+
+ pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+ return .{
+ .unmanaged = self,
+ .allocator = allocator,
+ .ctx = ctx,
+ };
+ }
+
+ fn isUnderMaxLoadPercentage(size: Size, cap: Size) bool {
+ return size * 100 < max_load_percentage * cap;
+ }
+
+ pub fn deinit(self: *Self, allocator: *Allocator) void {
+ self.deallocate(allocator);
+ self.* = undefined;
+ }
+
+ fn capacityForSize(size: Size) Size {
+ var new_cap = @truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1);
+ new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable;
+ return new_cap;
+ }
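+
+ // Worked example of the sizing arithmetic above, at the default 80% load:
+ // capacityForSize(65) = ceilPowerOfTwo(65 * 100 / 80 + 1) = ceilPowerOfTwo(82) = 128,
+ // which is why ensureCapacity(65) in the tests below ends at a capacity of 128.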
+
+ pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureCapacityContext instead.");
+ return ensureCapacityContext(self, allocator, new_size, undefined);
+ }
+
+ pub fn ensureCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void {
+ if (new_size > self.size)
+ try self.growIfNeeded(allocator, new_size - self.size, ctx);
+ }
+
+ pub fn clearRetainingCapacity(self: *Self) void {
+ if (self.metadata) |_| {
+ self.initMetadatas();
+ self.size = 0;
+ self.available = @truncate(u32, (self.capacity() * max_load_percentage) / 100);
+ }
+ }
+
+ pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ self.deallocate(allocator);
+ self.size = 0;
+ self.available = 0;
+ }
+
+ pub fn count(self: *const Self) Size {
+ return self.size;
+ }
+
+ fn header(self: *const Self) *Header {
+ return @ptrCast(*Header, @ptrCast([*]Header, self.metadata.?) - 1);
+ }
+
+ fn keys(self: *const Self) [*]K {
+ return self.header().keys;
+ }
+
+ fn values(self: *const Self) [*]V {
+ return self.header().values;
+ }
+
+ pub fn capacity(self: *const Self) Size {
+ if (self.metadata == null) return 0;
+
+ return self.header().capacity;
+ }
+
+ pub fn iterator(self: *const Self) Iterator {
+ return .{ .hm = self };
+ }
+
+ pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
+ return self.putNoClobberContext(allocator, key, value, undefined);
+ }
+
+ /// Insert an entry in the map. Assumes it is not already present.
+ pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ assert(!self.containsContext(key, ctx));
+ try self.growIfNeeded(allocator, 1, ctx);
+
+ self.putAssumeCapacityNoClobberContext(key, value, ctx);
+ }
+
+ /// Asserts there is enough capacity to store the new key-value pair.
+ /// Clobbers any existing data. To detect if a put would clobber
+ /// existing data, see `getOrPutAssumeCapacity`.
+ pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityContext instead.");
+ return self.putAssumeCapacityContext(key, value, undefined);
+ }
+
+ pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void {
+ const gop = self.getOrPutAssumeCapacityContext(key, ctx);
+ gop.entry.value.* = value;
+ }
+
+ pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityNoClobberContext instead.");
+ return self.putAssumeCapacityNoClobberContext(key, value, undefined);
+ }
+
+ /// Insert an entry in the map. Assumes it is not already present,
+ /// and that no allocation is needed.
+ pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void {
+ assert(!self.containsContext(key, ctx));
+
+ const hash = ctx.hash(key);
+ const mask = self.capacity() - 1;
+ var idx = @truncate(usize, hash & mask);
+
+ var metadata = self.metadata.? + idx;
+ while (metadata[0].isUsed()) {
+ idx = (idx + 1) & mask;
+ metadata = self.metadata.? + idx;
+ }
+
+ if (!metadata[0].isTombstone()) {
+ assert(self.available > 0);
+ self.available -= 1;
+ }
+
+ const fingerprint = Metadata.takeFingerprint(hash);
+ metadata[0].fill(fingerprint);
+ self.keys()[idx] = key;
+ self.values()[idx] = value;
+
+ self.size += 1;
+ }
+
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
+ pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
+ return self.fetchPutContext(allocator, key, value, undefined);
+ }
+
+ pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+ const gop = try self.getOrPutContext(allocator, key, ctx);
+ var result: ?KV = null;
+ if (gop.found_existing) {
+ result = KV{
+ .key = gop.entry.key.*,
+ .value = gop.entry.value.*,
+ };
+ }
+ gop.entry.value.* = value;
+ return result;
+ }
+
+ /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
+ /// If insertion happens, asserts there is enough capacity without allocating.
+ pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutAssumeCapacityContext instead.");
+ return self.fetchPutAssumeCapacityContext(key, value, undefined);
+ }
+
+ pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV {
+ const gop = self.getOrPutAssumeCapacityContext(key, ctx);
+ var result: ?KV = null;
+ if (gop.found_existing) {
+ result = KV{
+ .key = gop.entry.key.*,
+ .value = gop.entry.value.*,
+ };
+ }
+ gop.entry.value.* = value;
+ return result;
+ }
+
+ /// If there is an `Entry` with a matching key, it is deleted from
+ /// the hash map, and then returned from this function.
+ pub fn fetchRemove(self: *Self, key: K) ?KV {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchRemoveContext instead.");
+ return self.fetchRemoveContext(key, undefined);
+ }
+
+ pub fn fetchRemoveContext(self: *Self, key: K, ctx: Context) ?KV {
+ return self.fetchRemoveAdapted(key, ctx);
+ }
+
+ pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
+ if (self.getIndex(key, ctx)) |idx| {
+ const old_key = &self.keys()[idx];
+ const old_val = &self.values()[idx];
+ const result = KV{
+ .key = old_key.*,
+ .value = old_val.*,
+ };
+ self.metadata.?[idx].remove();
+ old_key.* = undefined;
+ old_val.* = undefined;
+ self.size -= 1;
+ return result;
+ }
+
+ return null;
+ }
+
+ /// Find the index containing the data for the given key.
+ /// Whether this function returns null is almost always
+ /// branched on after this function returns, and this function
+ /// returns null/not null from separate code paths. We
+ /// want the optimizer to remove that branch and instead directly
+ /// fuse the basic blocks after the branch to the basic blocks
+ /// from this function. To encourage that, this function is
+ /// marked as inline.
+ fn getIndex(self: Self, key: anytype, ctx: anytype) callconv(.Inline) ?usize {
+ comptime verifyContext(@TypeOf(ctx), @TypeOf(key), K, Hash);
+
+ if (self.size == 0) {
+ return null;
+ }
+
+ // If you get a compile error on this line, it means that your generic hash
+ // function is invalid for these parameters.
+ const hash = ctx.hash(key);
+ // verifyContext can't verify the return type of generic hash functions,
+ // so we need to double-check it here.
+ if (@TypeOf(hash) != Hash) {
+ @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type! " ++ @typeName(Hash) ++ " was expected, but found " ++ @typeName(@TypeOf(hash)));
+ }
+ const mask = self.capacity() - 1;
+ const fingerprint = Metadata.takeFingerprint(hash);
+ var idx = @truncate(usize, hash & mask);
+
+ var metadata = self.metadata.? + idx;
+ while (metadata[0].isUsed() or metadata[0].isTombstone()) {
+ if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
+ const test_key = &self.keys()[idx];
+ // If you get a compile error on this line, it means that your generic eql
+ // function is invalid for these parameters.
+ const eql = ctx.eql(key, test_key.*);
+ // verifyContext can't verify the return type of generic eql functions,
+ // so we need to double-check it here.
+ if (@TypeOf(eql) != bool) {
+ @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type! bool was expected, but found " ++ @typeName(@TypeOf(eql)));
+ }
+ if (eql) {
+ return idx;
+ }
+ }
+
+ idx = (idx + 1) & mask;
+ metadata = self.metadata.? + idx;
+ }
+
+ return null;
+ }
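+
+ // Probe-sequence sketch for getIndex above, assuming a capacity of 8 (mask = 7):
+ // a key whose hash ends in 0b101 starts at slot 5 and, while slots are used or
+ // tombstoned without a fingerprint+key match, walks 6, 7, 0, 1, ... stopping at
+ // the first free slot (miss) or at the matching entry (hit).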
+
+ pub fn getEntry(self: Self, key: K) ?Entry {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getEntryContext instead.");
+ return self.getEntryContext(key, undefined);
+ }
+
+ pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry {
+ return self.getEntryAdapted(key, ctx);
+ }
+
+ pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
+ if (self.getIndex(key, ctx)) |idx| {
+ return Entry{
+ .key = &self.keys()[idx],
+ .value = &self.values()[idx],
+ };
+ }
+ return null;
+ }
+
+ /// Insert an entry if the associated key is not already present, otherwise update preexisting value.
+ pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
+ return self.putContext(allocator, key, value, undefined);
+ }
+
+ pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ const result = try self.getOrPutContext(allocator, key, ctx);
+ result.entry.value.* = value;
+ }
+
+ /// Get an optional pointer to the value associated with key, if present.
+ pub fn get(self: Self, key: K) ?*V {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getContext instead.");
+ return self.getContext(key, undefined);
+ }
+
+ pub fn getContext(self: Self, key: K, ctx: Context) ?*V {
+ return self.getAdapted(key, ctx);
+ }
+
+ /// Get an optional pointer to the value associated with key, if present.
+ pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
+ if (self.getIndex(key, ctx)) |idx| {
+ return &self.values()[idx];
+ }
+ return null;
+ }
+
+ pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
+ return self.getOrPutContext(allocator, key, undefined);
+ }
+
+ pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+ const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
+ if (!gop.found_existing) {
+ gop.entry.key.* = key;
+ }
+ return gop;
+ }
+
+ pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
+ return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
+ }
+
+ pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+ self.growIfNeeded(allocator, 1, ctx) catch |err| {
+ // If allocation fails, try to do the lookup anyway.
+ // If we find an existing item, we can return it.
+ // Otherwise return the error, we could not add another.
+ const index = self.getIndex(key, key_ctx) orelse return err;
+ return GetOrPutResult{
+ .entry = .{
+ .key = &self.keys()[index],
+ .value = &self.values()[index],
+ },
+ .found_existing = true,
+ };
+ };
+ return self.getOrPutAssumeCapacityAdapted(key, key_ctx);
+ }
+
+ pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutAssumeCapacityContext instead.");
+ return self.getOrPutAssumeCapacityContext(key, undefined);
+ }
+
+ pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult {
+ const result = self.getOrPutAssumeCapacityAdapted(key, ctx);
+ if (!result.found_existing) {
+ result.entry.key.* = key;
+ }
+ return result;
+ }
+
+ pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
+ comptime verifyContext(@TypeOf(ctx), @TypeOf(key), K, Hash);
+
+ // If you get a compile error on this line, it means that your generic hash
+ // function is invalid for these parameters.
+ const hash = ctx.hash(key);
+ // verifyContext can't verify the return type of generic hash functions,
+ // so we need to double-check it here.
+ if (@TypeOf(hash) != Hash) {
+ @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type! " ++ @typeName(Hash) ++ " was expected, but found " ++ @typeName(@TypeOf(hash)));
+ }
+ const mask = self.capacity() - 1;
+ const fingerprint = Metadata.takeFingerprint(hash);
+ var idx = @truncate(usize, hash & mask);
+
+ var first_tombstone_idx: usize = self.capacity(); // invalid index
+ var metadata = self.metadata.? + idx;
+ while (metadata[0].isUsed() or metadata[0].isTombstone()) {
+ if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
+ const test_key = &self.keys()[idx];
+ // If you get a compile error on this line, it means that your generic eql
+ // function is invalid for these parameters.
+ const eql = ctx.eql(key, test_key.*);
+ // verifyContext can't verify the return type of generic eql functions,
+ // so we need to double-check it here.
+ if (@TypeOf(eql) != bool) {
+ @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type! bool was expected, but found " ++ @typeName(@TypeOf(eql)));
+ }
+ if (eql) {
+ return GetOrPutResult{ .entry = .{
+ .key = test_key,
+ .value = &self.values()[idx],
+ }, .found_existing = true };
+ }
+ } else if (first_tombstone_idx == self.capacity() and metadata[0].isTombstone()) {
+ first_tombstone_idx = idx;
+ }
+
+ idx = (idx + 1) & mask;
+ metadata = self.metadata.? + idx;
+ }
+
+ if (first_tombstone_idx < self.capacity()) {
+ // Cheap try to lower probing lengths after deletions. Recycle a tombstone.
+ idx = first_tombstone_idx;
+ metadata = self.metadata.? + idx;
+ } else {
+ // We're using a slot previously free.
+ self.available -= 1;
+ }
+
+ metadata[0].fill(fingerprint);
+ const new_key = &self.keys()[idx];
+ const new_value = &self.values()[idx];
+ new_key.* = key;
+ new_value.* = undefined;
+ self.size += 1;
+
+ return GetOrPutResult{ .entry = .{
+ .key = new_key,
+ .value = new_value,
+ }, .found_existing = false };
+ }
+
+ pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
+ return self.getOrPutValueContext(allocator, key, value, undefined);
+ }
+
+ pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry {
+ const res = try self.getOrPutAdapted(allocator, key, ctx);
+ if (!res.found_existing) {
+ res.entry.key.* = key;
+ res.entry.value.* = value;
+ }
+ return res.entry;
+ }
+
+ /// Return true if there is a value associated with key in the map.
+ pub fn contains(self: *const Self, key: K) bool {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call containsContext instead.");
+ return self.containsContext(key, undefined);
+ }
+
+ pub fn containsContext(self: *const Self, key: K, ctx: Context) bool {
+ return self.containsAdapted(key, ctx);
+ }
+
+ pub fn containsAdapted(self: *const Self, key: anytype, ctx: anytype) bool {
+ return self.getIndex(key, ctx) != null;
+ }
+
+ /// If there is an `Entry` with a matching key, it is deleted from
+ /// the hash map, and this function returns true. Otherwise this
+ /// function returns false.
+ pub fn remove(self: *Self, key: K) bool {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call removeContext instead.");
+ return self.removeContext(key, undefined);
+ }
+
+ pub fn removeContext(self: *Self, key: K, ctx: Context) bool {
+ return self.removeAdapted(key, ctx);
+ }
+
+ pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
+ if (self.getIndex(key, ctx)) |idx| {
+ self.metadata.?[idx].remove();
+ self.keys()[idx] = undefined;
+ self.values()[idx] = undefined;
+ self.size -= 1;
+ return true;
+ }
+
+ return false;
+ }
+
+ fn initMetadatas(self: *Self) void {
+ @memset(@ptrCast([*]u8, self.metadata.?), 0, @sizeOf(Metadata) * self.capacity());
+ }
+
+ // This counts the number of occupied slots, used + tombstones, which is
+ // what has to stay under the max_load_percentage of capacity.
+ fn load(self: *const Self) Size {
+ const max_load = (self.capacity() * max_load_percentage) / 100;
+ assert(max_load >= self.available);
+ return @truncate(Size, max_load - self.available);
+ }
+
+ fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void {
+ if (new_count > self.available) {
+ try self.grow(allocator, capacityForSize(self.load() + new_count), ctx);
+ }
+ }
+
+ pub fn clone(self: Self, allocator: *Allocator) !Self {
+ if (@sizeOf(Context) != 0)
+ @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
+ return self.cloneContext(allocator, @as(Context, undefined));
+ }
+
+ pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
+ var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){};
+ if (self.size == 0)
+ return other;
+
+ const new_cap = capacityForSize(self.size);
+ try other.allocate(allocator, new_cap);
+ other.initMetadatas();
+ other.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
+
+ var i: Size = 0;
+ var metadata = self.metadata.?;
+ var keys_ptr = self.keys();
+ var values_ptr = self.values();
+ while (i < self.capacity()) : (i += 1) {
+ if (metadata[i].isUsed()) {
+ other.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], new_ctx);
+ if (other.size == self.size)
+ break;
+ }
+ }
+
+ return other;
+ }
+
+ fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void {
+ @setCold(true);
+ const new_cap = std.math.max(new_capacity, minimal_capacity);
+ assert(new_cap > self.capacity());
+ assert(std.math.isPowerOfTwo(new_cap));
+
+ var map = Self{};
+ defer map.deinit(allocator);
+ try map.allocate(allocator, new_cap);
+ map.initMetadatas();
+ map.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
+
+ if (self.size != 0) {
+ const old_capacity = self.capacity();
+ var i: Size = 0;
+ var metadata = self.metadata.?;
+ var keys_ptr = self.keys();
+ var values_ptr = self.values();
+ while (i < old_capacity) : (i += 1) {
+ if (metadata[i].isUsed()) {
+ map.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], ctx);
+ if (map.size == self.size)
+ break;
+ }
+ }
+ }
+
+ self.size = 0;
+ std.mem.swap(Self, self, &map);
+ }
+
+ fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
+ const header_align = @alignOf(Header);
+ const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
+ const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
+ const max_align = comptime math_max3(header_align, key_align, val_align);
+
+ const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata);
+ comptime assert(@alignOf(Metadata) == 1);
+
+ const keys_start = std.mem.alignForward(meta_size, key_align);
+ const keys_end = keys_start + new_capacity * @sizeOf(K);
+
+ const vals_start = std.mem.alignForward(keys_end, val_align);
+ const vals_end = vals_start + new_capacity * @sizeOf(V);
+
+ const total_size = std.mem.alignForward(vals_end, max_align);
+
+ const slice = try allocator.alignedAlloc(u8, max_align, total_size);
+ const ptr = @ptrToInt(slice.ptr);
+
+ const metadata = ptr + @sizeOf(Header);
+
+ const hdr = @intToPtr(*Header, ptr);
+ if (@sizeOf([*]V) != 0) {
+ hdr.values = @intToPtr([*]V, ptr + vals_start);
+ }
+ if (@sizeOf([*]K) != 0) {
+ hdr.keys = @intToPtr([*]K, ptr + keys_start);
+ }
+ hdr.capacity = new_capacity;
+ self.metadata = @intToPtr([*]Metadata, metadata);
+ }
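+
+ // Resulting single-buffer layout from allocate(), as described near the top of
+ // this struct: [Header][Metadata * capacity][pad][K * capacity][pad][V * capacity],
+ // with self.metadata pointing just past the Header.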
+
+ fn deallocate(self: *Self, allocator: *Allocator) void {
+ if (self.metadata == null) return;
+
+ const header_align = @alignOf(Header);
+ const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
+ const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
+ const max_align = comptime math_max3(header_align, key_align, val_align);
+
+ const cap = self.capacity();
+ const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata);
+ comptime assert(@alignOf(Metadata) == 1);
+
+ const keys_start = std.mem.alignForward(meta_size, key_align);
+ const keys_end = keys_start + cap * @sizeOf(K);
+
+ const vals_start = std.mem.alignForward(keys_end, val_align);
+ const vals_end = vals_start + cap * @sizeOf(V);
+
+ const total_size = std.mem.alignForward(vals_end, max_align);
+
+ const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size];
+ allocator.free(slice);
+
+ self.metadata = null;
+ self.available = 0;
+ }
+ };
+}
+
+const testing = std.testing;
+const expect = std.testing.expect;
+const expectEqual = std.testing.expectEqual;
+
+test "std.hash_map basic usage" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ const count = 5;
+ var i: u32 = 0;
+ var total: u32 = 0;
+ while (i < count) : (i += 1) {
+ try map.put(i, i);
+ total += i;
+ }
+
+ var sum: u32 = 0;
+ var it = map.iterator();
+ while (it.next()) |kv| {
+ sum += kv.key.*;
+ }
+ try expectEqual(total, sum);
+
+ i = 0;
+ sum = 0;
+ while (i < count) : (i += 1) {
+ try expectEqual(i, map.get(i).?.*);
+ sum += map.get(i).?.*;
+ }
+ try expectEqual(total, sum);
+}
+
+test "std.hash_map ensureCapacity" {
+ var map = AutoHashMap(i32, i32).init(std.testing.allocator);
+ defer map.deinit();
+
+ try map.ensureCapacity(20);
+ const initial_capacity = map.capacity();
+ try testing.expect(initial_capacity >= 20);
+ var i: i32 = 0;
+ while (i < 20) : (i += 1) {
+ try testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null);
+ }
+ // shouldn't resize from putAssumeCapacity
+ try testing.expect(initial_capacity == map.capacity());
+}
+
+test "std.hash_map ensureCapacity with tombstones" {
+ var map = AutoHashMap(i32, i32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var i: i32 = 0;
+ while (i < 100) : (i += 1) {
+ try map.ensureCapacity(@intCast(u32, map.count() + 1));
+ map.putAssumeCapacity(i, i);
+ // Remove to create tombstones that still count as load in the hashmap.
+ _ = map.remove(i);
+ }
+}
+
+test "std.hash_map clearRetainingCapacity" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ map.clearRetainingCapacity();
+
+ try map.put(1, 1);
+ try expectEqual(map.get(1).?.*, 1);
+ try expectEqual(map.count(), 1);
+
+ map.clearRetainingCapacity();
+ map.putAssumeCapacity(1, 1);
+ try expectEqual(map.get(1).?.*, 1);
+ try expectEqual(map.count(), 1);
+
+ const cap = map.capacity();
+ try expect(cap > 0);
+
+ map.clearRetainingCapacity();
+ map.clearRetainingCapacity();
+ try expectEqual(map.count(), 0);
+ try expectEqual(map.capacity(), cap);
+ try expect(!map.contains(1));
+}
+
+test "std.hash_map grow" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ const growTo = 12456;
+
+ var i: u32 = 0;
+ while (i < growTo) : (i += 1) {
+ try map.put(i, i);
+ }
+ try expectEqual(map.count(), growTo);
+
+ i = 0;
+ var it = map.iterator();
+ while (it.next()) |kv| {
+ try expectEqual(kv.key.*, kv.value.*);
+ i += 1;
+ }
+ try expectEqual(i, growTo);
+
+ i = 0;
+ while (i < growTo) : (i += 1) {
+ try expectEqual(map.get(i).?.*, i);
+ }
+}
+
+test "std.hash_map clone" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var a = try map.clone();
+ defer a.deinit();
+
+ try expectEqual(a.count(), 0);
+
+ try a.put(1, 1);
+ try a.put(2, 2);
+ try a.put(3, 3);
+
+ var b = try a.clone();
+ defer b.deinit();
+
+ try expectEqual(b.count(), 3);
+ try expectEqual(b.get(1).?.*, 1);
+ try expectEqual(b.get(2).?.*, 2);
+ try expectEqual(b.get(3).?.*, 3);
+}
+
+test "std.hash_map ensureCapacity with existing elements" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ try map.put(0, 0);
+ try expectEqual(map.count(), 1);
+ try expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity);
+
+ try map.ensureCapacity(65);
+ try expectEqual(map.count(), 1);
+ try expectEqual(map.capacity(), 128);
+}
+
+test "std.hash_map ensureCapacity satisfies max load factor" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ try map.ensureCapacity(127);
+ try expectEqual(map.capacity(), 256);
+}
+
+test "std.hash_map remove" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var i: u32 = 0;
+ while (i < 16) : (i += 1) {
+ try map.put(i, i);
+ }
+
+ i = 0;
+ while (i < 16) : (i += 1) {
+ if (i % 3 == 0) {
+ _ = map.remove(i);
+ }
+ }
+ try expectEqual(map.count(), 10);
+ var it = map.iterator();
+ while (it.next()) |kv| {
+ try expectEqual(kv.key.*, kv.value.*);
+ try expect(kv.key.* % 3 != 0);
+ }
+
+ i = 0;
+ while (i < 16) : (i += 1) {
+ if (i % 3 == 0) {
+ try expect(!map.contains(i));
+ } else {
+ try expectEqual(map.get(i).?.*, i);
+ }
+ }
+}
+
+test "std.hash_map reverse removes" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var i: u32 = 0;
+ while (i < 16) : (i += 1) {
+ try map.putNoClobber(i, i);
+ }
+
+ i = 16;
+ while (i > 0) : (i -= 1) {
+ _ = map.remove(i - 1);
+ try expect(!map.contains(i - 1));
+ var j: u32 = 0;
+ while (j < i - 1) : (j += 1) {
+ try expectEqual(map.get(j).?.*, j);
+ }
+ }
+
+ try expectEqual(map.count(), 0);
+}
+
+test "std.hash_map multiple removes on same metadata" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var i: u32 = 0;
+ while (i < 16) : (i += 1) {
+ try map.put(i, i);
+ }
+
+ _ = map.remove(7);
+ _ = map.remove(15);
+ _ = map.remove(14);
+ _ = map.remove(13);
+ try expect(!map.contains(7));
+ try expect(!map.contains(15));
+ try expect(!map.contains(14));
+ try expect(!map.contains(13));
+
+ i = 0;
+ while (i < 13) : (i += 1) {
+ if (i == 7) {
+ try expect(!map.contains(i));
+ } else {
+ try expectEqual(map.get(i).?.*, i);
+ }
+ }
+
+ try map.put(15, 15);
+ try map.put(13, 13);
+ try map.put(14, 14);
+ try map.put(7, 7);
+ i = 0;
+ while (i < 16) : (i += 1) {
+ try expectEqual(map.get(i).?.*, i);
+ }
+}
+
+test "std.hash_map put and remove loop in random order" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var keys = std.ArrayList(u32).init(std.testing.allocator);
+ defer keys.deinit();
+
+ const size = 32;
+ const iterations = 100;
+
+ var i: u32 = 0;
+ while (i < size) : (i += 1) {
+ try keys.append(i);
+ }
+ var rng = std.rand.DefaultPrng.init(0);
+
+ i = 0;
+ while (i < iterations) : (i += 1) {
+ std.rand.Random.shuffle(&rng.random, u32, keys.items);
+
+ for (keys.items) |key| {
+ try map.put(key, key);
+ }
+ try expectEqual(map.count(), size);
+
+ for (keys.items) |key| {
+ _ = map.remove(key);
+ }
+ try expectEqual(map.count(), 0);
+ }
+}
+
+test "std.hash_map remove one million elements in random order" {
+ const Map = AutoHashMap(u32, u32);
+ const n = 1000 * 1000;
+ var map = Map.init(std.heap.page_allocator);
+ defer map.deinit();
+
+ var keys = std.ArrayList(u32).init(std.heap.page_allocator);
+ defer keys.deinit();
+
+ var i: u32 = 0;
+ while (i < n) : (i += 1) {
+ keys.append(i) catch unreachable;
+ }
+
+ var rng = std.rand.DefaultPrng.init(0);
+ std.rand.Random.shuffle(&rng.random, u32, keys.items);
+
+ for (keys.items) |key| {
+ map.put(key, key) catch unreachable;
+ }
+
+ std.rand.Random.shuffle(&rng.random, u32, keys.items);
+ i = 0;
+ while (i < n) : (i += 1) {
+ const key = keys.items[i];
+ _ = map.remove(key);
+ }
+}
+
+test "std.hash_map put" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var i: u32 = 0;
+ while (i < 16) : (i += 1) {
+ try map.put(i, i);
+ }
+
+ i = 0;
+ while (i < 16) : (i += 1) {
+ try expectEqual(map.get(i).?.*, i);
+ }
+
+ i = 0;
+ while (i < 16) : (i += 1) {
+ try map.put(i, i * 16 + 1);
+ }
+
+ i = 0;
+ while (i < 16) : (i += 1) {
+ try expectEqual(map.get(i).?.*, i * 16 + 1);
+ }
+}
+
+test "std.hash_map putAssumeCapacity" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ try map.ensureCapacity(20);
+ var i: u32 = 0;
+ while (i < 20) : (i += 1) {
+ map.putAssumeCapacityNoClobber(i, i);
+ }
+
+ i = 0;
+ var sum = i;
+ while (i < 20) : (i += 1) {
+ sum += map.get(i).?.*;
+ }
+ try expectEqual(sum, 190);
+
+ i = 0;
+ while (i < 20) : (i += 1) {
+ map.putAssumeCapacity(i, 1);
+ }
+
+ i = 0;
+ sum = i;
+ while (i < 20) : (i += 1) {
+ sum += map.get(i).?.*;
+ }
+ try expectEqual(sum, 20);
+}
+
+test "std.hash_map getOrPut" {
+ var map = AutoHashMap(u32, u32).init(std.testing.allocator);
+ defer map.deinit();
+
+ var i: u32 = 0;
+ while (i < 10) : (i += 1) {
+ try map.put(i * 2, 2);
+ }
+
+ i = 0;
+ while (i < 20) : (i += 1) {
+ var n = try map.getOrPutValue(i, 1);
+ }
+
+ i = 0;
+ var sum = i;
+ while (i < 20) : (i += 1) {
+ sum += map.get(i).?.*;
+ }
+
+ try expectEqual(sum, 30);
+}
+
+test "std.hash_map basic hash map usage" {
+ var map = AutoHashMap(i32, i32).init(std.testing.allocator);
+ defer map.deinit();
+
+ try testing.expect((try map.fetchPut(1, 11)) == null);
+ try testing.expect((try map.fetchPut(2, 22)) == null);
+ try testing.expect((try map.fetchPut(3, 33)) == null);
+ try testing.expect((try map.fetchPut(4, 44)) == null);
+
+ try map.putNoClobber(5, 55);
+ try testing.expect((try map.fetchPut(5, 66)).?.value == 55);
+ try testing.expect((try map.fetchPut(5, 55)).?.value == 66);
+
+ const gop1 = try map.getOrPut(5);
+ try testing.expect(gop1.found_existing == true);
+ try testing.expect(gop1.entry.value.* == 55);
+ gop1.entry.value.* = 77;
+ try testing.expect(map.getEntry(5).?.value.* == 77);
+
+ const gop2 = try map.getOrPut(99);
+ try testing.expect(gop2.found_existing == false);
+ gop2.entry.value.* = 42;
+ try testing.expect(map.getEntry(99).?.value.* == 42);
+
+ const gop3 = try map.getOrPutValue(5, 5);
+ try testing.expect(gop3.value.* == 77);
+
+ const gop4 = try map.getOrPutValue(100, 41);
+ try testing.expect(gop4.value.* == 41);
+
+ try testing.expect(map.contains(2));
+ try testing.expect(map.getEntry(2).?.value.* == 22);
+ try testing.expect(map.get(2).?.* == 22);
+
+ const rmv1 = map.fetchRemove(2);
+ try testing.expect(rmv1.?.key == 2);
+ try testing.expect(rmv1.?.value == 22);
+ try testing.expect(map.fetchRemove(2) == null);
+ try testing.expect(map.remove(2) == false);
+ try testing.expect(map.getEntry(2) == null);
+ try testing.expect(map.get(2) == null);
+
+ try testing.expect(map.remove(3) == true);
+}
+
+test "std.hash_map clone" {
+ var original = AutoHashMap(i32, i32).init(std.testing.allocator);
+ defer original.deinit();
+
+ var i: u8 = 0;
+ while (i < 10) : (i += 1) {
+ try original.putNoClobber(i, i * 10);
+ }
+
+ var copy = try original.clone();
+ defer copy.deinit();
+
+ i = 0;
+ while (i < 10) : (i += 1) {
+ try testing.expect(copy.get(i).?.* == i * 10);
+ }
+}
diff --git a/src/js_ast.zig b/src/js_ast.zig
index e3b1c62d5..ee6e35729 100644
--- a/src/js_ast.zig
+++ b/src/js_ast.zig
@@ -1455,10 +1455,10 @@ pub const Expr = struct {
return Expr.joinWithComma(all[0], all[1], allocator);
},
else => {
- var expr = Expr.joinWithComma(all[0], all[1], allocator);
- var _all = all[2 .. all.len - 1];
- for (_all) |right| {
- expr = Expr.joinWithComma(expr, right, allocator);
+ var i: usize = 1;
+ var expr = all[0];
+ while (i < all.len) : (i += 1) {
+ expr = Expr.joinWithComma(expr, all[i], allocator);
}
return expr;
diff --git a/src/js_lexer.zig b/src/js_lexer.zig
index ac575a062..89e2e06dd 100644
--- a/src/js_lexer.zig
+++ b/src/js_lexer.zig
@@ -2176,14 +2176,16 @@ fn float64(num: anytype) callconv(.Inline) f64 {
}
fn test_lexer(contents: []const u8) Lexer {
- alloc.setup(std.heap.page_allocator) catch unreachable;
+ alloc.setup(std.heap.c_allocator) catch unreachable;
var log = alloc.dynamic.create(logger.Log) catch unreachable;
log.* = logger.Log.init(alloc.dynamic);
- var source = logger.Source.initPathString(
+ var _source = logger.Source.initPathString(
"index.js",
contents,
);
- return Lexer.init(log, &source, alloc.dynamic) catch unreachable;
+ var source = alloc.dynamic.create(logger.Source) catch unreachable;
+ source.* = _source;
+ return Lexer.init(log, source, alloc.dynamic) catch unreachable;
}
// test "LexerType.next()" {
@@ -2238,7 +2240,7 @@ test "Lexer.next() simple" {
try lex.next();
}
-pub fn test_stringLiteralEquals(expected: string, source_text: string) void {
+pub fn test_stringLiteralEquals(expected: string, source_text: string) !void {
var msgs = std.ArrayList(logger.Msg).init(std.testing.allocator);
var log = logger.Log{
.msgs = msgs,
@@ -2256,8 +2258,9 @@ pub fn test_stringLiteralEquals(expected: string, source_text: string) void {
try lex.next();
}
- var lit = std.unicode.utf16leToUtf8Alloc(std.heap.page_allocator, lex.string_literal) catch unreachable;
+ var lit = std.unicode.utf16leToUtf8Alloc(std.heap.page_allocator, lex.stringLiteralUTF16()) catch unreachable;
std.testing.expectEqualStrings(expected, lit);
+ std.testing.expectEqualStrings(expected, lex.string_literal_slice);
}
pub fn test_skipTo(lexer: *LexerType, n: string) void {
@@ -2269,10 +2272,10 @@ pub fn test_skipTo(lexer: *LexerType, n: string) void {
}
test "LexerType.rawTemplateContents" {
- test_stringLiteralEquals("hello!", "const a = 'hello!';");
- test_stringLiteralEquals("hello!hi", "const b = 'hello!hi';");
- test_stringLiteralEquals("hello!\n\nhi", "const b = `hello!\n\nhi`;");
+ try test_stringLiteralEquals("hello!", "const a = 'hello!';");
+ try test_stringLiteralEquals("hello!hi", "const b = 'hello!hi';");
+ try test_stringLiteralEquals("hello!\n\nhi", "const b = `hello!\n\nhi`;");
// TODO: \r\n
// test_stringLiteralEquals("hello!\nhi", "const b = `hello!\r\nhi`;");
- test_stringLiteralEquals("hello!", "const b = `hello!${\"hi\"}`");
+ try test_stringLiteralEquals("hello!", "const b = `hello!${\"hi\"}`");
}
diff --git a/src/js_parser/js_parser.zig b/src/js_parser/js_parser.zig
index cb48f08e3..39860b42e 100644
--- a/src/js_parser/js_parser.zig
+++ b/src/js_parser/js_parser.zig
@@ -3,7 +3,7 @@ usingnamespace @import("imports.zig");
const TemplatePartTuple = std.meta.Tuple(&[_]type{ []E.TemplatePart, logger.Loc });
const ScopeOrderList = std.ArrayListUnmanaged(?ScopeOrder);
-var panic_buffer = std.mem.zeros(32 * 1024);
+var panic_buffer = std.mem.zeroes([32 * 1024]u8);
var panic_stream = std.io.fixedBufferStream(&panic_buffer);
pub fn ExpressionTransposer(comptime ctx: type, visitor: fn (ptr: *ctx, arg: Expr, state: anytype) Expr) type {
@@ -1771,6 +1771,8 @@ pub const P = struct {
// This is a general place to put lots of Expr objects
expr_list: List(Expr),
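+ // Scope orders queued for the visit pass: prepareForVisitPass copies the non-null entries of
+ // scopes_in_order into this slice, and nextScopeInOrderForVisitPass consumes it from the front.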
+ scope_order_to_visit: []ScopeOrder = &([_]ScopeOrder{}),
+
const TransposeState = struct {
is_await_target: bool = false,
is_then_catch_target: bool = false,
@@ -1827,7 +1829,7 @@ pub const P = struct {
p.import_records_for_current_part.append(import_record_index) catch unreachable;
p.ignoreUsage(p.require_ref);
- return p.e(E.Require{ .import_record_index = import_record_index }, arg.loc);
+ return p.e(E.Require{ .import_record_index = Ref.toInt(import_record_index) }, arg.loc);
},
else => {},
}
@@ -1845,13 +1847,16 @@ pub const P = struct {
};
pub fn s(p: *P, t: anytype, loc: logger.Loc) Stmt {
+ // Output.print("\nStmt: {s} - {d}\n", .{ @typeName(@TypeOf(t)), loc.start });
if (@typeInfo(@TypeOf(t)) == .Pointer) {
return Stmt.init(t, loc);
} else {
return Stmt.alloc(p.allocator, t, loc);
}
}
+
pub fn e(p: *P, t: anytype, loc: logger.Loc) Expr {
+ // Output.print("\nExpr: {s} - {d}\n", .{ @typeName(@TypeOf(t)), loc.start });
if (@typeInfo(@TypeOf(t)) == .Pointer) {
return Expr.init(t, loc);
} else {
@@ -2222,6 +2227,23 @@ pub const P = struct {
}
pub fn prepareForVisitPass(p: *P) !void {
+ {
+ var count: usize = 0;
+ for (p.scopes_in_order.items) |item| {
+ if (item != null) {
+ count += 1;
+ }
+ }
+ var i: usize = 0;
+ p.scope_order_to_visit = try p.allocator.alloc(ScopeOrder, p.scopes_in_order.items.len);
+ for (p.scopes_in_order.items) |item| {
+ if (item) |_item| {
+ p.scope_order_to_visit[i] = _item;
+ i += 1;
+ }
+ }
+ }
+
try p.pushScopeForVisitPass(js_ast.Scope.Kind.entry, locModuleScope);
p.fn_or_arrow_data_visit.is_outside_fn_or_arrow = true;
p.module_scope = p.current_scope;
@@ -2266,20 +2288,18 @@ pub const P = struct {
}
pub fn nextScopeInOrderForVisitPass(p: *P) ScopeOrder {
- while (p.scopes_in_order.items[p.scopes_in_order_visitor_index] == null) {
- p.scopes_in_order_visitor_index += 1;
- }
- const scope_order = p.scopes_in_order.items[p.scopes_in_order_visitor_index].?;
- p.scopes_in_order_visitor_index += 1;
- return scope_order;
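+ // prepareForVisitPass already filtered out the null entries, so the next scope is just the head of the remaining slice.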
+ const head = p.scope_order_to_visit[0];
+ p.scope_order_to_visit = p.scope_order_to_visit[1..p.scope_order_to_visit.len];
+ return head;
}
pub fn pushScopeForVisitPass(p: *P, kind: js_ast.Scope.Kind, loc: logger.Loc) !void {
- for (p.scopes_in_order.items[p.scopes_in_order_visitor_index..p.scopes_in_order.items.len]) |scope_order, i| {
- if (scope_order) |ord| {
- Output.print("Scope ({d}, {d})\n", .{ @enumToInt(ord.scope.kind), ord.loc.start });
- }
- }
+ // Output.print("\n+Loc: {d}\n", .{loc.start});
+ // for (p.scopes_in_order.items[p.scopes_in_order_visitor_index..p.scopes_in_order.items.len]) |scope_order, i| {
+ // if (scope_order) |ord| {
+ // Output.print("Scope ({d}, {d})\n", .{ @enumToInt(ord.scope.kind), ord.loc.start });
+ // }
+ // }
const order = p.nextScopeInOrderForVisitPass();
// Sanity-check that the scopes generated by the first and second passes match
@@ -2296,7 +2316,6 @@ pub const P = struct {
debugl("<pushScopeForParsePass>");
defer debugl("</pushScopeForParsePass>");
var parent: *Scope = p.current_scope;
- p.scope_id += 1;
var scope = try p.allocator.create(Scope);
scope.* = Scope{
@@ -2308,7 +2327,6 @@ pub const P = struct {
.kind = kind,
.label_ref = null,
.parent = parent,
- .id = p.scope_id,
};
try parent.children.append(scope);
@@ -2351,7 +2369,7 @@ pub const P = struct {
// Remember the length in case we call popAndDiscardScope() later
const scope_index = p.scopes_in_order.items.len;
try p.scopes_in_order.append(p.allocator, ScopeOrder{ .loc = loc, .scope = scope });
-
+ // Output.print("\nLoc: {d}\n", .{loc.start});
return scope_index;
}
@@ -3525,6 +3543,7 @@ pub const P = struct {
}
stmtOpts = ParseStatementOptions{};
try p.declareBinding(kind, value, &stmtOpts);
+ binding = value;
}
try p.lexer.expect(.t_open_brace);
@@ -5008,23 +5027,27 @@ pub const P = struct {
var name: ?js_ast.LocRef = null;
_ = p.pushScopeForParsePass(.function_args, loc) catch unreachable;
- defer p.popScope();
+ // The name is optional
if (p.lexer.token == .t_identifier) {
- name = js_ast.LocRef{
- .loc = loc,
+ // Don't declare the name "arguments" since it's shadowed and inaccessible
+ var _name = js_ast.LocRef{
+ .loc = p.lexer.loc(),
.ref = null,
};
- if (p.lexer.identifier.len > 0 and !strings.eql(p.lexer.identifier, "arguments")) {
- (name orelse unreachable).ref = try p.declareSymbol(.hoisted_function, (name orelse unreachable).loc, p.lexer.identifier);
+ const text = p.lexer.identifier;
+ if (text.len > 0 and !strings.eql(text, "arguments")) {
+ _name.ref = try p.declareSymbol(.hoisted_function, _name.loc, text);
} else {
- (name orelse unreachable).ref = try p.newSymbol(.hoisted_function, p.lexer.identifier);
+ _name.ref = try p.newSymbol(.hoisted_function, text);
}
debug("FUNC NAME {s}", .{p.lexer.identifier});
+ name = _name;
try p.lexer.next();
}
+ // Even anonymous functions can have TypeScript type parameters
if (p.options.ts) {
p.skipTypescriptTypeParameters();
}
@@ -5036,6 +5059,7 @@ pub const P = struct {
});
p.validateFunctionName(func, .expr);
+ p.popScope();
return p.e(js_ast.E.Function{
.func = func,
@@ -6281,6 +6305,7 @@ pub const P = struct {
const yes = try p.parseExpr(.comma);
p.allow_in = old_allow_in;
+
try p.lexer.expect(.t_colon);
const no = try p.parseExpr(.comma);
@@ -7069,6 +7094,7 @@ pub const P = struct {
const class = try p.parseClass(classKeyword, name, ParseClassOptions{});
p.popScope();
+
return p.e(class, loc);
},
.t_new => {
@@ -7906,9 +7932,7 @@ pub const P = struct {
pub fn visitFunc(p: *P, func: *G.Fn, open_parens_loc: logger.Loc) void {
const old_fn_or_arrow_data = p.fn_or_arrow_data_visit;
- defer p.fn_or_arrow_data_visit = old_fn_or_arrow_data;
const old_fn_only_data = p.fn_only_data_visit;
- defer p.fn_only_data_visit = old_fn_only_data;
p.fn_or_arrow_data_visit = FnOrArrowDataVisit{ .is_async = func.flags.is_async };
p.fn_only_data_visit = FnOnlyDataVisit{ .is_this_nested = true, .arguments_ref = func.arguments_ref };
@@ -7923,7 +7947,6 @@ pub const P = struct {
}
p.pushScopeForVisitPass(.function_args, open_parens_loc) catch unreachable;
- defer p.popScope();
p.visitArgs(
func.args,
VisitArgsOpts{
@@ -7933,16 +7956,18 @@ pub const P = struct {
},
);
- var body = func.body orelse p.panic("Expected visitFunc to have body {s}", .{func});
- p.pushScopeForVisitPass(.function_body, body.loc) catch unreachable;
- defer p.popScope();
- var stmts = List(Stmt).fromOwnedSlice(p.allocator, body.stmts);
- var temp_opts = PrependTempRefsOpts{ .kind = StmtsKind.fn_body, .fn_body_loc = body.loc };
+ assert(func.body != null);
+ p.pushScopeForVisitPass(.function_body, func.body.?.loc) catch unreachable;
+ var stmts = List(Stmt).fromOwnedSlice(p.allocator, func.body.?.stmts);
+ var temp_opts = PrependTempRefsOpts{ .kind = StmtsKind.fn_body, .fn_body_loc = func.body.?.loc };
p.visitStmtsAndPrependTempRefs(&stmts, &temp_opts) catch unreachable;
+ func.body.?.stmts = stmts.toOwnedSlice();
- body.stmts = stmts.toOwnedSlice();
+ p.popScope();
+ p.popScope();
- func.body = body;
+ p.fn_or_arrow_data_visit = old_fn_or_arrow_data;
+ p.fn_only_data_visit = old_fn_only_data;
}
pub fn maybeKeepExprSymbolName(p: *P, expr: Expr, original_name: string, was_anonymous_named_expr: bool) Expr {
@@ -7976,6 +8001,7 @@ pub const P = struct {
}
pub fn visitExprInOut(p: *P, expr: Expr, in: ExprIn) Expr {
+ // Output.print("\nVisit: {s} - {d}\n", .{ @tagName(expr.data), expr.loc.start });
switch (expr.data) {
.e_null, .e_super, .e_boolean, .e_big_int, .e_reg_exp, .e_new_target, .e_undefined => {},
.e_string => |e_| {
@@ -8713,7 +8739,10 @@ pub const P = struct {
const side_effects = SideEffects.toBoolean(e_.test_.data);
- if (side_effects.ok) {
+ if (!side_effects.ok) {
+ e_.yes = p.visitExpr(e_.yes);
+ e_.no = p.visitExpr(e_.no);
+ } else {
// Mark the control flow as dead if the branch is never taken
if (side_effects.value) {
// "true ? live : dead"
@@ -8730,9 +8759,6 @@ pub const P = struct {
p.is_control_flow_dead = old;
e_.no = p.visitExpr(e_.no);
}
- } else {
- e_.yes = p.visitExpr(e_.yes);
- e_.no = p.visitExpr(e_.no);
}
},
.e_await => |e_| {
@@ -8788,68 +8814,68 @@ pub const P = struct {
.e_object => |e_| {
if (in.assign_target != .none) {
p.maybeCommaSpreadError(e_.comma_after_spread);
- var has_spread = false;
- var has_proto = false;
+ }
- var i: usize = 0;
- while (i < e_.properties.len) : (i += 1) {
- var property = e_.properties[i];
-
- if (property.kind != .spread) {
- const key = p.visitExpr(property.key orelse Global.panic("Expected property key", .{}));
- e_.properties[i].key = key;
-
- // Forbid duplicate "__proto__" properties according to the specification
- if (!property.flags.is_computed and !property.flags.was_shorthand and !property.flags.is_method and in.assign_target == .none and key.data.isStringValue() and strings.eqlComptime(
- // __proto__ is utf8, assume it lives in refs
- key.data.e_string.utf8,
- "__proto__",
- )) {
- if (has_proto) {
- const r = js_lexer.rangeOfIdentifier(p.source, key.loc);
- p.log.addRangeError(p.source, r, "Cannot specify the \"__proto__\" property more than once per object") catch unreachable;
- }
- has_proto = true;
- }
- } else {
- has_spread = true;
- }
+ var has_spread = false;
+ var has_proto = false;
+ var i: usize = 0;
+ while (i < e_.properties.len) : (i += 1) {
+ var property = e_.properties[i];
- // Extract the initializer for expressions like "({ a: b = c } = d)"
- if (in.assign_target != .none and property.initializer != null and property.value != null) {
- switch (property.value.?.data) {
- .e_binary => |bin| {
- if (bin.op == .bin_assign) {
- property.initializer = bin.right;
- property.value = bin.left;
- }
- },
- else => {},
+ if (property.kind != .spread) {
+ const key = p.visitExpr(property.key orelse Global.panic("Expected property key", .{}));
+ e_.properties[i].key = key;
+
+ // Forbid duplicate "__proto__" properties according to the specification
+ if (!property.flags.is_computed and !property.flags.was_shorthand and !property.flags.is_method and in.assign_target == .none and key.data.isStringValue() and strings.eqlComptime(
+ // __proto__ is utf8, assume it lives in refs
+ key.data.e_string.utf8,
+ "__proto__",
+ )) {
+ if (has_proto) {
+ const r = js_lexer.rangeOfIdentifier(p.source, key.loc);
+ p.log.addRangeError(p.source, r, "Cannot specify the \"__proto__\" property more than once per object") catch unreachable;
}
+ has_proto = true;
}
+ } else {
+ has_spread = true;
+ }
- if (property.value != null) {
- property.value = p.visitExprInOut(property.value.?, ExprIn{ .assign_target = in.assign_target });
+ // Extract the initializer for expressions like "({ a: b = c } = d)"
+ if (in.assign_target != .none and property.initializer != null and property.value != null) {
+ switch (property.value.?.data) {
+ .e_binary => |bin| {
+ if (bin.op == .bin_assign) {
+ property.initializer = bin.right;
+ property.value = bin.left;
+ }
+ },
+ else => {},
}
+ }
- if (property.initializer != null) {
- const was_anonymous_named_expr = p.isAnonymousNamedExpr(property.initializer orelse unreachable);
- property.initializer = p.visitExprInOut(property.initializer.?, ExprIn{ .assign_target = in.assign_target });
+ if (property.value != null) {
+ property.value = p.visitExprInOut(property.value.?, ExprIn{ .assign_target = in.assign_target });
+ }
- if (property.value) |val| {
- if (@as(Expr.Tag, val.data) == .e_identifier) {
- property.initializer = p.maybeKeepExprSymbolName(
- property.initializer orelse unreachable,
- p.symbols.items[val.data.e_identifier.ref.inner_index].original_name,
- was_anonymous_named_expr,
- );
- }
+ if (property.initializer != null) {
+ const was_anonymous_named_expr = p.isAnonymousNamedExpr(property.initializer orelse unreachable);
+ property.initializer = p.visitExprInOut(property.initializer.?, ExprIn{ .assign_target = in.assign_target });
+
+ if (property.value) |val| {
+ if (@as(Expr.Tag, val.data) == .e_identifier) {
+ property.initializer = p.maybeKeepExprSymbolName(
+ property.initializer orelse unreachable,
+ p.symbols.items[val.data.e_identifier.ref.inner_index].original_name,
+ was_anonymous_named_expr,
+ );
}
}
-
- // TODO: can we avoid htis copy
- e_.properties[i] = property;
}
+
+ // TODO: can we avoid this copy
+ e_.properties[i] = property;
}
},
.e_import => |e_| {
@@ -8931,16 +8957,19 @@ pub const P = struct {
p.fn_only_data_visit.is_inside_async_arrow_fn = e_.is_async or p.fn_only_data_visit.is_inside_async_arrow_fn;
p.pushScopeForVisitPass(.function_args, expr.loc) catch unreachable;
+ var dupe = p.allocator.dupe(Stmt, e_.body.stmts) catch unreachable;
+
p.visitArgs(e_.args, VisitArgsOpts{
.has_rest_arg = e_.has_rest_arg,
- .body = e_.body.stmts,
+ .body = dupe,
.is_unique_formal_parameters = true,
});
p.pushScopeForVisitPass(.function_body, e_.body.loc) catch unreachable;
- var stmts_list = List(Stmt).fromOwnedSlice(p.allocator, e_.body.stmts);
+ var stmts_list = List(Stmt).fromOwnedSlice(p.allocator, dupe);
var temp_opts = PrependTempRefsOpts{ .kind = StmtsKind.fn_body };
p.visitStmtsAndPrependTempRefs(&stmts_list, &temp_opts) catch unreachable;
+ p.allocator.free(e_.body.stmts);
e_.body.stmts = stmts_list.toOwnedSlice();
p.popScope();
p.popScope();
@@ -9015,17 +9044,18 @@ pub const P = struct {
}
pub fn keepExprSymbolName(p: *P, _value: Expr, name: string) Expr {
- var start = p.expr_list.items.len;
- p.expr_list.ensureUnusedCapacity(2) catch unreachable;
- p.expr_list.appendAssumeCapacity(_value);
- p.expr_list.appendAssumeCapacity(p.e(E.String{
- .utf8 = name,
- }, _value.loc));
+ return _value;
+ // var start = p.expr_list.items.len;
+ // p.expr_list.ensureUnusedCapacity(2) catch unreachable;
+ // p.expr_list.appendAssumeCapacity(_value);
+ // p.expr_list.appendAssumeCapacity(p.e(E.String{
+ // .utf8 = name,
+ // }, _value.loc));
- var value = p.callRuntime(_value.loc, "ℹ", p.expr_list.items[start..p.expr_list.items.len]);
- // Make sure tree shaking removes this if the function is never used
- value.data.e_call.can_be_unwrapped_if_unused = true;
- return value;
+ // var value = p.callRuntime(_value.loc, "ℹ", p.expr_list.items[start..p.expr_list.items.len]);
+ // // Make sure tree shaking removes this if the function is never used
+ // value.data.e_call.can_be_unwrapped_if_unused = true;
+ // return value;
}
pub fn fnBodyContainsUseStrict(body: []Stmt) ?logger.Loc {
@@ -9719,9 +9749,9 @@ pub const P = struct {
.s_for => |data| {
{
p.pushScopeForVisitPass(.block, stmt.loc) catch unreachable;
- defer p.popScope();
+
if (data.init) |initst| {
- _ = p.visitForLoopInit(initst, false);
+ data.init = p.visitForLoopInit(initst, false);
}
if (data.test_) |test_| {
@@ -9735,6 +9765,7 @@ pub const P = struct {
}
data.body = p.visitLoopBody(data.body);
+ p.popScope();
}
// TODO: Potentially relocate "var" declarations to the top level
@@ -9782,32 +9813,36 @@ pub const P = struct {
// p.lowerObjectRestInForLoopInit(s.Init, &s.Body)
},
.s_try => |data| {
+ p.pushScopeForVisitPass(.block, stmt.loc) catch unreachable;
{
- p.pushScopeForVisitPass(.block, stmt.loc) catch unreachable;
- defer p.popScope();
- p.fn_or_arrow_data_visit.try_body_count += 1;
- defer p.fn_or_arrow_data_visit.try_body_count -= 1;
var _stmts = List(Stmt).fromOwnedSlice(p.allocator, data.body);
+ p.fn_or_arrow_data_visit.try_body_count += 1;
p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
+ p.fn_or_arrow_data_visit.try_body_count -= 1;
data.body = _stmts.toOwnedSlice();
}
+ p.popScope();
if (data.catch_) |*catch_| {
p.pushScopeForVisitPass(.block, catch_.loc) catch unreachable;
- defer p.popScope();
- if (catch_.binding != null and @as(Binding.Tag, catch_.binding.?.data) != .b_missing) {
- p.visitBinding(catch_.binding.?, null);
+ {
+ if (catch_.binding != null) {
+ p.visitBinding(catch_.binding.?, null);
+ }
+ var _stmts = List(Stmt).fromOwnedSlice(p.allocator, catch_.body);
+ p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
+ catch_.body = _stmts.toOwnedSlice();
}
- var _stmts = List(Stmt).fromOwnedSlice(p.allocator, data.body);
- p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
- catch_.body = _stmts.toOwnedSlice();
+ p.popScope();
}
if (data.finally) |*finally| {
p.pushScopeForVisitPass(.block, finally.loc) catch unreachable;
- var _stmts = List(Stmt).fromOwnedSlice(p.allocator, data.body);
- p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
- finally.stmts = _stmts.toOwnedSlice();
+ {
+ var _stmts = List(Stmt).fromOwnedSlice(p.allocator, finally.stmts);
+ p.visitStmts(&_stmts, StmtsKind.none) catch unreachable;
+ finally.stmts = _stmts.toOwnedSlice();
+ }
p.popScope();
}
},
@@ -10289,9 +10324,10 @@ pub const P = struct {
pub fn visitLoopBody(p: *P, stmt: StmtNodeIndex) StmtNodeIndex {
const old_is_inside_loop = p.fn_or_arrow_data_visit.is_inside_loop;
p.fn_or_arrow_data_visit.is_inside_loop = true;
- defer p.fn_or_arrow_data_visit.is_inside_loop = old_is_inside_loop;
p.loop_body = stmt.data;
- return p.visitSingleStmt(stmt, .loop_body);
+ const res = p.visitSingleStmt(stmt, .loop_body);
+ p.fn_or_arrow_data_visit.is_inside_loop = old_is_inside_loop;
+ return res;
}
pub fn visitSingleStmt(p: *P, stmt: Stmt, kind: StmtsKind) Stmt {
@@ -10328,19 +10364,9 @@ pub const P = struct {
return Stmt{ .data = Prefill.Data.SEmpty, .loc = loc };
}
- if (stmts.len == 1) {
- switch (stmts[0].data) {
- .s_local => |local| {
- // "let" and "const" must be put in a block when in a single-statement context
-
- if (local.kind == .k_var) {
- return stmts[0];
- }
- },
- else => {
- return stmts[0];
- },
- }
+ if (stmts.len == 1 and (std.meta.activeTag(stmts[0].data) != .s_local or stmts[0].data.s_local.kind == S.Local.Kind.k_var)) {
+ // "let" and "const" must be put in a block when in a single-statement context
+ return stmts[0];
}
return p.s(S.Block{ .stmts = stmts }, loc);
@@ -10622,7 +10648,7 @@ pub const P = struct {
// values that introduce new scopes and declare new symbols. If this is an
// arrow function, then those new scopes will need to be parented under the
// scope of the arrow function itself.
- const scopeIndex = try p.pushScopeForParsePass(.function_args, loc);
+ const scope_index = try p.pushScopeForParsePass(.function_args, loc);
// Allow "in" inside parentheses
var oldAllowIn = p.allow_in;
@@ -10754,12 +10780,12 @@ pub const P = struct {
// If we get here, it's not an arrow function so undo the pushing of the
// scope we did earlier. This needs to flatten any child scopes into the
// parent scope as if the scope was never pushed in the first place.
- p.popAndFlattenScope(scopeIndex);
+ p.popAndFlattenScope(scope_index);
// If this isn't an arrow function, then types aren't allowed
if (type_colon_range.len > 0) {
try p.log.addRangeError(p.source, type_colon_range, "Unexpected \":\"");
- p.panic("", .{});
+ return error.ParseError;
}
// Are these arguments for a call to a function named "async"?
@@ -10774,7 +10800,7 @@ pub const P = struct {
p.logExprErrors(&errors);
if (spread_range.len > 0) {
try p.log.addRangeError(p.source, type_colon_range, "Unexpected \"...\"");
- p.panic("", .{});
+ return error.ParseError;
}
var value = Expr.joinAllWithComma(items, p.allocator);
@@ -10784,7 +10810,7 @@ pub const P = struct {
// Indicate that we expected an arrow function
try p.lexer.expected(.t_equals_greater_than);
- p.panic("", .{});
+ return error.ParseError;
}
// This code is tricky.
@@ -10793,6 +10819,7 @@ pub const P = struct {
// The key is in how we remove scopes from the list
// If we do an orderedRemove, it gets very slow.
// swapRemove is fast. But a little more dangerous.
+ // Instead, we just tombstone it.
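+ // Illustrative sketch (not literal code from this change): instead of
+ //   _ = p.scopes_in_order.orderedRemove(scope_index); // O(n) shift of every later entry
+ // the slot is left behind as a null tombstone, and prepareForVisitPass simply skips nulls
+ // when it builds scope_order_to_visit.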
pub fn popAndFlattenScope(p: *P, scope_index: usize) void {
// Move up to the parent scope
var to_flatten = p.current_scope;
diff --git a/src/js_parser/js_parser_test.zig b/src/js_parser/js_parser_test.zig
index 36252f715..0d76efc37 100644
--- a/src/js_parser/js_parser_test.zig
+++ b/src/js_parser/js_parser_test.zig
@@ -177,19 +177,21 @@ const SymbolList = [][]Symbol;
fn expectPrinted(t: *Tester, contents: string, expected: string, src: anytype) !void {
if (alloc.needs_setup) {
- try alloc.setup(std.heap.page_allocator);
+ try alloc.setup(std.heap.c_allocator);
+ var __source = Output.Source.init(std.io.getStdOut(), std.io.getStdErr());
+
+ Output.Source.set(&__source);
}
debugl("INIT TEST");
- const opts = try options.TransformOptions.initUncached(alloc.dynamic, "file.jsx", contents);
var log = logger.Log.init(alloc.dynamic);
- var source = logger.Source.initFile(opts.entry_point, alloc.dynamic);
+ var source = logger.Source.initPathString("file.jsx", contents);
var ast: js_ast.Ast = undefined;
var define = try Define.init(alloc.dynamic, null);
debugl("INIT PARSER");
- var parser = try Parser.init(opts, &log, &source, define, alloc.dynamic);
+ var parser = try Parser.init(Parser.Options{ .jsx = .{} }, &log, &source, define, alloc.dynamic);
debugl("RUN PARSER");
var res = try parser.parse();
@@ -223,10 +225,10 @@ fn expectPrinted(t: *Tester, contents: string, expected: string, src: anytype) !
const PRINT_AST = false;
test "expectPrint" {
- var t_ = Tester.t(std.heap.page_allocator);
+ var t_ = Tester.t(std.heap.c_allocator);
var t = &t_;
// try expectPrinted(t, @embedFile("../test/fixtures/function-scope-bug.jsx"), @embedFile("../test/fixtures/function-scope-bug.jsx"), @src());
- try expectPrinted(t, @embedFile("../test/fixtures/simple.jsx"), @embedFile("../test/fixtures/simple.jsx"), @src());
+ try expectPrinted(t, @embedFile("../test/fixtures/cannot-assign-to-import-bug.js"), @embedFile("../test/fixtures/cannot-assign-to-import-bug.js"), @src());
// try expectPrinted(t, "if (true) { console.log(\"hi\"); }", "if (true) { console.log(\"hi\"); }", @src());
// try expectPrinted(t, "try { console.log(\"hi\"); }\ncatch(er) { console.log('noooo'); }", "class Foo {\n foo() {\n }\n}\n", @src());
diff --git a/src/js_printer.zig b/src/js_printer.zig
index f3b735688..a0142983f 100644
--- a/src/js_printer.zig
+++ b/src/js_printer.zig
@@ -1080,10 +1080,10 @@ pub fn NewPrinter(comptime ascii_only: bool) type {
flags.forbid_in = true;
p.printExpr(e.test_, .conditional, flags);
p.printSpace();
- p.print("?");
+ p.print("? ");
p.printExpr(e.yes, .yield, ExprFlag.None());
p.printSpace();
- p.print(":");
+ p.print(": ");
flags.forbid_in = true;
p.printExpr(e.no, .yield, flags);
if (wrap) {
diff --git a/src/json_parser.zig b/src/json_parser.zig
index 120bcb4d7..ee28c2a93 100644
--- a/src/json_parser.zig
+++ b/src/json_parser.zig
@@ -249,7 +249,7 @@ const js_printer = @import("js_printer.zig");
const renamer = @import("renamer.zig");
const SymbolList = [][]Symbol;
-fn expectPrintedJSON(_contents: string, expected: string) void {
+fn expectPrintedJSON(_contents: string, expected: string) !void {
var contents = alloc.dynamic.alloc(u8, _contents.len + 1) catch unreachable;
std.mem.copy(u8, contents, _contents);
contents[contents.len - 1] = ';';
@@ -292,20 +292,21 @@ fn expectPrintedJSON(_contents: string, expected: string) void {
}
test "ParseJSON" {
- expectPrintedJSON("true", "true");
- expectPrintedJSON("false", "false");
- expectPrintedJSON("1", "1");
- expectPrintedJSON("10", "10");
- expectPrintedJSON("100", "100");
- expectPrintedJSON("100.1", "100.1");
- expectPrintedJSON("19.1", "19.1");
- expectPrintedJSON("19.12", "19.12");
- expectPrintedJSON("3.4159820837456", "3.4159820837456");
- expectPrintedJSON("-10000.25", "-10000.25");
- expectPrintedJSON("\"hi\"", "\"hi\"");
- expectPrintedJSON("{\"hi\": 1, \"hey\": \"200\", \"boom\": {\"yo\": true}}", "({\"hi\": 1, \"hey\": \"200\", \"boom\": {\"yo\": true}})");
- expectPrintedJSON("{\"hi\": \"hey\"}", "({hi: \"hey\"})");
- expectPrintedJSON("{\"hi\": [\"hey\", \"yo\"]}", "({hi:[\"hey\",\"yo\"]})");
+ try alloc.setup(std.heap.c_allocator);
+ try expectPrintedJSON("true", "true");
+ try expectPrintedJSON("false", "false");
+ try expectPrintedJSON("1", "1");
+ try expectPrintedJSON("10", "10");
+ try expectPrintedJSON("100", "100");
+ try expectPrintedJSON("100.1", "100.1");
+ try expectPrintedJSON("19.1", "19.1");
+ try expectPrintedJSON("19.12", "19.12");
+ try expectPrintedJSON("3.4159820837456", "3.4159820837456");
+ try expectPrintedJSON("-10000.25", "-10000.25");
+ try expectPrintedJSON("\"hi\"", "\"hi\"");
+ try expectPrintedJSON("{\"hi\": 1, \"hey\": \"200\", \"boom\": {\"yo\": true}}", "({\"hi\": 1, \"hey\": \"200\", \"boom\": {\"yo\": true}})");
+ try expectPrintedJSON("{\"hi\": \"hey\"}", "({hi: \"hey\"})");
+ try expectPrintedJSON("{\"hi\": [\"hey\", \"yo\"]}", "({hi:[\"hey\",\"yo\"]})");
// TODO: emoji?
}
diff --git a/src/logger.zig b/src/logger.zig
index 98149cb5b..2584566eb 100644
--- a/src/logger.zig
+++ b/src/logger.zig
@@ -558,7 +558,7 @@ test "print msg" {
}
test "ErrorPosition" {
- const source = Source{ .contents = @embedFile("./test/fixtures/simple.jsx"), .path = fs.Path.init("/src/test/fixtures/simple.jsx"), .identifier_name = "simple" };
+ const source = Source.initPathString("/src/test/fixtures/simple.jsx", @embedFile("./test/fixtures/simple.jsx"));
const error_position = source.initErrorPosition(Loc{ .start = 979 });
std.testing.expectEqual(@as(usize, 973), @as(usize, error_position.line_start));
diff --git a/src/options.zig b/src/options.zig
index 4a2141a8a..6dd6ddbae 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -335,7 +335,7 @@ pub const BundleOptions = struct {
}
var resolved_defines = try defines.DefineData.from_input(user_defines, log, allocator);
-
+ const output_dir_parts = [_]string{ try std.process.getCwdAlloc(allocator), transform.output_dir orelse "out" };
var opts: BundleOptions = BundleOptions{
.log = log,
.resolve_mode = transform.resolve orelse .dev,
@@ -343,6 +343,7 @@ pub const BundleOptions = struct {
allocator,
resolved_defines,
),
+ .output_dir = try std.fs.path.join(allocator, &output_dir_parts),
.loaders = loaders,
.write = transform.write orelse false,
.external = ExternalModules.init(allocator, &fs.fs, fs.top_level_dir, transform.external, log),
@@ -459,12 +460,3 @@ pub const TransformResult = struct {
};
}
};
-
-test "TransformOptions.initUncached" {
- try alloc.setup(std.heap.page_allocator);
- const opts = try TransformOptions.initUncached(alloc.dynamic, "lol.jsx", "<Hi />");
-
- std.testing.expectEqualStrings("lol", opts.entry_point.path.name.base);
- std.testing.expectEqualStrings(".jsx", opts.entry_point.path.name.ext);
- std.testing.expect(Loader.jsx == opts.loader);
-}
diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig
index afa190c4c..6304d032e 100644
--- a/src/resolver/resolver.zig
+++ b/src/resolver/resolver.zig
@@ -5,14 +5,72 @@ const options = @import("../options.zig");
const Fs = @import("../fs.zig");
const std = @import("std");
const cache = @import("../cache.zig");
-
+const sync = @import("../sync.zig");
const TSConfigJSON = @import("./tsconfig_json.zig").TSConfigJSON;
const PackageJSON = @import("./package_json.zig").PackageJSON;
usingnamespace @import("./data_url.zig");
+const Wyhash = std.hash.Wyhash;
+const hash_map_v2 = @import("../hash_map_v2.zig");
+const Mutex = sync.Mutex;
const StringBoolMap = std.StringHashMap(bool);
+// https://en.wikipedia.org/wiki/.bss#BSS_in_C
+pub fn BSSSectionAllocator(comptime size: usize) type {
+ const FixedBufferAllocator = std.heap.FixedBufferAllocator;
+ return struct {
+ var backing_buf: [size]u8 = undefined;
+ var fixed_buffer_allocator = FixedBufferAllocator.init(&backing_buf);
+ var buf_allocator = &fixed_buffer_allocator.allocator;
+ const Allocator = std.mem.Allocator;
+ const Self = @This();
+
+ allocator: Allocator,
+ fallback_allocator: *Allocator,
+
+ pub fn get(self: *Self) *Allocator {
+ return &self.allocator;
+ }
+
+ pub fn init(fallback_allocator: *Allocator) Self {
+ return Self{ .fallback_allocator = fallback_allocator, .allocator = Allocator{
+ .allocFn = BSSSectionAllocator(size).alloc,
+ .resizeFn = BSSSectionAllocator(size).resize,
+ } };
+ }
+
+ pub fn alloc(
+ allocator: *Allocator,
+ len: usize,
+ ptr_align: u29,
+ len_align: u29,
+ return_address: usize,
+ ) error{OutOfMemory}![]u8 {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ return buf_allocator.allocFn(buf_allocator, len, ptr_align, len_align, return_address) catch
+ return self.fallback_allocator.allocFn(self.fallback_allocator, len, ptr_align, len_align, return_address);
+ }
+
+ pub fn resize(
+ allocator: *Allocator,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ return_address: usize,
+ ) error{OutOfMemory}!usize {
+ const self = @fieldParentPtr(Self, "allocator", allocator);
+ if (fixed_buffer_allocator.ownsPtr(buf.ptr)) {
+ return fixed_buffer_allocator.allocator.resizeFn(&fixed_buffer_allocator.allocator, buf, buf_align, new_len, len_align, return_address);
+ } else {
+ return self.fallback_allocator.resizeFn(self.fallback_allocator, buf, buf_align, new_len, len_align, return_address);
+ }
+ }
+ };
+}
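+
+// Hypothetical usage sketch (not part of this change): back small, long-lived data with static
+// storage and fall back to a heap allocator only once the fixed buffer is exhausted.
+//     const TinyAllocator = BSSSectionAllocator(16 * 1024);
+//     var tiny = TinyAllocator.init(std.heap.c_allocator);
+//     var bytes = try tiny.get().alloc(u8, 128); // served from .bss until the buffer runs out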
+
const Path = Fs.Path;
+
pub const SideEffectsData = struct {
source: *logger.Source,
range: logger.Range,
@@ -22,21 +80,131 @@ pub const SideEffectsData = struct {
};
pub const DirInfo = struct {
+ pub const Index = u32;
+
// These objects are immutable, so we can just point to the parent directory
// and avoid having to lock the cache again
- parent: ?*DirInfo = null,
+ parent: Index = HashMap.NotFound,
// A pointer to the enclosing dirInfo with a valid "browser" field in
// package.json. We need this to remap paths after they have been resolved.
- enclosing_browser_scope: ?*DirInfo = null,
+ enclosing_browser_scope: Index = HashMap.NotFound,
- abs_path: string,
- entries: Fs.FileSystem.DirEntry,
+ abs_path: string = "",
+ entries: Fs.FileSystem.DirEntry = undefined,
has_node_modules: bool = false, // Is there a "node_modules" subdirectory?
package_json: ?*PackageJSON = null, // Is there a "package.json" file?
tsconfig_json: ?*TSConfigJSON = null, // Is there a "tsconfig.json" file in this directory or a parent directory?
abs_real_path: string = "", // If non-empty, this is the real absolute path resolving any symlinks
+ pub fn getParent(i: *DirInfo) ?*DirInfo {
+ if (i.parent == HashMap.NotFound) return null;
+ std.debug.assert(i.parent < HashMap.instance._data.len);
+ return &HashMap.instance._data.items(.value)[i.parent];
+ }
+ pub fn getEnclosingBrowserScope(i: *DirInfo) ?*DirInfo {
+ if (i.enclosing_browser_scope == HashMap.NotFound) return null;
+ std.debug.assert(i.enclosing_browser_scope < HashMap.instance._data.len);
+ return &HashMap.instance._data.items(.value)[i.enclosing_browser_scope];
+ }
+
+ // Goal: a really fast, low-allocation directory map that exploits cache locality and doesn't worry much about lifetimes.
+ // 1. Don't store the keys or values of directories that don't exist.
+ // 2. Don't expect a provided key to exist after it's queried.
+ // 3. Store whether a directory has been queried and whether that query was successful.
+ // 4. Allocate in the BSS section (https://en.wikipedia.org/wiki/.bss#BSS_in_C) instead of the heap, so we can avoid memory leaks.
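+ // Rough protocol, as dirInfoCached uses it below: getOrPut(path) returns .unknown the first time a
+ // path is seen (the caller resolves it and then calls put() or markNotFound()), .not_found when a
+ // previous lookup failed, and .exists with an index that can be read back via at().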
+ pub const HashMap = struct {
+ // In a small next.js app with few additional dependencies, there are 191 directories in the node_modules folder
+ // fd . -d 9999 -L -t d --no-ignore | wc -l
+ const PreallocatedCount = 256;
+ const StringAllocatorSize = 128 * PreallocatedCount;
+ const FallbackStringAllocator = BSSSectionAllocator(StringAllocatorSize);
+ const FallbackAllocatorSize = @divExact(@bitSizeOf(Entry), 8) * PreallocatedCount;
+ const FallbackAllocator = BSSSectionAllocator(FallbackAllocatorSize);
+ const BackingHashMap = std.AutoHashMapUnmanaged(u64, Index);
+ pub const Entry = struct {
+ key: string,
+ value: DirInfo,
+ };
+ string_allocator: FallbackStringAllocator,
+ fallback_allocator: FallbackAllocator,
+ allocator: *std.mem.Allocator,
+ _data: std.MultiArrayList(Entry),
+ hash_map: BackingHashMap,
+ const Seed = 999;
+ pub const NotFound: Index = std.math.maxInt(Index);
+ var instance: HashMap = undefined;
+
+ pub fn at(d: *HashMap, index: Index) *DirInfo {
+ return &d._data.items(.value)[index];
+ }
+
+ pub const Result = struct {
+ index: Index = NotFound,
+ hash: u64 = 0,
+ status: Status = Status.unknown,
+
+ pub const Status = enum { unknown, not_found, exists };
+ };
+
+ // pub fn get(d: *HashMap, key: string) Result {
+ // const _key = Wyhash.hash(Seed, key);
+ // const index = d.hash_map.get(_key) orelse return Result{};
+
+ // return d._data.items(.value)[index];
+ // }
+
+ pub fn getOrPut(
+ d: *HashMap,
+ key: string,
+ ) Result {
+ const _key = Wyhash.hash(Seed, key);
+ const index = d.hash_map.get(_key) orelse return Result{
+ .index = std.math.maxInt(u32),
+ .status = .unknown,
+ .hash = _key,
+ };
+ if (index == NotFound) {
+ return Result{ .index = NotFound, .status = .not_found, .hash = _key };
+ }
+
+ return Result{ .index = index, .status = .exists, .hash = _key };
+ }
+
+ pub fn put(d: *HashMap, hash: u64, key: string, value: DirInfo) *DirInfo {
+ const entry = Entry{
+ .value = value,
+ .key = d.string_allocator.get().dupe(u8, key) catch unreachable,
+ };
+ const index = d._data.len;
+ d._data.append(d.fallback_allocator.get(), entry) catch unreachable;
+ d.hash_map.put(d.allocator, hash, @intCast(DirInfo.Index, index)) catch unreachable;
+ return &d._data.items(.value)[index];
+ }
+
+ pub fn init(allocator: *std.mem.Allocator) *HashMap {
+ var list = std.MultiArrayList(Entry){};
+ instance = HashMap{
+ ._data = undefined,
+ .string_allocator = FallbackStringAllocator.init(allocator),
+ .allocator = allocator,
+ .hash_map = BackingHashMap{},
+ .fallback_allocator = FallbackAllocator.init(allocator),
+ };
+ list.ensureTotalCapacity(instance.allocator, PreallocatedCount) catch unreachable;
+ instance._data = list;
+ return &instance;
+ }
+
+ pub fn markNotFound(d: *HashMap, hash: u64) void {
+ d.hash_map.put(d.allocator, hash, NotFound) catch unreachable;
+ }
+
+ pub fn deinit(i: *HashMap) void {
+ i._data.deinit(i.allocator);
+ i.hash_map.deinit(i.allocator);
+ }
+ };
};
pub const TemporaryBuffer = struct {
pub threadlocal var ExtensionPathBuf = std.mem.zeroes([512]u8);
@@ -93,11 +261,11 @@ pub const Resolver = struct {
// reducing parallelism in the resolver helps the rest of the bundler go
// faster. I'm not sure why this is but please don't change this unless you
// do a lot of testing with various benchmarks and there aren't any regressions.
- mutex: std.Thread.Mutex,
+ mutex: Mutex,
// This cache maps a directory path to information about that directory and
// all parent directories
- dir_cache: std.StringHashMap(?*DirInfo),
+ dir_cache: *DirInfo.HashMap,
pub fn init1(
allocator: *std.mem.Allocator,
@@ -107,8 +275,8 @@ pub const Resolver = struct {
) Resolver {
return Resolver{
.allocator = allocator,
- .dir_cache = std.StringHashMap(?*DirInfo).init(allocator),
- .mutex = std.Thread.Mutex{},
+ .dir_cache = DirInfo.HashMap.init(allocator),
+ .mutex = Mutex.init(),
.caches = cache.Cache.Set.init(allocator),
.opts = opts,
.fs = _fs,
@@ -340,8 +508,8 @@ pub const Resolver = struct {
return null;
}
- const hold = r.mutex.acquire();
- defer hold.release();
+ r.mutex.lock();
+ defer r.mutex.unlock();
var result = try r.resolveWithoutSymlinks(source_dir, import_path, kind);
@@ -429,7 +597,7 @@ pub const Resolver = struct {
// Check the "browser" map for the first time (1 out of 2)
if (r.dirInfoCached(std.fs.path.dirname(abs_path) orelse unreachable) catch null) |_import_dir_info| {
- if (_import_dir_info.enclosing_browser_scope) |import_dir_info| {
+ if (_import_dir_info.getEnclosingBrowserScope()) |import_dir_info| {
if (import_dir_info.package_json) |pkg| {
const pkg_json_dir = std.fs.path.dirname(pkg.source.key_path.text) orelse unreachable;
@@ -491,7 +659,7 @@ pub const Resolver = struct {
const source_dir_info = (r.dirInfoCached(source_dir) catch null) orelse return null;
// Support remapping one package path to another via the "browser" field
- if (source_dir_info.enclosing_browser_scope) |browser_scope| {
+ if (source_dir_info.getEnclosingBrowserScope()) |browser_scope| {
if (browser_scope.package_json) |package_json| {
if (r.checkBrowserMap(package_json, import_path)) |remapped| {
if (remapped.len == 0) {
@@ -533,7 +701,7 @@ pub const Resolver = struct {
while (iter.next()) |*path| {
const dirname = std.fs.path.dirname(path.text) orelse continue;
const base_dir_info = ((r.dirInfoCached(dirname) catch null)) orelse continue;
- const dir_info = base_dir_info.enclosing_browser_scope orelse continue;
+ const dir_info = base_dir_info.getEnclosingBrowserScope() orelse continue;
const pkg_json = dir_info.package_json orelse continue;
const rel_path = std.fs.path.relative(r.allocator, pkg_json.source.key_path.text, path.text) catch continue;
if (r.checkBrowserMap(pkg_json, rel_path)) |remapped| {
@@ -597,7 +765,7 @@ pub const Resolver = struct {
if (r.loadAsFileOrDirectory(abs, kind)) |res| {
return res;
}
- r.allocator.free(abs);
+ // r.allocator.free(abs);
}
}
@@ -617,10 +785,10 @@ pub const Resolver = struct {
if (r.loadAsFileOrDirectory(abs_path, kind)) |res| {
return res;
}
- r.allocator.free(abs_path);
+ // r.allocator.free(abs_path);
}
- dir_info = dir_info.parent orelse break;
+ dir_info = dir_info.getParent() orelse break;
}
// Mostly to cut scope, we don't resolve `NODE_PATH` environment variable.
@@ -720,11 +888,30 @@ pub const Resolver = struct {
}
fn dirInfoCached(r: *Resolver, path: string) !?*DirInfo {
- const info = r.dir_cache.get(path) orelse try r.dirInfoUncached(path);
+ var dir_info_entry = r.dir_cache.getOrPut(
+ path,
+ );
+
+ switch (dir_info_entry.status) {
+ .unknown => {
+ return try r.dirInfoUncached(path, dir_info_entry);
+ },
+ .not_found => {
+ return null;
+ },
+ .exists => {
+ return r.dir_cache.at(dir_info_entry.index);
+ },
+ }
+ // if (__entry.found_existing) {
+ // return if (__entry.entry.value == DirInfo.NotFound) null else __entry.entry.value;
+ // }
- try r.dir_cache.put(path, info);
+ // __entry.entry.value = DirInfo.NotFound;
- return info;
+ // try r.dirInfoUncached(path);
+ // const entry = r.dir_cache.get(path) orelse unreachable;
+ // return if (__entry.entry.value == DirInfo.NotFound) null else entry;
}
pub const MatchResult = struct {
@@ -939,7 +1126,7 @@ pub const Resolver = struct {
}
// Potentially remap using the "browser" field
- if (dir_info.enclosing_browser_scope) |browser_scope| {
+ if (dir_info.getEnclosingBrowserScope()) |browser_scope| {
if (browser_scope.package_json) |browser_json| {
if (r.checkBrowserMap(browser_json, field_rel_path)) |remap| {
// Is the path disabled?
@@ -1002,7 +1189,7 @@ pub const Resolver = struct {
}
pub fn loadAsIndexWithBrowserRemapping(r: *Resolver, dir_info: *DirInfo, path: string, extension_order: []const string) ?MatchResult {
- if (dir_info.enclosing_browser_scope) |browser_scope| {
+ if (dir_info.getEnclosingBrowserScope()) |browser_scope| {
comptime const field_rel_path = "index";
if (browser_scope.package_json) |browser_json| {
if (r.checkBrowserMap(browser_json, field_rel_path)) |remap| {
@@ -1280,12 +1467,23 @@ pub const Resolver = struct {
return null;
}
- fn dirInfoUncached(r: *Resolver, path: string) anyerror!?*DirInfo {
+ fn dirInfoUncached(r: *Resolver, path: string, result: DirInfo.HashMap.Result) anyerror!?*DirInfo {
var rfs: *Fs.FileSystem.RealFS = &r.fs.fs;
var parent: ?*DirInfo = null;
- const parent_dir = std.fs.path.dirname(path) orelse return null;
- if (!strings.eql(parent_dir, path)) {
- parent = try r.dirInfoCached(parent_dir);
+
+ const parent_dir = std.fs.path.dirname(path) orelse {
+ r.dir_cache.markNotFound(result.hash);
+ return null;
+ };
+
+ var parent_result: DirInfo.HashMap.Result = undefined;
+ if (parent_dir.len > 1 and !strings.eql(parent_dir, path)) {
+ parent = (try r.dirInfoCached(parent_dir)) orelse {
+ r.dir_cache.markNotFound(result.hash);
+ return null;
+ };
+
+ parent_result = r.dir_cache.getOrPut(parent_dir);
}
// List the directories
@@ -1323,6 +1521,7 @@ pub const Resolver = struct {
@errorName(_entries.err.original_err),
},
) catch {};
+ r.dir_cache.markNotFound(result.hash);
return null;
},
}
@@ -1330,10 +1529,9 @@ pub const Resolver = struct {
entries = _entries.entries;
}
- var info = try r.allocator.create(DirInfo);
- info.* = DirInfo{
+ var info = DirInfo{
.abs_path = path,
- .parent = parent,
+ .parent = if (parent != null) parent_result.index else DirInfo.HashMap.NotFound,
.entries = entries,
};
@@ -1382,7 +1580,8 @@ pub const Resolver = struct {
if (info.package_json) |pkg| {
if (pkg.browser_map.count() > 0) {
- info.enclosing_browser_scope = info;
+ // This DirInfo has not been written into the cache yet, so reserve the index it will occupy once it is inserted.
+ info.enclosing_browser_scope = @intCast(DirInfo.Index, DirInfo.HashMap.instance._data.len);
}
if (r.debug_logs) |*logs| {
@@ -1438,6 +1637,7 @@ pub const Resolver = struct {
info.tsconfig_json = parent.?.tsconfig_json;
}
- return info;
+ info.entries = entries;
+ return r.dir_cache.put(result.hash, path, info);
}
};
diff --git a/src/test/fixtures/cannot-assign-to-import-bug.js b/src/test/fixtures/cannot-assign-to-import-bug.js
new file mode 100644
index 000000000..cbf9087f1
--- /dev/null
+++ b/src/test/fixtures/cannot-assign-to-import-bug.js
@@ -0,0 +1,369 @@
+/** @license React v17.0.2
+ * react.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+"use strict";
+var l = require("object-assign"),
+ n = 60103,
+ p = 60106;
+exports.Fragment = 60107;
+exports.StrictMode = 60108;
+exports.Profiler = 60114;
+var q = 60109,
+ r = 60110,
+ t = 60112;
+exports.Suspense = 60113;
+var u = 60115,
+ v = 60116;
+if ("function" === typeof Symbol && Symbol.for) {
+ var w = Symbol.for;
+ n = w("react.element");
+ p = w("react.portal");
+ exports.Fragment = w("react.fragment");
+ exports.StrictMode = w("react.strict_mode");
+ exports.Profiler = w("react.profiler");
+ q = w("react.provider");
+ r = w("react.context");
+ t = w("react.forward_ref");
+ exports.Suspense = w("react.suspense");
+ u = w("react.memo");
+ v = w("react.lazy");
+}
+var x = "function" === typeof Symbol && Symbol.iterator;
+function y(a) {
+ if (null === a || "object" !== typeof a) return null;
+ a = (x && a[x]) || a["@@iterator"];
+ return "function" === typeof a ? a : null;
+}
+function z(a) {
+ for (
+ var b = "https://reactjs.org/docs/error-decoder.html?invariant=" + a, c = 1;
+ c < arguments.length;
+ c++
+ )
+ b += "&args[]=" + encodeURIComponent(arguments[c]);
+ return (
+ "Minified React error #" +
+ a +
+ "; visit " +
+ b +
+ " for the full message or use the non-minified dev environment for full errors and additional helpful warnings."
+ );
+}
+var A = {
+ isMounted: function () {
+ return !1;
+ },
+ enqueueForceUpdate: function () {},
+ enqueueReplaceState: function () {},
+ enqueueSetState: function () {},
+ },
+ B = {};
+function C(a, b, c) {
+ this.props = a;
+ this.context = b;
+ this.refs = B;
+ this.updater = c || A;
+}
+C.prototype.isReactComponent = {};
+C.prototype.setState = function (a, b) {
+ if ("object" !== typeof a && "function" !== typeof a && null != a)
+ throw Error(z(85));
+ this.updater.enqueueSetState(this, a, b, "setState");
+};
+C.prototype.forceUpdate = function (a) {
+ this.updater.enqueueForceUpdate(this, a, "forceUpdate");
+};
+function D() {}
+D.prototype = C.prototype;
+function E(a, b, c) {
+ this.props = a;
+ this.context = b;
+ this.refs = B;
+ this.updater = c || A;
+}
+var F = (E.prototype = new D());
+F.constructor = E;
+l(F, C.prototype);
+F.isPureReactComponent = !0;
+var G = { current: null },
+ H = Object.prototype.hasOwnProperty,
+ I = { key: !0, ref: !0, __self: !0, __source: !0 };
+function J(a, b, c) {
+ var e,
+ d = {},
+ k = null,
+ h = null;
+ if (null != b)
+ for (e in (void 0 !== b.ref && (h = b.ref),
+ void 0 !== b.key && (k = "" + b.key),
+ b))
+ H.call(b, e) && !I.hasOwnProperty(e) && (d[e] = b[e]);
+ var g = arguments.length - 2;
+ if (1 === g) d.children = c;
+ else if (1 < g) {
+ for (var f = Array(g), m = 0; m < g; m++) f[m] = arguments[m + 2];
+ d.children = f;
+ }
+ if (a && a.defaultProps)
+ for (e in ((g = a.defaultProps), g)) void 0 === d[e] && (d[e] = g[e]);
+ return { $$typeof: n, type: a, key: k, ref: h, props: d, _owner: G.current };
+}
+function K(a, b) {
+ return {
+ $$typeof: n,
+ type: a.type,
+ key: b,
+ ref: a.ref,
+ props: a.props,
+ _owner: a._owner,
+ };
+}
+function L(a) {
+ return "object" === typeof a && null !== a && a.$$typeof === n;
+}
+function escape(a) {
+ var b = { "=": "=0", ":": "=2" };
+ return (
+ "$" +
+ a.replace(/[=:]/g, function (a) {
+ return b[a];
+ })
+ );
+}
+var M = /\/+/g;
+function N(a, b) {
+ return "object" === typeof a && null !== a && null != a.key
+ ? escape("" + a.key)
+ : b.toString(36);
+}
+function O(a, b, c, e, d) {
+ var k = typeof a;
+ if ("undefined" === k || "boolean" === k) a = null;
+ var h = !1;
+ if (null === a) h = !0;
+ else
+ switch (k) {
+ case "string":
+ case "number":
+ h = !0;
+ break;
+ case "object":
+ switch (a.$$typeof) {
+ case n:
+ case p:
+ h = !0;
+ }
+ }
+ if (h)
+ return (
+ (h = a),
+ (d = d(h)),
+ (a = "" === e ? "." + N(h, 0) : e),
+ Array.isArray(d)
+ ? ((c = ""),
+ null != a && (c = a.replace(M, "$&/") + "/"),
+ O(d, b, c, "", function (a) {
+ return a;
+ }))
+ : null != d &&
+ (L(d) &&
+ (d = K(
+ d,
+ c +
+ (!d.key || (h && h.key === d.key)
+ ? ""
+ : ("" + d.key).replace(M, "$&/") + "/") +
+ a
+ )),
+ b.push(d)),
+ 1
+ );
+ h = 0;
+ e = "" === e ? "." : e + ":";
+ if (Array.isArray(a))
+ for (var g = 0; g < a.length; g++) {
+ k = a[g];
+ var f = e + N(k, g);
+ h += O(k, b, c, f, d);
+ }
+ else if (((f = y(a)), "function" === typeof f))
+ for (a = f.call(a), g = 0; !(k = a.next()).done; )
+ (k = k.value), (f = e + N(k, g++)), (h += O(k, b, c, f, d));
+ else if ("object" === k)
+ throw (
+ ((b = "" + a),
+ Error(
+ z(
+ 31,
+ "[object Object]" === b
+ ? "object with keys {" + Object.keys(a).join(", ") + "}"
+ : b
+ )
+ ))
+ );
+ return h;
+}
+function P(a, b, c) {
+ if (null == a) return a;
+ var e = [],
+ d = 0;
+ O(a, e, "", "", function (a) {
+ return b.call(c, a, d++);
+ });
+ return e;
+}
+function Q(a) {
+ if (-1 === a._status) {
+ var b = a._result;
+ b = b();
+ a._status = 0;
+ a._result = b;
+ b.then(
+ function (b) {
+ 0 === a._status && ((b = b.default), (a._status = 1), (a._result = b));
+ },
+ function (b) {
+ 0 === a._status && ((a._status = 2), (a._result = b));
+ }
+ );
+ }
+ if (1 === a._status) return a._result;
+ throw a._result;
+}
+var R = { current: null };
+function S() {
+ var a = R.current;
+ if (null === a) throw Error(z(321));
+ return a;
+}
+var T = {
+ ReactCurrentDispatcher: R,
+ ReactCurrentBatchConfig: { transition: 0 },
+ ReactCurrentOwner: G,
+ IsSomeRendererActing: { current: !1 },
+ assign: l,
+};
+exports.Children = {
+ map: P,
+ forEach: function (a, b, c) {
+ P(
+ a,
+ function () {
+ b.apply(this, arguments);
+ },
+ c
+ );
+ },
+ count: function (a) {
+ var b = 0;
+ P(a, function () {
+ b++;
+ });
+ return b;
+ },
+ toArray: function (a) {
+ return (
+ P(a, function (a) {
+ return a;
+ }) || []
+ );
+ },
+ only: function (a) {
+ if (!L(a)) throw Error(z(143));
+ return a;
+ },
+};
+exports.Component = C;
+exports.PureComponent = E;
+exports.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED = T;
+exports.cloneElement = function (a, b, c) {
+ if (null === a || void 0 === a) throw Error(z(267, a));
+ var e = l({}, a.props),
+ d = a.key,
+ k = a.ref,
+ h = a._owner;
+ if (null != b) {
+ void 0 !== b.ref && ((k = b.ref), (h = G.current));
+ void 0 !== b.key && (d = "" + b.key);
+ if (a.type && a.type.defaultProps) var g = a.type.defaultProps;
+ for (f in b)
+ H.call(b, f) &&
+ !I.hasOwnProperty(f) &&
+ (e[f] = void 0 === b[f] && void 0 !== g ? g[f] : b[f]);
+ }
+ var f = arguments.length - 2;
+ if (1 === f) e.children = c;
+ else if (1 < f) {
+ g = Array(f);
+ for (var m = 0; m < f; m++) g[m] = arguments[m + 2];
+ e.children = g;
+ }
+ return { $$typeof: n, type: a.type, key: d, ref: k, props: e, _owner: h };
+};
+exports.createContext = function (a, b) {
+ void 0 === b && (b = null);
+ a = {
+ $$typeof: r,
+ _calculateChangedBits: b,
+ _currentValue: a,
+ _currentValue2: a,
+ _threadCount: 0,
+ Provider: null,
+ Consumer: null,
+ };
+ a.Provider = { $$typeof: q, _context: a };
+ return (a.Consumer = a);
+};
+exports.createElement = J;
+exports.createFactory = function (a) {
+ var b = J.bind(null, a);
+ b.type = a;
+ return b;
+};
+exports.createRef = function () {
+ return { current: null };
+};
+exports.forwardRef = function (a) {
+ return { $$typeof: t, render: a };
+};
+exports.isValidElement = L;
+exports.lazy = function (a) {
+ return { $$typeof: v, _payload: { _status: -1, _result: a }, _init: Q };
+};
+exports.memo = function (a, b) {
+ return { $$typeof: u, type: a, compare: void 0 === b ? null : b };
+};
+exports.useCallback = function (a, b) {
+ return S().useCallback(a, b);
+};
+exports.useContext = function (a, b) {
+ return S().useContext(a, b);
+};
+exports.useDebugValue = function () {};
+exports.useEffect = function (a, b) {
+ return S().useEffect(a, b);
+};
+exports.useImperativeHandle = function (a, b, c) {
+ return S().useImperativeHandle(a, b, c);
+};
+exports.useLayoutEffect = function (a, b) {
+ return S().useLayoutEffect(a, b);
+};
+exports.useMemo = function (a, b) {
+ return S().useMemo(a, b);
+};
+exports.useReducer = function (a, b, c) {
+ return S().useReducer(a, b, c);
+};
+exports.useRef = function (a) {
+ return S().useRef(a);
+};
+exports.useState = function (a) {
+ return S().useState(a);
+};
+exports.version = "17.0.2";
diff --git a/src/test/fixtures/img-bug.js b/src/test/fixtures/img-bug.js
index 1d5ec8a82..89b6d464d 100644
--- a/src/test/fixtures/img-bug.js
+++ b/src/test/fixtures/img-bug.js
@@ -1,48 +1,59 @@
-const hi = () => ({
- its_ireelevant: () => true,
-});
-const hey = () => ({
- any_name_will_do: () => true,
-});
-
-// function getWidths(width, layout, sizes) {
-// if (sizes && (layout === "fill" || layout === "responsive")) {
-// // Find all the "vw" percent sizes used in the sizes prop
-// const percentSizes = [...sizes.matchAll(/(^|\s)(1?\d?\d)vw/g)].map((m) =>
-// parseInt(m[2])
-// );
-// if (percentSizes.length) {
-
-// // const smallestRatio = Math.min(...percentSizes) * 0.01;
-// // return {
-// // widths: allSizes.filter(
-// // (s) => s >= configDeviceSizes[0] * smallestRatio
-// // ),
-// // kind: "w",
-// // };
-// }
-// return { widths: allSizes, kind: "w" };
-// }
-// if (
-// typeof width !== "number" ||
-// layout === "fill" ||
-// layout === "responsive"
-// ) {
-// return { widths: configDeviceSizes, kind: "w" };
-// }
-// // const widths = [
-// // ...new Set( // > This means that most OLED screens that say they are 3x resolution,
-// // // > are actually 3x in the green color, but only 1.5x in the red and
-// // // > blue colors. Showing a 3x resolution image in the app vs a 2x
-// // // > resolution image will be visually the same, though the 3x image
-// // // > takes significantly more data. Even true 3x resolution screens are
-// // // > wasteful as the human eye cannot see that level of detail without
-// // // > something like a magnifying glass.
-// // // https://blog.twitter.com/engineering/en_us/topics/infrastructure/2019/capping-image-fidelity-on-ultra-high-resolution-devices.html
-// // [width, width * 2 /*, width * 3*/].map(
-// // (w) => allSizes.find((p) => p >= w) || allSizes[allSizes.length - 1]
-// // )
-// // ),
-// // ];
-// // return { widths, kind: "x" };
+function hey() {
+ const well = {
+ baz: function () {},
+ };
+}
+
+function yo() {
+ const hi = {
+ yo: function () {},
+ };
+}
+
+// function yo() {
+// const hi = {
+// yo: function () {},
+// };
// }
+
+// This bug is the same as function-scope-bug.jsx, except this time
+// it's specific to scopes created in property definitions. That means
+// either arrow functions or non-arrow functions assigned to properties.
+
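+// Hedged illustration (not part of the original fixture): the arrow-function
+// form of the same property-definition scope pattern described above.
+function heyArrow() {
+  const well = {
+    baz: () => {},
+  };
+}
+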
+// ESBUILD
+// Scope: (5 -1) | Scope (5, -100)
+// Scope: (6 12) | Scope (6, 12)
+// Scope: (7 15) | Scope (7, 15)
+// Scope: (6 43) | Scope (6, 43)
+// Scope: (7 55) | Scope (7, 55)
+// Scope: (6 78) | Scope (6, 78)
+// Scope: (7 81) | Scope (7, 81)
+
+// Scope (6, 106)
+// Scope (7, 118)
+
+// Scope: (5 -1) | Scope (5, -100)
+// Scope: (6 12) | Scope (6, 12)
+// Scope: (7 15) | Scope (7, 15)
+// Scope: (6 43) | Scope (6, 43)
+// Scope: (7 55) | Scope (7, 55)
+// Scope: (6 78) | Scope (6, 78)
+// Scope: (7 81) | Scope (7, 81)
+// Scope: (6 106) | Scope (6, 106)
+// Scope: (7 118) | Scope (7, 118)
+
+// ESBUILD
+
+// Scope: (5 -1)
+// Scope: (6 12)
+// Scope: (7 15)
+// Scope: (6 43)
+// Scope: (7 55)
+// Scope: (6 78)
+// Scope: (7 81)
+// Scope: (6 106)
+// Scope: (7 118)
+// Scope: (6 141)
+// Scope: (7 144)
+// Scope: (6 169)
+// Scope: (7 181)
diff --git a/src/test/fixtures/react-dev-crash.js b/src/test/fixtures/react-dev-crash.js
new file mode 100644
index 000000000..1bed979a5
--- /dev/null
+++ b/src/test/fixtures/react-dev-crash.js
@@ -0,0 +1,108 @@
+try {
+ // This should throw.
+ if (construct) {
+ // Something should be setting the props in the constructor.
+ var Fake = function () {
+ throw Error();
+ }; // $FlowFixMe
+
+ Object.defineProperty(Fake.prototype, "props", {
+ set: function () {
+ // We use a throwing setter instead of frozen or non-writable props
+ // because that won't throw in a non-strict mode function.
+ throw Error();
+ },
+ });
+
+ if (typeof Reflect === "object" && Reflect.construct) {
+ // We construct a different control for this case to include any extra
+ // frames added by the construct call.
+ try {
+ Reflect.construct(Fake, []);
+ } catch (x) {
+ control = x;
+ }
+
+ Reflect.construct(fn, [], Fake);
+ } else {
+ try {
+ Fake.call();
+ } catch (x) {
+ control = x;
+ }
+
+ fn.call(Fake.prototype);
+ }
+ } else {
+ try {
+ throw Error();
+ } catch (x) {
+ control = x;
+ }
+
+ fn();
+ }
+} catch (sample) {
+ // This is inlined manually because closure doesn't do it for us.
+ if (sample && control && typeof sample.stack === "string") {
+    // This extracts the first frame from the sample that isn't also in the control,
+    // skipping one frame that we assume is the frame that calls the two.
+ var sampleLines = sample.stack.split("\n");
+ var controlLines = control.stack.split("\n");
+ var s = sampleLines.length - 1;
+ var c = controlLines.length - 1;
+
+ while (s >= 1 && c >= 0 && sampleLines[s] !== controlLines[c]) {
+      // We expect at least one stack frame to be shared.
+      // Typically this will be the root-most one. However, stack frames may be
+      // cut off due to maximum stack limits. In this case, one may be cut off
+      // earlier than the other. We assume that the sample is longer or the same
+      // and therefore cut off earlier. So we should find the root-most frame in
+      // the sample somewhere in the control.
+ c--;
+ }
+
+ for (; s >= 1 && c >= 0; s--, c--) {
+ // Next we find the first one that isn't the same which should be the
+ // frame that called our sample function and the control.
+ if (sampleLines[s] !== controlLines[c]) {
+        // In V8, the first line describes the error message, but other VMs don't.
+        // If we're about to return the first line, and the control is also on the same
+        // line, that's a pretty good indicator that our sample threw at the same line as
+        // the control, i.e. before we entered the sample frame. So we ignore this result.
+        // This can happen if you passed a class to a function component, or a non-function.
+ if (s !== 1 || c !== 1) {
+ do {
+ s--;
+ c--; // We may still have similar intermediate frames from the construct call.
+ // The next one that isn't the same should be our match though.
+
+ if (c < 0 || sampleLines[s] !== controlLines[c]) {
+ // V8 adds a "new" prefix for native classes. Let's remove it to make it prettier.
+ var _frame = "\n" + sampleLines[s].replace(" at new ", " at ");
+
+ {
+ if (typeof fn === "function") {
+ componentFrameCache.set(fn, _frame);
+ }
+ } // Return the line we found.
+
+ return _frame;
+ }
+ } while (s >= 1 && c >= 0);
+ }
+
+ break;
+ }
+ }
+ }
+} finally {
+ reentry = false;
+
+ {
+ ReactCurrentDispatcher$1.current = previousDispatcher;
+ reenableLogs();
+ }
+
+ Error.prepareStackTrace = previousPrepareStackTrace;
+} // Fallback to just using the name if we couldn't make it throw.
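+
+// Hedged illustrative sketch, not part of the React source above: the core idea
+// of the sample/control comparison is to strip the shared tail of the two stacks
+// and keep only the frames unique to the sample. React's real code additionally
+// skips similar intermediate frames from the construct call and strips the
+// "at new " prefix; this sketch omits those details.
+function uniqueSampleFrames(sampleStack, controlStack) {
+  var sampleLines = sampleStack.split("\n");
+  var controlLines = controlStack.split("\n");
+  var s = sampleLines.length - 1;
+  var c = controlLines.length - 1;
+  // Walk backwards past the outermost caller frames shared by both stacks.
+  while (s >= 1 && c >= 1 && sampleLines[s] === controlLines[c]) {
+    s--;
+    c--;
+  }
+  // Everything after the "Error" message line and above the shared tail is
+  // unique to the sample.
+  return sampleLines.slice(1, s + 1);
+}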