aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorGravatar Jarred Sumner <jarred@jarredsumner.com> 2021-05-12 01:46:58 -0700
committerGravatar Jarred Sumner <jarred@jarredsumner.com> 2021-05-12 01:46:58 -0700
commitf8131f42bcd039964586cbf3bd019dc9a449c438 (patch)
tree04af9a81309920ba04fd2e18d9f7fda3d615115b /src
parent175bbdd3c3ef13c3446fc5712e9dfcf96d387f0a (diff)
downloadbun-f8131f42bcd039964586cbf3bd019dc9a449c438.tar.gz
bun-f8131f42bcd039964586cbf3bd019dc9a449c438.tar.zst
bun-f8131f42bcd039964586cbf3bd019dc9a449c438.zip
okay
Former-commit-id: 2c20d88e8d0cf66b32daceb942ba9bf8514f5705
Diffstat (limited to 'src')
-rw-r--r--src/api/schema.d.ts1
-rw-r--r--src/api/schema.js10
-rw-r--r--src/api/schema.peechy2
-rw-r--r--src/api/schema.zig11
-rw-r--r--src/bundler.zig68
-rw-r--r--src/cache.zig14
-rw-r--r--src/fs.zig186
-rw-r--r--src/import_record.zig4
-rw-r--r--src/js_ast.zig1
-rw-r--r--src/logger.zig4
-rw-r--r--src/options.zig81
-rw-r--r--src/resolver/data_url.zig4
-rw-r--r--src/resolver/resolver.zig126
-rw-r--r--src/resolver/tsconfig_json.zig2
-rw-r--r--src/string_immutable.zig15
-rw-r--r--src/string_mutable.zig2
-rw-r--r--src/sync.zig1220
-rw-r--r--src/thread_safe_hash_map.zig35
18 files changed, 1602 insertions, 184 deletions
diff --git a/src/api/schema.d.ts b/src/api/schema.d.ts
index ff4b54a30..ce4521d6b 100644
--- a/src/api/schema.d.ts
+++ b/src/api/schema.d.ts
@@ -126,6 +126,7 @@ type uint32 = number;
loader_values?: Loader[];
main_fields?: string[];
platform?: Platform;
+ watch?: boolean;
}
export interface FileHandle {
diff --git a/src/api/schema.js b/src/api/schema.js
index 0986f0d0b..acd53ac51 100644
--- a/src/api/schema.js
+++ b/src/api/schema.js
@@ -227,6 +227,10 @@ function decodeTransformOptions(bb) {
result["platform"] = Platform[bb.readByte()];
break;
+ case 18:
+ result["watch"] = !!bb.readByte();
+ break;
+
default:
throw new Error("Attempted to parse invalid message");
}
@@ -382,6 +386,12 @@ bb.writeByte(encoded);
if (encoded === void 0) throw new Error("Invalid value " + JSON.stringify(value) + " for enum \"Platform\"");
bb.writeByte(encoded);
}
+
+ var value = message["watch"];
+ if (value != null) {
+ bb.writeByte(18);
+ bb.writeByte(value);
+ }
bb.writeByte(0);
}
diff --git a/src/api/schema.peechy b/src/api/schema.peechy
index 442dd3f84..f36a968b2 100644
--- a/src/api/schema.peechy
+++ b/src/api/schema.peechy
@@ -67,6 +67,8 @@ message TransformOptions {
string[] main_fields = 16;
Platform platform = 17;
+
+ bool watch = 18;
}
struct FileHandle {
diff --git a/src/api/schema.zig b/src/api/schema.zig
index b6f1a47f0..d32b8fb23 100644
--- a/src/api/schema.zig
+++ b/src/api/schema.zig
@@ -200,6 +200,9 @@ pub const Api = struct {
/// platform
platform: ?Platform = null,
+ /// watch
+ watch: ?bool = null,
+
pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!TransformOptions {
var obj = std.mem.zeroes(TransformOptions);
try update(&obj, allocator, reader);
@@ -374,6 +377,9 @@ pub const Api = struct {
17 => {
result.platform = try reader.readEnum(Platform, .Little);
},
+ 18 => {
+ result.watch = (try reader.readByte()) == @as(u8, 1);
+ },
else => {
return error.InvalidMessage;
},
@@ -534,6 +540,11 @@ pub const Api = struct {
try writer.writeByte(17);
try writer.writeIntNative(@TypeOf(@enumToInt(result.platform orelse unreachable)), @enumToInt(result.platform orelse unreachable));
}
+
+ if (result.watch) |watch| {
+ try writer.writeByte(18);
+ try writer.writeByte(@boolToInt(watch));
+ }
try writer.writeByte(0);
return;
}
diff --git a/src/bundler.zig b/src/bundler.zig
index b5f576e21..3050ac970 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -13,27 +13,67 @@ const linker = @import("linker.zig");
usingnamespace @import("ast/base.zig");
usingnamespace @import("defines.zig");
const panicky = @import("panic_handler.zig");
-const fs = @import("fs.zig");
+const Fs = @import("fs.zig");
const Api = @import("api/schema.zig").Api;
+const Resolver = @import("./resolver/resolver.zig");
+const sync = @import("sync.zig");
+const ThreadPool = sync.ThreadPool;
+const ThreadSafeHashMap = @import("./thread_safe_hash_map.zig");
+
+// pub const
+// const BundleMap =
pub const Bundler = struct {
options: options.BundleOptions,
log: *logger.Log,
allocator: *std.mem.Allocator,
- result: ?options.TransformResult = null,
+ result: options.TransformResult = undefined,
+ resolver: Resolver.Resolver,
+ fs: *Fs.FileSystem,
+ thread_pool: *ThreadPool,
+
+ // to_bundle:
+
+ // thread_pool: *ThreadPool,
pub fn init(
allocator: *std.mem.Allocator,
log: *logger.Log,
opts: Api.TransformOptions,
- ) !Bundler {}
+ ) !Bundler {
+ var fs = try Fs.FileSystem.init1(allocator, opts.absolute_working_dir, opts.watch orelse false);
+ const bundle_options = try options.BundleOptions.fromApi(allocator, fs, log, opts);
+ var pool = try allocator.create(ThreadPool);
+ try pool.init(ThreadPool.InitConfig{
+ .allocator = allocator,
+ });
+ return Bundler{
+ .options = bundle_options,
+ .fs = fs,
+ .allocator = allocator,
+ .resolver = Resolver.Resolver.init1(allocator, log, fs, bundle_options),
+ .log = log,
+ .thread_pool = pool,
+ };
+ }
pub fn bundle(
allocator: *std.mem.Allocator,
log: *logger.Log,
opts: Api.TransformOptions,
) !options.TransformResult {
- Global.notimpl();
+ var bundler = try Bundler.init(allocator, log, opts);
+
+ var entry_points = try allocator.alloc(Resolver.Resolver.Result, bundler.options.entry_points.len);
+ var entry_point_i: usize = 0;
+ for (bundler.options.entry_points) |entry| {
+ entry_points[entry_point_i] = bundler.resolver.resolve(bundler.fs.top_level_dir, entry, .entry_point) catch {
+ continue;
+ } orelse continue;
+ entry_point_i += 1;
+ }
+
+ return bundler.result;
}
};
@@ -88,11 +128,23 @@ pub const Transformer = struct {
var jsx = if (opts.jsx) |_jsx| options.JSX.Pragma.fromApi(_jsx) else options.JSX.Pragma{};
var output_i: usize = 0;
+ var chosen_alloc: *std.mem.Allocator = allocator;
+ var arena: std.heap.ArenaAllocator = undefined;
+ const watch = opts.watch orelse false;
+ const use_arenas = opts.entry_points.len > 8 or watch;
for (opts.entry_points) |entry_point, i| {
- var arena = std.heap.ArenaAllocator.init(allocator);
- var chosen_alloc = &arena.allocator;
- defer arena.deinit();
+ if (use_arenas) {
+ arena = std.heap.ArenaAllocator.init(allocator);
+ chosen_alloc = &arena.allocator;
+ }
+
+ defer {
+ if (use_arenas) {
+ arena.deinit();
+ }
+ }
+
var _log = logger.Log.init(allocator);
var __log = &_log;
var paths = [_]string{ cwd, entry_point };
@@ -110,7 +162,7 @@ pub const Transformer = struct {
chosen_alloc.free(absolutePath);
_log.appendTo(log) catch {};
}
- const _file = fs.File{ .path = fs.Path.init(entry_point), .contents = code };
+ const _file = Fs.File{ .path = Fs.Path.init(entry_point), .contents = code };
var source = try logger.Source.initFile(_file, chosen_alloc);
var loader: options.Loader = undefined;
if (use_default_loaders) {
diff --git a/src/cache.zig b/src/cache.zig
index e0df61ae8..505a4653c 100644
--- a/src/cache.zig
+++ b/src/cache.zig
@@ -14,6 +14,20 @@ pub const Cache = struct {
js: JavaScript,
fs: Fs,
json: Json,
+
+ pub fn init(allocator: *std.mem.Allocator) Set {
+ return Set{
+ .js = JavaScript{},
+ .fs = Fs{
+ .mutex = std.Thread.Mutex{},
+ .entries = std.StringHashMap(Fs.Entry).init(allocator),
+ },
+ .json = Json{
+ .mutex = std.Thread.Mutex{},
+ .entries = std.StringHashMap(*Json.Entry).init(allocator),
+ },
+ };
+ }
};
pub const Fs = struct {
mutex: std.Thread.Mutex,
diff --git a/src/fs.zig b/src/fs.zig
index bb52704ed..cdf8c520d 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -1,10 +1,10 @@
const std = @import("std");
-
usingnamespace @import("global.zig");
-
+const sync = @import("sync.zig");
const alloc = @import("alloc.zig");
const expect = std.testing.expect;
-const Mutex = std.Thread.Mutex;
+const Mutex = sync.Mutex;
+const Semaphore = sync.Semaphore;
const resolvePath = @import("./resolver/resolve_path.zig").resolvePath;
@@ -52,7 +52,7 @@ pub const FileSystem = struct {
}
pub const Err = struct {
- original_error: anyerror,
+ original_err: anyerror,
canonical_error: anyerror,
};
@@ -76,15 +76,15 @@ pub const FileSystem = struct {
}
const query = scratch_lookup_buffer[0 .. end + 1];
const result = entry.data.get(query) orelse return null;
- if (!strings.eql(dir.base, query)) {
- return Entry.Lookup{ .entry = result, .different_case = Entry.Lookup.DifferentCase{
+ if (!strings.eql(result.base, query)) {
+ return Entry.Lookup{ .entry = result, .diff_case = Entry.Lookup.DifferentCase{
.dir = entry.dir,
.query = _query,
.actual = result.base,
} };
}
- return Entry.Lookup{ .entry = entry };
+ return Entry.Lookup{ .entry = result, .diff_case = null };
}
};
@@ -97,7 +97,7 @@ pub const FileSystem = struct {
pub const Lookup = struct {
entry: *Entry,
- different_case: ?DifferentCase,
+ diff_case: ?DifferentCase,
pub const DifferentCase = struct {
dir: string,
@@ -109,13 +109,13 @@ pub const FileSystem = struct {
pub fn deinit(e: *Entry, allocator: *std.mem.Allocator) void {
allocator.free(e.base);
allocator.free(e.dir);
- allocator.free(e.cache.kind);
+ allocator.free(e.cache.symlink);
allocator.destroy(e);
}
pub const Cache = struct {
symlink: string = "",
- kind: Kind,
+ kind: Kind = Kind.file,
};
pub const Kind = enum {
@@ -123,22 +123,22 @@ pub const FileSystem = struct {
file,
};
- pub fn kind(entry: *Entry, fs: *Implementation) Kind {
- const held = entry.mutex.acquire();
- defer held.release();
+ pub fn kind(entry: *Entry, fs: *Implementation) !Kind {
+ entry.mutex.lock();
+ defer entry.mutex.unlock();
if (entry.need_stat) {
entry.need_stat = false;
- entry.cache = fs.kind(entry.dir, entry.base);
+ entry.cache = try fs.kind(entry.dir, entry.base);
}
return entry.cache.kind;
}
- pub fn symlink(entry: *Entry, fs: *Implementation) string {
- const held = entry.mutex.acquire();
- defer held.release();
+ pub fn symlink(entry: *Entry, fs: *Implementation) !string {
+ entry.mutex.lock();
+ defer entry.mutex.unlock();
if (entry.need_stat) {
entry.need_stat = false;
- entry.cache = fs.kind(entry.dir, entry.base);
+ entry.cache = try fs.kind(entry.dir, entry.base);
}
return entry.cache.symlink;
}
@@ -158,13 +158,13 @@ pub const FileSystem = struct {
// }
pub const RealFS = struct {
- entries_mutex: Mutex = Mutex{},
+ entries_mutex: Mutex = Mutex.init(),
entries: std.StringHashMap(EntriesOption),
allocator: *std.mem.Allocator,
do_not_cache_entries: bool = false,
limiter: Limiter,
watcher: ?std.StringHashMap(WatchData) = null,
- watcher_mutex: Mutex = Mutex{},
+ watcher_mutex: Mutex = Mutex.init(),
pub fn init(allocator: *std.mem.Allocator, enable_watcher: bool) RealFS {
return RealFS{
@@ -215,9 +215,9 @@ pub const FileSystem = struct {
};
fn modKeyError(fs: *RealFS, path: string, err: anyerror) !void {
- if (fs.watcher) |watcher| {
- const hold = watch_data.watch_mutex.acquire();
- defer hold.release();
+ if (fs.watcher) |*watcher| {
+ watch_data.watch_mutex.lock();
+ defer watch_data.watch_mutex.unlock();
var state = WatchData.State.file_missing;
switch (err) {
@@ -238,9 +238,9 @@ pub const FileSystem = struct {
defer fs.limiter.after();
const key = ModKey.generate(fs, path) catch |err| return fs.modKeyError(path, err);
- if (fs.watcher) |watcher| {
- const hold = fs.watcher_mutex.acquire();
- defer hold.release();
+ if (fs.watcher) |*watcher| {
+ fs.watcher_mutex.lock();
+ defer fs.watcher_mutex.unlock();
var entry = try watcher.getOrPutValue(path, WatchData{ .state = .file_has_mod_key, .mod_key = key });
entry.value.mod_key = key;
@@ -253,7 +253,7 @@ pub const FileSystem = struct {
dir_entries: []string = &([_]string{}),
file_contents: string = "",
mod_key: ModKey = ModKey{},
- watch_mutex: Mutex = Mutex{},
+ watch_mutex: Mutex = Mutex.init(),
state: State = State.none,
pub const State = enum {
@@ -279,51 +279,54 @@ pub const FileSystem = struct {
// Limit the number of files open simultaneously to avoid ulimit issues
pub const Limiter = struct {
- chan: ChannelVoid,
-
- pub const ChannelVoid = std.event.Channel(void);
-
- pub fn init(allocator: *std.mem.Allocator) !Limiter {
- var limiter = Limiter{ .chan = std.event.Channel(bool) };
- var buf = try allocator.create(bool, 32);
- limiter.chan.init(buf);
-
- return limiter;
+ semaphore: Semaphore,
+ pub fn init(allocator: *std.mem.Allocator) Limiter {
+ return Limiter{
+ .semaphore = Semaphore.init(32),
+ // .counter = std.atomic.Int(u8).init(0),
+ // .lock = std.Thread.Mutex.init(),
+ };
}
// This will block if the number of open files is already at the limit
pub fn before(limiter: *Limiter) void {
- limiter.chan.put(void);
+ limiter.semaphore.wait();
+ // var added = limiter.counter.fetchAdd(1);
}
pub fn after(limiter: *Limiter) void {
- _ = await limiter.chan.get();
+ limiter.semaphore.post();
+ // limiter.counter.decr();
+ // if (limiter.held) |hold| {
+ // hold.release();
+ // limiter.held = null;
+ // }
}
};
- fn readdir(fs: *RealFS, dir: string) !DirEntry {
+ fn readdir(fs: *RealFS, _dir: string) !DirEntry {
fs.limiter.before();
defer fs.limiter.after();
- var handle = try std.fs.openDirAbsolute(dir, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true });
+ var handle = try std.fs.openDirAbsolute(_dir, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true });
defer handle.close();
var iter: std.fs.Dir.Iterator = handle.iterate();
- var dir = DirEntry{ .data = DirEntry.EntryMap.init(fs.allocator) };
+ var dir = DirEntry{ .data = DirEntry.EntryMap.init(fs.allocator), .dir = _dir };
errdefer dir.deinit();
while (try iter.next()) |_entry| {
const entry: std.fs.Dir.Entry = _entry;
- var kind: Entry.Kind = undefined;
+ var _kind: Entry.Kind = undefined;
switch (entry.kind) {
- Directory => {
- kind = Entry.Kind.dir;
+ .Directory => {
+ _kind = Entry.Kind.dir;
},
- SymLink => {
+ .SymLink => {
// This might be wrong!
- kind = Entry.Kind.file;
+ _kind = Entry.Kind.file;
},
- File => {
- kind = Entry.Kind.file;
+ .File => {
+ _kind = Entry.Kind.file;
},
else => {
continue;
@@ -335,58 +338,64 @@ pub const FileSystem = struct {
for (entry.name) |c, i| {
name[i] = std.ascii.toLower(c);
}
- try dir.data.put(name, Entry{
+ var entry_ptr = try fs.allocator.create(Entry);
+ entry_ptr.* = Entry{
.base = name,
- .dir = dir,
- .mutex = Mutex{},
+ .dir = _dir,
+ .mutex = Mutex.init(),
// Call "stat" lazily for performance. The "@material-ui/icons" package
// contains a directory with over 11,000 entries in it and running "stat"
// for each entry was a big performance issue for that package.
.need_stat = true,
.cache = Entry.Cache{
.symlink = if (entry.kind == std.fs.Dir.Entry.Kind.SymLink) (try fs.allocator.dupe(u8, name)) else "",
- .kind = kind,
+ .kind = _kind,
},
- });
+ };
+
+ try dir.data.put(name, entry_ptr);
}
// Copy at the bottom here so in the event of an error, we don't deinit the dir string.
- dir.dir = dir;
+ dir.dir = _dir;
return dir;
}
fn readDirectoryError(fs: *RealFS, dir: string, err: anyerror) !void {
- if (fs.watcher) |watcher| {
- var hold = fs.watcher_mutex.acquire();
- defer hold.release();
+ if (fs.watcher) |*watcher| {
+ fs.watcher_mutex.lock();
+ defer fs.watcher_mutex.unlock();
try watcher.put(dir, WatchData{ .state = .dir_missing });
}
if (!fs.do_not_cache_entries) {
- var hold = fs.entries_mutex.acquire();
- defer hold.release();
+ fs.entries_mutex.lock();
+ defer fs.entries_mutex.unlock();
try fs.entries.put(dir, EntriesOption{
- .err = DirEntry.Err{ .original_err = err, .canonical_err = err },
+ .err = DirEntry.Err{ .original_err = err, .canonical_error = err },
});
}
}
pub fn readDirectory(fs: *RealFS, dir: string) !EntriesOption {
if (!fs.do_not_cache_entries) {
- var hold = fs.entries_mutex.acquire();
- defer hold.release();
+ fs.entries_mutex.lock();
+ defer fs.entries_mutex.unlock();
// First, check the cache
- if (fs.entries.get(dir)) |dir| {
- return EntriesOption{ .entries = dir };
+ if (fs.entries.get(dir)) |_dir| {
+ return EntriesOption{ .entries = _dir.entries };
}
}
// Cache miss: read the directory entries
- const entries = fs.readdir(dir) catch |err| return (try fs.readDirectoryError(dir, err));
+ const entries = fs.readdir(dir) catch |err| {
+ _ = fs.readDirectoryError(dir, err) catch {};
+ return err;
+ };
- if (fs.watcher) |watcher| {
- var hold = fs.watcher_mutex.acquire();
- defer hold.release();
+ if (fs.watcher) |*watcher| {
+ fs.watcher_mutex.lock();
+ defer fs.watcher_mutex.unlock();
var _entries = entries.data.items();
const names = try fs.allocator.alloc([]const u8, _entries.len);
for (_entries) |entry, i| {
@@ -400,22 +409,21 @@ pub const FileSystem = struct {
);
}
+ fs.entries_mutex.lock();
+ defer fs.entries_mutex.unlock();
+ const result = EntriesOption{
+ .entries = entries,
+ };
if (!fs.do_not_cache_entries) {
- var hold = fs.entries_mutex.acquire();
- defer hold.release();
-
- try fs.entries.put(dir, EntriesOption{
- .err = DirEntry.Err{ .original_err = err, .canonical_err = err },
- });
+ try fs.entries.put(dir, result);
}
-
- return entries;
+ return result;
}
fn readFileError(fs: *RealFS, path: string, err: anyerror) !void {
- if (fs.watcher) |watcher| {
- var hold = fs.watcher_mutex.acquire();
- defer hold.release();
+ if (fs.watcher) |*watcher| {
+ fs.watcher_mutex.lock();
+ defer fs.watcher_mutex.unlock();
var res = try watcher.getOrPutValue(path, WatchData{ .state = .file_missing });
res.value.state = .file_missing;
}
@@ -434,7 +442,7 @@ pub const FileSystem = struct {
const size = _size orelse (try file.getEndPos() catch |err| return fs.readFileError(path, err));
const file_contents: []u8 = file.readToEndAllocOptions(fs.allocator, size, size, @alignOf(u8), null) catch |err| return fs.readFileError(path, err);
- if (fs.watcher) |watcher| {
+ if (fs.watcher) |*watcher| {
var hold = fs.watcher_mutex.acquire();
defer hold.release();
var res = try watcher.getOrPutValue(path, WatchData{});
@@ -457,19 +465,19 @@ pub const FileSystem = struct {
const file = try std.fs.openFileAbsolute(entry_path, .{ .read = true, .write = false });
defer file.close();
const stat = try file.stat();
- var kind = stat.kind;
+ var _kind = stat.kind;
var cache = Entry.Cache{ .kind = Entry.Kind.file, .symlink = "" };
- if (kind == .Symlink) {
+ if (_kind == .SymLink) {
// windows has a max filepath of 255 chars
// we give it a little longer for other platforms
var out_buffer = [_]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- var out_slice = &out_buffer;
+ var out_slice = &(out_buffer);
var symlink = entry_path;
var links_walked: u8 = 0;
while (links_walked < 255) : (links_walked += 1) {
- var link = try std.os.readLink(symlink, out_buffer);
+ var link = try std.os.readlink(symlink, out_slice);
if (!std.fs.path.isAbsolute(link)) {
combo[0] = dir;
@@ -482,7 +490,7 @@ pub const FileSystem = struct {
// TODO: do we need to clean the path?
symlink = link;
- const file2 = std.fs.openFileAbsolute(symlink, File.OpenFlags{ .read = true, .write = false }) catch return cache;
+ const file2 = std.fs.openFileAbsolute(symlink, std.fs.File.OpenFlags{ .read = true, .write = false }) catch return cache;
defer file2.close();
const stat2 = file2.stat() catch return cache;
@@ -501,11 +509,11 @@ pub const FileSystem = struct {
}
if (mode == .Directory) {
- kind = Entry.Kind.dir;
+ _kind = Entry.Kind.dir;
} else {
- kind = Entry.Kind.file;
+ _kind = Entry.Kind.file;
}
- cache.kind = kind;
+ cache.kind = _kind;
cache.symlink = symlink;
return cache;
diff --git a/src/import_record.zig b/src/import_record.zig
index 76af22dec..7b1acfd1b 100644
--- a/src/import_record.zig
+++ b/src/import_record.zig
@@ -28,6 +28,10 @@ pub const ImportKind = enum(u8) {
// A CSS "url(...)" token
url,
+
+ pub fn isFromCSS(k: ImportKind) bool {
+ return k == .at_conditional or k == .at or k == .url;
+ }
};
pub const ImportRecord = struct {
diff --git a/src/js_ast.zig b/src/js_ast.zig
index e7bf82dfa..6a93310a2 100644
--- a/src/js_ast.zig
+++ b/src/js_ast.zig
@@ -360,7 +360,6 @@ pub const G = struct {
flags: Flags.Function = Flags.Function.None,
};
-
pub const Arg = struct {
ts_decorators: ?ExprNodeList = null,
binding: BindingNodeIndex,
diff --git a/src/logger.zig b/src/logger.zig
index 77fad57c0..ec93e52cc 100644
--- a/src/logger.zig
+++ b/src/logger.zig
@@ -14,6 +14,7 @@ pub const Kind = enum {
warn,
note,
debug,
+ verbose,
pub fn string(self: Kind) string {
return switch (self) {
@@ -21,6 +22,7 @@ pub const Kind = enum {
.warn => "warn",
.note => "note",
.debug => "debug",
+ .verbose => "verbose",
};
}
};
@@ -230,7 +232,7 @@ pub const Log = struct {
self.msgs.deinit();
}
- pub fn addVerboseWithNotes(source: ?*Source, loc: Loc, text: string, notes: []Data) !void {
+ pub fn addVerboseWithNotes(log: *Log, source: ?*Source, loc: Loc, text: string, notes: []Data) !void {
try log.addMsg(Msg{
.kind = .verbose,
.data = rangeData(source, Range{ .loc = loc }, text),
diff --git a/src/options.zig b/src/options.zig
index a3f55a368..29be30cca 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -1,6 +1,6 @@
const std = @import("std");
const logger = @import("logger.zig");
-const fs = @import("fs.zig");
+const Fs = @import("fs.zig");
const alloc = @import("alloc.zig");
const resolver = @import("./resolver/resolver.zig");
const api = @import("./api/schema.zig");
@@ -11,13 +11,14 @@ usingnamespace @import("global.zig");
const assert = std.debug.assert;
-pub fn validatePath(log: *logger.Log, fs: *fs.FileSystem.Implementation, cwd: string, rel_path: string, allocator: *std.mem.Allocator, path_kind: string) string {
+pub fn validatePath(log: *logger.Log, fs: *Fs.FileSystem.Implementation, cwd: string, rel_path: string, allocator: *std.mem.Allocator, path_kind: string) string {
if (rel_path.len == 0) {
return "";
}
const paths = [_]string{ cwd, rel_path };
- const out = std.fs.path.resolve(allocator, &path) catch |err| {
+ const out = std.fs.path.resolve(allocator, &paths) catch |err| {
log.addErrorFmt(null, logger.Loc{}, allocator, "Invalid {s}: {s}", .{ path_kind, rel_path }) catch unreachable;
+ Global.panic("", .{});
};
return out;
@@ -42,7 +43,7 @@ pub const ExternalModules = struct {
suffix: string,
};
- pub fn init(allocator: *std.mem.Allocator, fs: *fs.FileSystem.Implementation, cwd: string, externals: []string, log: *logger.Log) ExternalModules {
+ pub fn init(allocator: *std.mem.Allocator, fs: *Fs.FileSystem.Implementation, cwd: string, externals: []const string, log: *logger.Log) ExternalModules {
var result = ExternalModules{
.node_modules = std.BufSet.init(allocator),
.abs_paths = std.BufSet.init(allocator),
@@ -56,9 +57,10 @@ pub const ExternalModules = struct {
var patterns = std.ArrayList(WildcardPattern).init(allocator);
for (externals) |external| {
+ const path = external;
if (strings.indexOfChar(path, '*')) |i| {
if (strings.indexOfChar(path[i + 1 .. path.len], '*') != null) {
- log.addErrorFmt(null, .empty, allocator, "External path \"{s}\" cannot have more than one \"*\" wildcard", .{external}) catch unreachable;
+ log.addErrorFmt(null, logger.Loc.Empty, allocator, "External path \"{s}\" cannot have more than one \"*\" wildcard", .{external}) catch unreachable;
return result;
}
@@ -100,8 +102,8 @@ pub const Platform = enum {
neutral,
const MAIN_FIELD_NAMES = [_]string{ "browser", "module", "main" };
- pub const DefaultMainFields: std.EnumArray(Platform, []string) = comptime {
- var array = std.EnumArray(Platform, []string).initUndefined();
+ pub const DefaultMainFields: std.EnumArray(Platform, []const string) = comptime {
+ var array = std.EnumArray(Platform, []const string).initUndefined();
// Note that this means if a package specifies "module" and "main", the ES6
// module will not be selected. This means tree shaking will not work when
@@ -234,7 +236,7 @@ const TypeScript = struct {
pub const BundleOptions = struct {
footer: string = "",
banner: string = "",
- define: defines.Define,
+ define: *defines.Define,
loaders: std.StringHashMap(Loader),
resolve_dir: string = "/",
jsx: JSX.Pragma = JSX.Pragma{},
@@ -247,46 +249,61 @@ pub const BundleOptions = struct {
resolve_mode: api.Api.ResolveMode,
tsconfig_override: ?string = null,
platform: Platform = Platform.browser,
- main_fields: []string = Platform.DefaultMainFields.get(Platform.browser),
+ main_fields: []const string = Platform.DefaultMainFields.get(Platform.browser),
log: *logger.Log,
- external: ExternalModules,
- entry_points: []string,
+ external: ExternalModules = ExternalModules{},
+ entry_points: []const string,
pub fn fromApi(
allocator: *std.mem.Allocator,
- fs: *fs.FileSystem,
+ fs: *Fs.FileSystem,
+ log: *logger.Log,
transform: Api.TransformOptions,
) !BundleOptions {
- var log = logger.Log.init(allocator);
- var opts: BundleOptions = std.mem.zeroes(BundleOptions);
-
- opts.write = transform.write;
- if (transform.jsx) |jsx| {
- opts.jsx = JSX.Pragma.fromApi(jsx);
+ var loader_values = try allocator.alloc(Loader, transform.loader_values.len);
+ for (loader_values) |_, i| {
+ const loader = switch (transform.loader_values[i]) {
+ .jsx => Loader.jsx,
+ .js => Loader.js,
+ .ts => Loader.ts,
+ .css => Loader.css,
+ .tsx => Loader.tsx,
+ .json => Loader.json,
+ else => unreachable,
+ };
+
+ loader_values[i] = loader;
}
-
- options.loaders = try stringHashMapFromArrays(std.StringHashMap(Loader), allocator, transform.loader_keys, transform.loader_values);
var user_defines = try stringHashMapFromArrays(defines.RawDefines, allocator, transform.define_keys, transform.define_values);
-
if (transform.define_keys.len == 0) {
try user_defines.put("process.env.NODE_ENV", "development");
}
var resolved_defines = try defines.DefineData.from_input(user_defines, log, allocator);
- options.defines = try defines.Define.init(
- allocator,
- );
- if (transform.external.len > 0) {
- opts.external = try ExternalModules.init(allocator, opts.fs, opts.fs.top_level_dir, transform.external, &log);
+ var opts: BundleOptions = BundleOptions{
+ .log = log,
+ .resolve_mode = transform.resolve orelse .dev,
+ .define = try defines.Define.init(
+ allocator,
+ resolved_defines,
+ ),
+ .loaders = try stringHashMapFromArrays(std.StringHashMap(Loader), allocator, transform.loader_keys, loader_values),
+ .write = transform.write orelse false,
+ .external = ExternalModules.init(allocator, &fs.fs, fs.top_level_dir, transform.external, log),
+ .entry_points = transform.entry_points,
+ };
+
+ if (transform.jsx) |jsx| {
+ opts.jsx = JSX.Pragma.fromApi(jsx);
}
if (transform.platform) |plat| {
- opts.platform = plat;
- opts.main_fields = Platform.DefaultMainFields.get(plat);
+ opts.platform = if (plat == .browser) .browser else .node;
+ opts.main_fields = Platform.DefaultMainFields.get(opts.platform);
}
if (transform.main_fields.len > 0) {
- options.main_fields = transform.main_fields;
+ opts.main_fields = transform.main_fields;
}
return opts;
@@ -304,7 +321,7 @@ pub const TransformOptions = struct {
inject: ?[]string = null,
public_url: string = "/",
preserve_symlinks: bool = false,
- entry_point: fs.File,
+ entry_point: Fs.File,
resolve_paths: bool = false,
tsconfig_override: ?string = null,
@@ -314,8 +331,8 @@ pub const TransformOptions = struct {
pub fn initUncached(allocator: *std.mem.Allocator, entryPointName: string, code: string) !TransformOptions {
assert(entryPointName.len > 0);
- var entryPoint = fs.File{
- .path = fs.Path.init(entryPointName),
+ var entryPoint = Fs.File{
+ .path = Fs.Path.init(entryPointName),
.contents = code,
};
diff --git a/src/resolver/data_url.zig b/src/resolver/data_url.zig
index 48076521b..dd443dbe8 100644
--- a/src/resolver/data_url.zig
+++ b/src/resolver/data_url.zig
@@ -119,7 +119,7 @@ pub const MimeType = enum {
pub const DataURL = struct {
mime_type: string,
data: string,
- is_base64: bool,
+ is_base64: bool = false,
pub fn parse(url: string) ?DataURL {
if (!strings.startsWith(url, "data:")) {
@@ -129,7 +129,7 @@ pub const DataURL = struct {
const comma = strings.indexOfChar(url, ',') orelse return null;
var parsed = DataURL{
- .mime_type = url["data:"..comma],
+ .mime_type = url["data:".len..comma],
.data = url[comma + 1 .. url.len],
};
diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig
index d18d82d6c..340648399 100644
--- a/src/resolver/resolver.zig
+++ b/src/resolver/resolver.zig
@@ -1,5 +1,5 @@
usingnamespace @import("../global.zig");
-const ast = @import("../ast.zig");
+const ast = @import("../import_record.zig");
const logger = @import("../logger.zig");
const options = @import("../options.zig");
const fs = @import("../fs.zig");
@@ -28,19 +28,19 @@ pub const DirInfo = struct {
// A pointer to the enclosing dirInfo with a valid "browser" field in
// package.json. We need this to remap paths after they have been resolved.
- enclosing_browser_scope: *?DirInfo = null,
+ enclosing_browser_scope: ?*DirInfo = null,
abs_path: string,
entries: fs.FileSystem.DirEntry,
has_node_modules: bool = false, // Is there a "node_modules" subdirectory?
- package_json: ?*PackageJSON, // Is there a "package.json" file?
- ts_config_json: ?*TSConfigJSON, // Is there a "tsconfig.json" file in this directory or a parent directory?
+ package_json: ?*PackageJSON = null, // Is there a "package.json" file?
+ ts_config_json: ?*TSConfigJSON = null, // Is there a "tsconfig.json" file in this directory or a parent directory?
abs_real_path: string = "", // If non-empty, this is the real absolute path resolving any symlinks
};
pub const Resolver = struct {
- opts: options.TransformOptions,
+ opts: options.BundleOptions,
fs: *fs.FileSystem,
log: *logger.Log,
allocator: *std.mem.Allocator,
@@ -93,6 +93,23 @@ pub const Resolver = struct {
// all parent directories
dir_cache: std.StringHashMap(?*DirInfo),
+ pub fn init1(
+ allocator: *std.mem.Allocator,
+ log: *logger.Log,
+ _fs: *fs.FileSystem,
+ opts: options.BundleOptions,
+ ) Resolver {
+ return Resolver{
+ .allocator = allocator,
+ .dir_cache = std.StringHashMap(?*DirInfo).init(allocator),
+ .mutex = std.Thread.Mutex{},
+ .caches = cache.Cache.Set.init(allocator),
+ .opts = opts,
+ .fs = _fs,
+ .log = log,
+ };
+ }
+
pub const DebugLogs = struct {
what: string = "",
indent: MutableString,
@@ -100,9 +117,10 @@ pub const Resolver = struct {
pub const FlushMode = enum { fail, success };
- pub fn init(allocator: *std.mem.Allocator) DebugLogs {
- return .{
- .indent = MutableString.init(allocator, 0),
+ pub fn init(allocator: *std.mem.Allocator) !DebugLogs {
+ var mutable = try MutableString.init(allocator, 0);
+ return DebugLogs{
+ .indent = mutable,
.notes = std.ArrayList(logger.Data).init(allocator),
};
}
@@ -110,7 +128,7 @@ pub const Resolver = struct {
pub fn deinit(d: DebugLogs) void {
var allocator = d.notes.allocator;
d.notes.deinit();
- d.indent.deinit();
+ // d.indent.deinit();
}
pub fn increaseIndent(d: *DebugLogs) !void {
@@ -125,9 +143,9 @@ pub const Resolver = struct {
var text = _text;
const len = d.indent.len();
if (len > 0) {
- text = try d.notes.allocator.alloc(u8, text.len + d.indent.len);
- std.mem.copy(u8, text, d.indent);
- std.mem.copy(u8, text[d.indent.len..text.len], _text);
+ var __text = try d.notes.allocator.alloc(u8, text.len + len);
+ std.mem.copy(u8, __text, d.indent.list.items);
+ std.mem.copy(u8, __text[len..text.len], _text);
d.notes.allocator.free(_text);
}
@@ -151,7 +169,7 @@ pub const Resolver = struct {
is_external: bool = false,
- different_case: ?fs.FileSystem.Entry.Lookup.DifferentCase = null,
+ diff_case: ?fs.FileSystem.Entry.Lookup.DifferentCase = null,
// If present, any ES6 imports to this file can be considered to have no side
// effects. This means they should be removed if unused.
@@ -166,7 +184,7 @@ pub const Resolver = struct {
preserve_unused_imports_ts: bool = false,
// This is the "type" field from "package.json"
- module_type: options.ModuleType,
+ module_type: options.ModuleType = options.ModuleType.unknown,
debug_meta: ?DebugMeta = null,
@@ -200,16 +218,16 @@ pub const Resolver = struct {
}
pub fn flushDebugLogs(r: *Resolver, flush_mode: DebugLogs.FlushMode) !void {
- if (r.debug_logs) |debug| {
+ if (r.debug_logs) |*debug| {
defer {
debug.deinit();
r.debug_logs = null;
}
- if (mode == .failure) {
- try r.log.addRangeDebugWithNotes(null, .empty, debug.what, debug.notes.toOwnedSlice());
+ if (flush_mode == DebugLogs.FlushMode.fail) {
+ try r.log.addRangeDebugWithNotes(null, logger.Range{ .loc = logger.Loc{} }, debug.what, debug.notes.toOwnedSlice());
} else if (@enumToInt(r.log.level) <= @enumToInt(logger.Log.Level.verbose)) {
- try r.log.addVerboseWithNotes(null, .empty, debug.what, debug.notes.toOwnedSlice());
+ try r.log.addVerboseWithNotes(null, logger.Loc.Empty, debug.what, debug.notes.toOwnedSlice());
}
}
}
@@ -220,7 +238,7 @@ pub const Resolver = struct {
r.debug_logs.?.deinit();
}
- r.debug_logs = DebugLogs.init(r.allocator);
+ r.debug_logs = try DebugLogs.init(r.allocator);
}
// Certain types of URLs default to being external for convenience
@@ -237,35 +255,38 @@ pub const Resolver = struct {
// "background: url(//example.com/images/image.png);"
strings.startsWith(import_path, "//"))
{
- if (r.debug_logs) |debug| {
+ if (r.debug_logs) |*debug| {
try debug.addNote("Marking this path as implicitly external");
}
r.flushDebugLogs(.success) catch {};
- return Result{ .path_pair = PathPair{
- .primary = Path{ .text = import_path },
+ return Result{
+ .path_pair = PathPair{
+ .primary = Path.init(import_path),
+ },
.is_external = true,
- } };
+ .module_type = .esm,
+ };
}
- if (DataURL.parse(import_path) catch null) |_data_url| {
+ if (DataURL.parse(import_path)) |_data_url| {
const data_url: DataURL = _data_url;
// "import 'data:text/javascript,console.log(123)';"
// "@import 'data:text/css,body{background:white}';"
if (data_url.decode_mime_type() != .Unsupported) {
- if (r.debug_logs) |debug| {
+ if (r.debug_logs) |*debug| {
debug.addNote("Putting this path in the \"dataurl\" namespace") catch {};
}
r.flushDebugLogs(.success) catch {};
- return Resolver.Result{ .path_pair = PathPair{ .primary = Path{ .text = import_path, .namespace = "dataurl" } } };
+ return Resolver.Result{ .path_pair = PathPair{ .primary = Path.initWithNamespace(import_path, "dataurl") } };
}
// "background: url(data:image/png;base64,iVBORw0KGgo=);"
- if (r.debug_logs) |debug| {
+ if (r.debug_logs) |*debug| {
debug.addNote("Marking this \"dataurl\" as external") catch {};
}
r.flushDebugLogs(.success) catch {};
return Resolver.Result{
- .path_pair = PathPair{ .primary = Path{ .text = import_path, .namespace = "dataurl" } },
+ .path_pair = PathPair{ .primary = Path.initWithNamespace(import_path, "dataurl") },
.is_external = true,
};
}
@@ -273,7 +294,7 @@ pub const Resolver = struct {
// Fail now if there is no directory to resolve in. This can happen for
// virtual modules (e.g. stdin) if a resolve directory is not specified.
if (source_dir.len == 0) {
- if (r.debug_logs) |debug| {
+ if (r.debug_logs) |*debug| {
debug.addNote("Cannot resolve this path without a directory") catch {};
}
r.flushDebugLogs(.fail) catch {};
@@ -282,6 +303,10 @@ pub const Resolver = struct {
const hold = r.mutex.acquire();
defer hold.release();
+
+ var result = try r.resolveWithoutSymlinks(source_dir, import_path, kind);
+
+ return result;
}
pub fn resolveWithoutSymlinks(r: *Resolver, source_dir: string, import_path: string, kind: ast.ImportKind) !Result {
@@ -298,24 +323,24 @@ pub const Resolver = struct {
// experience unexpected build failures later on other operating systems.
// Treating these paths as absolute paths on all platforms means Windows
// users will not be able to accidentally make use of these paths.
- if (striongs.startsWith(import_path, "/") or std.fs.path.isAbsolutePosix(import_path)) {
- if (r.debug_logs) |debug| {
+ if (strings.startsWith(import_path, "/") or std.fs.path.isAbsolutePosix(import_path)) {
+ if (r.debug_logs) |*debug| {
debug.addNoteFmt("The import \"{s}\" is being treated as an absolute path", .{import_path}) catch {};
}
// First, check path overrides from the nearest enclosing TypeScript "tsconfig.json" file
- if (try r.dirInfoCached(source_dir)) |_dir_info| {
+ if ((r.dirInfoCached(source_dir) catch null)) |_dir_info| {
const dir_info: *DirInfo = _dir_info;
if (dir_info.ts_config_json) |tsconfig| {
- if (tsconfig.paths.size() > 0) {
+ if (tsconfig.paths.count() > 0) {
const res = r.matchTSConfigPaths(tsconfig, import_path, kind);
return Result{ .path_pair = res.path_pair, .diff_case = res.diff_case };
}
}
}
-
-
}
+
+ return result;
}
pub const TSConfigExtender = struct {
@@ -386,7 +411,7 @@ pub const Resolver = struct {
}
// TODO:
- pub fn prettyPath(r: *Resolver, path: Ptah) string {
+ pub fn prettyPath(r: *Resolver, path: Path) string {
return path.text;
}
@@ -399,10 +424,12 @@ pub const Resolver = struct {
return path[0] != '/' and !strings.startsWith(path, "./") and !strings.startsWith(path, "../") and !strings.eql(path, ".") and !strings.eql(path, "..");
}
- fn dirInfoCached(r: *Resolver, path: string) !*DirInfo {
+ fn dirInfoCached(r: *Resolver, path: string) !?*DirInfo {
const info = r.dir_cache.get(path) orelse try r.dirInfoUncached(path);
try r.dir_cache.put(path, info);
+
+ return info;
}
pub const MatchResult = struct {
@@ -415,12 +442,12 @@ pub const Resolver = struct {
Global.notimpl();
}
- fn dirInfoUncached(r: *Resolver, path: string) !?*DirInfo {
- const rfs: r.fs.RealFS = r.fs.fs;
+ fn dirInfoUncached(r: *Resolver, path: string) anyerror!?*DirInfo {
+ var rfs: *fs.FileSystem.RealFS = &r.fs.fs;
var parent: ?*DirInfo = null;
const parent_dir = std.fs.path.dirname(path) orelse return null;
if (!strings.eql(parent_dir, path)) {
- parent = r.dirInfoCached(parent_dir);
+ parent = try r.dirInfoCached(parent_dir);
}
// List the directories
@@ -431,7 +458,7 @@ pub const Resolver = struct {
// case on Unix for directories that only have the execute permission bit
// set. It means we will just pass through the empty directory and
// continue to check the directories above it, which is now node behaves.
- switch (_entries.err) {
+ switch (_entries.err.original_err) {
error.EACCESS => {
entries = fs.FileSystem.DirEntry.empty(path, r.allocator);
},
@@ -447,7 +474,7 @@ pub const Resolver = struct {
error.ENOTDIR,
=> {},
else => {
- const pretty = r.prettyPath(fs.Path{ .text = path, .namespace = "file" });
+ const pretty = r.prettyPath(Path.init(path));
r.log.addErrorFmt(
null,
logger.Loc{},
@@ -455,9 +482,9 @@ pub const Resolver = struct {
"Cannot read directory \"{s}\": {s}",
.{
pretty,
- @errorName(err),
+ @errorName(_entries.err.original_err),
},
- );
+ ) catch {};
return null;
},
}
@@ -468,7 +495,7 @@ pub const Resolver = struct {
var info = try r.allocator.create(DirInfo);
info.* = DirInfo{
.abs_path = path,
- .parent = parent_dir,
+ .parent = parent,
.entries = entries,
};
@@ -476,7 +503,8 @@ pub const Resolver = struct {
var base = std.fs.path.basename(path);
if (!strings.eqlComptime(base, "node_modules")) {
if (entries.get("node_modules")) |entry| {
- info.has_node_modules = entry.entry.kind(rfs) == .dir;
+ // the catch might be wrong!
+ info.has_node_modules = (entry.entry.kind(rfs) catch .file) == .dir;
}
}
@@ -547,12 +575,12 @@ pub const Resolver = struct {
var visited = std.StringHashMap(bool).init(r.allocator);
defer visited.deinit();
info.ts_config_json = r.parseTSConfig(tsconfigpath, visited) catch |err| {
- const pretty = r.prettyPath(fs.Path{ .text = tsconfigpath, .namespace = "file" });
+ const pretty = r.prettyPath(Path.init(tsconfigpath));
if (err == error.ENOENT) {
- r.log.addErrorFmt(null, .empty, r.allocator, "Cannot find tsconfig file \"{s}\"", .{pretty});
+ r.log.addErrorFmt(null, logger.Loc.Empty, r.allocator, "Cannot find tsconfig file \"{s}\"", .{pretty});
} else if (err != error.ParseErrorAlreadyLogged) {
- r.log.addErrorFmt(null, .empty, r.allocator, "Cannot read file \"{s}\": {s}", .{ pretty, @errorName(err) });
+ r.log.addErrorFmt(null, logger.Loc.Empty, r.allocator, "Cannot read file \"{s}\": {s}", .{ pretty, @errorName(err) });
}
};
}
diff --git a/src/resolver/tsconfig_json.zig b/src/resolver/tsconfig_json.zig
index dd952b65f..b4e027413 100644
--- a/src/resolver/tsconfig_json.zig
+++ b/src/resolver/tsconfig_json.zig
@@ -19,7 +19,7 @@ pub const TSConfigJSON = struct {
// "BaseURL" is missing, in which case it is as if "BaseURL" was ".". This
// is to implement the "paths without baseUrl" feature from TypeScript 4.1.
// More info: https://github.com/microsoft/TypeScript/issues/31869
- base_url_for_paths = "",
+ base_url_for_paths: string = "",
// The verbatim values of "compilerOptions.paths". The keys are patterns to
// match and the values are arrays of fallback paths to search. Each key and
diff --git a/src/string_immutable.zig b/src/string_immutable.zig
index bac82f9f2..55d94917b 100644
--- a/src/string_immutable.zig
+++ b/src/string_immutable.zig
@@ -45,6 +45,21 @@ pub fn startsWith(self: string, str: string) bool {
return true;
}
/// Reports whether `self` ends with the suffix `str`.
/// An empty `str` is considered a suffix of every string.
pub fn endsWith(self: string, str: string) bool {
    if (str.len > self.len) {
        return false;
    }

    // Compare the trailing str.len bytes of `self` against `str`.
    // (The previous version compared prefix indices and skipped index 0,
    // so it only worked when both strings had equal length, and it
    // underflowed on an empty suffix.)
    const offset = self.len - str.len;
    var i: usize = 0;
    while (i < str.len) : (i += 1) {
        if (str[i] != self[offset + i]) {
            return false;
        }
    }

    return true;
}
+
pub fn endsWithAny(self: string, str: string) bool {
const end = self[self.len - 1];
for (str) |char| {
diff --git a/src/string_mutable.zig b/src/string_mutable.zig
index 2f23ed5ed..64ebf7601 100644
--- a/src/string_mutable.zig
+++ b/src/string_mutable.zig
@@ -88,7 +88,7 @@ pub const MutableString = struct {
return str;
}
- pub fn len(self: *MutableString) usize {
+ pub fn len(self: *const MutableString) usize {
return self.list.items.len;
}
diff --git a/src/sync.zig b/src/sync.zig
new file mode 100644
index 000000000..80e20cfa1
--- /dev/null
+++ b/src/sync.zig
@@ -0,0 +1,1220 @@
+const std = @import("std");
+
+// https://gist.github.com/kprotty/0d2dc3da4840341d6ff361b27bdac7dc
/// A work-stealing thread pool. Each worker owns a local queue; tasks pushed
/// from a worker thread land in that worker's queue, otherwise in the shared
/// pool queue. Workers steal from each other and from the shared queue.
pub const ThreadPool = struct {
    state: usize = 0, // packed State (see State.pack/unpack)
    spawned: usize = 0, // number of workers successfully spawned so far
    run_queue: Queue,
    idle_semaphore: Semaphore,
    allocator: *std.mem.Allocator,
    workers: []Worker = &[_]Worker{},

    pub const InitConfig = struct {
        allocator: ?*std.mem.Allocator = null,
        max_threads: ?usize = null,

        var default_gpa = std.heap.GeneralPurposeAllocator(.{}){};
        var default_allocator = &default_gpa.allocator;
    };

    /// Initialize in place and spawn one worker per CPU (or `max_threads`).
    /// On failure, partially-spawned workers are shut down via `deinit`.
    pub fn init(self: *ThreadPool, config: InitConfig) !void {
        self.* = ThreadPool{
            .run_queue = Queue.init(),
            .idle_semaphore = Semaphore.init(0),
            .allocator = config.allocator orelse InitConfig.default_allocator,
        };

        errdefer self.deinit();

        const num_workers = std.math.max(1, config.max_threads orelse std.Thread.cpuCount() catch 1);
        self.workers = try self.allocator.alloc(Worker, num_workers);

        for (self.workers) |*worker| {
            try worker.init(self);
            @atomicStore(usize, &self.spawned, self.spawned + 1, .SeqCst);
        }
    }

    /// Shut down, join all spawned workers, then drain and run any tasks
    /// still queued so their closures are freed.
    pub fn deinit(self: *ThreadPool) void {
        self.shutdown();

        for (self.workers[0..self.spawned]) |*worker|
            worker.deinit();

        while (self.run_queue.pop()) |run_node|
            (run_node.data.runFn)(&run_node.data);

        self.allocator.free(self.workers);
        self.idle_semaphore.deinit();
        self.run_queue.deinit();
        self.* = undefined;
    }

    /// Heap-allocate a closure around `func`/`args` and enqueue it.
    /// The closure frees itself after it runs.
    pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
        const Args = @TypeOf(args);
        const Closure = struct {
            func_args: Args,
            allocator: *std.mem.Allocator,
            run_node: RunNode = .{ .data = .{ .runFn = runFn } },

            fn runFn(runnable: *Runnable) void {
                const run_node = @fieldParentPtr(RunNode, "data", runnable);
                const closure = @fieldParentPtr(@This(), "run_node", run_node);
                const result = @call(.{}, func, closure.func_args);
                closure.allocator.destroy(closure);
            }
        };

        const allocator = self.allocator;
        const closure = try allocator.create(Closure);
        // `create` must be paired with `destroy` (single item), not `free` (slice);
        // runFn above already destroys the closure on the success path.
        errdefer allocator.destroy(closure);
        closure.* = Closure{
            .func_args = args,
            .allocator = allocator,
        };

        const run_node = &closure.run_node;
        if (Worker.current) |worker| {
            worker.run_queue.push(run_node);
        } else {
            self.run_queue.push(run_node);
        }

        self.notify();
    }

    // Pool-wide state packed into one word so it can be CAS'd atomically.
    const State = struct {
        is_shutdown: bool = false,
        is_notified: bool = false,
        idle_workers: usize = 0,

        fn pack(self: State) usize {
            return ((@as(usize, @boolToInt(self.is_shutdown)) << 0) |
                (@as(usize, @boolToInt(self.is_notified)) << 1) |
                (self.idle_workers << 2));
        }

        fn unpack(value: usize) State {
            return State{
                .is_shutdown = value & (1 << 0) != 0,
                .is_notified = value & (1 << 1) != 0,
                .idle_workers = value >> 2,
            };
        }
    };

    /// Called by an idle worker: either consume a pending notification or
    /// register as idle and block on the semaphore until notified.
    fn wait(self: *ThreadPool) error{Shutdown}!void {
        var state = State.unpack(@atomicLoad(usize, &self.state, .SeqCst));
        while (true) {
            if (state.is_shutdown)
                return error.Shutdown;

            var new_state = state;
            if (state.is_notified) {
                new_state.is_notified = false;
            } else {
                new_state.idle_workers += 1;
            }

            if (@cmpxchgWeak(
                usize,
                &self.state,
                state.pack(),
                new_state.pack(),
                .SeqCst,
                .SeqCst,
            )) |updated| {
                state = State.unpack(updated);
                continue;
            }

            if (!state.is_notified)
                self.idle_semaphore.wait();
            return;
        }
    }

    /// Wake one idle worker, or leave a notification token if none are idle.
    fn notify(self: *ThreadPool) void {
        var state = State.unpack(@atomicLoad(usize, &self.state, .SeqCst));
        while (true) {
            if (state.is_shutdown)
                return;

            var new_state = state;
            if (state.is_notified) {
                return;
            } else if (state.idle_workers == 0) {
                new_state.is_notified = true;
            } else {
                new_state.idle_workers -= 1;
            }

            if (@cmpxchgWeak(
                usize,
                &self.state,
                state.pack(),
                new_state.pack(),
                .SeqCst,
                .SeqCst,
            )) |updated| {
                state = State.unpack(updated);
                continue;
            }

            if (!new_state.is_notified)
                self.idle_semaphore.post();
            return;
        }
    }

    /// Flip the shutdown bit and release every idle worker so it can observe it.
    fn shutdown(self: *ThreadPool) void {
        var state = State.unpack(@atomicRmw(
            usize,
            &self.state,
            .Xchg,
            (State{ .is_shutdown = true }).pack(),
            .SeqCst,
        ));

        while (state.idle_workers > 0) : (state.idle_workers -= 1)
            self.idle_semaphore.post();
    }

    const Worker = struct {
        thread: *std.Thread,
        run_queue: Queue,

        fn init(self: *Worker, pool: *ThreadPool) !void {
            self.* = Worker{
                .thread = undefined,
                .run_queue = Queue.init(),
            };

            self.thread = std.Thread.spawn(
                Worker.run,
                RunConfig{
                    .worker = self,
                    .pool = pool,
                },
            ) catch |err| {
                self.run_queue.deinit();
                return err;
            };
        }

        fn deinit(self: *Worker) void {
            self.thread.wait();
            self.run_queue.deinit();
            self.* = undefined;
        }

        // Worker currently running on this OS thread, if any (used by spawn).
        threadlocal var current: ?*Worker = null;

        const RunConfig = struct {
            worker: *Worker,
            pool: *ThreadPool,
        };

        /// Worker main loop: poll for work, block in pool.wait() when idle,
        /// exit when the pool shuts down.
        fn run(config: RunConfig) void {
            const self = config.worker;
            const pool = config.pool;

            const old_current = current;
            current = self;
            defer current = old_current;

            var tick = @ptrToInt(self);
            var prng = std.rand.DefaultPrng.init(tick);

            while (true) {
                const run_node = self.poll(tick, pool, &prng.random) orelse {
                    pool.wait() catch break;
                    continue;
                };

                tick +%= 1;
                (run_node.data.runFn)(&run_node.data);
            }
        }

        /// Find the next task: periodically favor stealing (for fairness),
        /// then the local queue, then randomized stealing, then the shared queue.
        fn poll(self: *Worker, tick: usize, pool: *ThreadPool, rand: *std.rand.Random) ?*RunNode {
            if (tick % 128 == 0) {
                if (self.steal(pool, rand, .fair)) |run_node|
                    return run_node;
            }

            if (tick % 64 == 0) {
                if (self.run_queue.steal(&pool.run_queue, .fair)) |run_node|
                    return run_node;
            }

            if (self.run_queue.pop()) |run_node|
                return run_node;

            var attempts: usize = 8;
            while (attempts > 0) : (attempts -= 1) {
                if (self.steal(pool, rand, .unfair)) |run_node| {
                    return run_node;
                } else {
                    std.os.sched_yield() catch spinLoopHint();
                }
            }

            if (self.run_queue.steal(&pool.run_queue, .unfair)) |run_node|
                return run_node;

            return null;
        }

        /// Try to steal a task from a random other worker's queue.
        fn steal(self: *Worker, pool: *ThreadPool, rand: *std.rand.Random, mode: anytype) ?*RunNode {
            const spawned = @atomicLoad(usize, &pool.spawned, .SeqCst);
            if (spawned < 2)
                return null;

            var index = rand.uintLessThan(usize, spawned);

            var iter = spawned;
            while (iter > 0) : (iter -= 1) {
                const target = &pool.workers[index];

                index += 1;
                if (index == spawned)
                    index = 0;

                if (target == self)
                    continue;
                if (self.run_queue.steal(&target.run_queue, mode)) |run_node|
                    return run_node;
            }

            return null;
        }
    };

    // Mutex-protected intrusive list with an atomic size for cheap emptiness checks.
    const Queue = struct {
        mutex: Mutex,
        size: usize,
        list: List,

        fn init() Queue {
            return Queue{
                .mutex = Mutex.init(),
                .size = 0,
                .list = .{},
            };
        }

        fn deinit(self: *Queue) void {
            self.mutex.deinit();
            self.* = undefined;
        }

        fn push(self: *Queue, node: *List.Node) void {
            self.mutex.lock();
            defer self.mutex.unlock();

            self.list.prepend(node);
            @atomicStore(usize, &self.size, self.size + 1, .SeqCst);
        }

        fn pop(self: *Queue) ?*List.Node {
            return self.popFrom(.head);
        }

        fn steal(self: *Queue, target: *Queue, mode: enum { fair, unfair }) ?*RunNode {
            return target.popFrom(switch (mode) {
                .fair => .tail,
                .unfair => .head,
            });
        }

        fn popFrom(self: *Queue, side: enum { head, tail }) ?*RunNode {
            // Fast path: skip the lock when the queue looks empty.
            if (@atomicLoad(usize, &self.size, .SeqCst) == 0)
                return null;

            self.mutex.lock();
            defer self.mutex.unlock();

            // potential deadlock when all pops are fair..
            const run_node = switch (side) {
                .head => self.list.popFirst(),
                .tail => self.list.pop(),
            };

            if (run_node != null)
                @atomicStore(usize, &self.size, self.size - 1, .SeqCst);

            return run_node;
        }
    };

    const List = std.TailQueue(Runnable);
    const RunNode = List.Node;
    const Runnable = struct {
        runFn: fn (*Runnable) void,
    };
};
+
/// A blocking MPMC channel built on a LinearFifo guarded by a mutex and two
/// condition variables (one for waiting writers, one for waiting readers).
/// Closing the channel wakes all waiters; subsequent writes (and reads of an
/// empty channel) return `error.Closed`.
pub fn Channel(
    comptime T: type,
    comptime buffer_type: std.fifo.LinearFifoBufferType,
) type {
    return struct {
        mutex: Mutex,
        putters: Condvar,
        getters: Condvar,
        buffer: Buffer,
        is_closed: bool,

        const Self = @This();
        const Buffer = std.fifo.LinearFifo(T, buffer_type);

        // The init signature depends on how the fifo's storage is provided.
        pub usingnamespace switch (buffer_type) {
            .Static => struct {
                pub fn init() Self {
                    return Self.withBuffer(Buffer.init());
                }
            },
            .Slice => struct {
                pub fn init(buf: []T) Self {
                    return Self.withBuffer(Buffer.init(buf));
                }
            },
            .Dynamic => struct {
                pub fn init(allocator: *std.mem.Allocator) Self {
                    return Self.withBuffer(Buffer.init(allocator));
                }
            },
        };

        fn withBuffer(buffer: Buffer) Self {
            return Self{
                .mutex = Mutex.init(),
                .putters = Condvar.init(),
                .getters = Condvar.init(),
                .buffer = buffer,
                .is_closed = false,
            };
        }

        pub fn deinit(self: *Self) void {
            self.mutex.deinit();
            self.putters.deinit();
            self.getters.deinit();
            self.buffer.deinit();
            self.* = undefined;
        }

        /// Mark the channel closed and wake every blocked reader and writer.
        pub fn close(self: *Self) void {
            self.mutex.lock();
            defer self.mutex.unlock();

            if (self.is_closed)
                return;

            self.is_closed = true;
            self.putters.broadcast();
            self.getters.broadcast();
        }

        /// Non-blocking single-item write; returns false if the buffer is full.
        pub fn tryWriteItem(self: *Self, item: T) !bool {
            const wrote = try self.write(&[1]T{item});
            return wrote == 1;
        }

        /// Blocking single-item write.
        pub fn writeItem(self: *Self, item: T) !void {
            return self.writeAll(&[1]T{item});
        }

        /// Non-blocking bulk write; returns how many items were accepted.
        pub fn write(self: *Self, items: []const T) !usize {
            return self.writeItems(items, false);
        }

        /// Non-blocking single-item read; null if the buffer is empty.
        pub fn tryReadItem(self: *Self) !?T {
            var items: [1]T = undefined;
            if ((try self.read(&items)) != 1)
                return null;
            return items[0];
        }

        /// Blocking single-item read.
        pub fn readItem(self: *Self) !T {
            var items: [1]T = undefined;
            try self.readAll(&items);
            return items[0];
        }

        /// Non-blocking bulk read; returns how many items were received.
        pub fn read(self: *Self, items: []T) !usize {
            return self.readItems(items, false);
        }

        /// Blocking bulk write of every item.
        pub fn writeAll(self: *Self, items: []const T) !void {
            std.debug.assert((try self.writeItems(items, true)) == items.len);
        }

        /// Blocking bulk read filling the whole slice.
        pub fn readAll(self: *Self, items: []T) !void {
            std.debug.assert((try self.readItems(items, true)) == items.len);
        }

        fn writeItems(self: *Self, items: []const T, should_block: bool) !usize {
            self.mutex.lock();
            defer self.mutex.unlock();

            var pushed: usize = 0;
            while (pushed < items.len) {
                const did_push = blk: {
                    if (self.is_closed)
                        return error.Closed;

                    // Fixed: push the next pending item (`items[pushed]`);
                    // the previous code referenced an undefined `item`.
                    self.buffer.writeItem(items[pushed]) catch |err| {
                        // A Dynamic fifo only fails on OOM: propagate.
                        // Static/Slice fifos fail when full: wait or bail.
                        if (buffer_type == .Dynamic)
                            return err;
                        break :blk false;
                    };

                    self.getters.signal();
                    break :blk true;
                };

                if (did_push) {
                    pushed += 1;
                } else if (should_block) {
                    self.putters.wait(&self.mutex);
                } else {
                    break;
                }
            }

            return pushed;
        }

        fn readItems(self: *Self, items: []T, should_block: bool) !usize {
            self.mutex.lock();
            defer self.mutex.unlock();

            var popped: usize = 0;
            while (popped < items.len) {
                const new_item = blk: {
                    if (self.buffer.readItem()) |item| {
                        self.putters.signal();
                        break :blk item;
                    }

                    // Empty: only an error once the channel is closed.
                    if (self.is_closed)
                        return error.Closed;

                    break :blk null;
                };

                if (new_item) |item| {
                    items[popped] = item;
                    popped += 1;
                } else if (should_block) {
                    self.getters.wait(&self.mutex);
                } else {
                    break;
                }
            }

            return popped;
        }
    };
}
+
/// Reader-writer lock. On non-Windows targets linking libc this wraps
/// pthread_rwlock_t directly; otherwise it falls back to a mutex + semaphore
/// scheme with reader/writer counts packed into one atomic word.
pub const RwLock = if (std.builtin.os.tag != .windows and std.builtin.link_libc)
    struct {
        rwlock: if (std.builtin.os.tag != .windows) pthread_rwlock_t else void,

        pub fn init() RwLock {
            return .{ .rwlock = PTHREAD_RWLOCK_INITIALIZER };
        }

        pub fn deinit(self: *RwLock) void {
            // Some BSDs may return EAGAIN from destroy; treat that as success.
            const safe_rc = switch (std.builtin.os.tag) {
                .dragonfly, .netbsd => std.os.EAGAIN,
                else => 0,
            };

            const rc = std.c.pthread_rwlock_destroy(&self.rwlock);
            std.debug.assert(rc == 0 or rc == safe_rc);

            self.* = undefined;
        }

        pub fn tryLock(self: *RwLock) bool {
            return pthread_rwlock_trywrlock(&self.rwlock) == 0;
        }

        pub fn lock(self: *RwLock) void {
            const rc = pthread_rwlock_wrlock(&self.rwlock);
            std.debug.assert(rc == 0);
        }

        pub fn unlock(self: *RwLock) void {
            const rc = pthread_rwlock_unlock(&self.rwlock);
            std.debug.assert(rc == 0);
        }

        pub fn tryLockShared(self: *RwLock) bool {
            return pthread_rwlock_tryrdlock(&self.rwlock) == 0;
        }

        pub fn lockShared(self: *RwLock) void {
            const rc = pthread_rwlock_rdlock(&self.rwlock);
            std.debug.assert(rc == 0);
        }

        pub fn unlockShared(self: *RwLock) void {
            const rc = pthread_rwlock_unlock(&self.rwlock);
            std.debug.assert(rc == 0);
        }

        const PTHREAD_RWLOCK_INITIALIZER = pthread_rwlock_t{};
        // Per-platform layout of pthread_rwlock_t so a zeroed default works
        // as the static initializer.
        const pthread_rwlock_t = switch (std.builtin.os.tag) {
            .macos, .ios, .watchos, .tvos => extern struct {
                __sig: c_long = 0x2DA8B3B4,
                __opaque: [192]u8 = [_]u8{0} ** 192,
            },
            .linux => switch (std.builtin.abi) {
                .android => switch (@sizeOf(usize)) {
                    4 => extern struct {
                        lock: std.c.pthread_mutex_t = std.c.PTHREAD_MUTEX_INITIALIZER,
                        cond: std.c.pthread_cond_t = std.c.PTHREAD_COND_INITIALIZER,
                        numLocks: c_int = 0,
                        writerThreadId: c_int = 0,
                        pendingReaders: c_int = 0,
                        pendingWriters: c_int = 0,
                        attr: i32 = 0,
                        // Fixed: must repeat 12 times to match [12]u8
                        // (was `** 2`, an array-length mismatch).
                        __reserved: [12]u8 = [_]u8{0} ** 12,
                    },
                    8 => extern struct {
                        numLocks: c_int = 0,
                        writerThreadId: c_int = 0,
                        pendingReaders: c_int = 0,
                        pendingWriters: c_int = 0,
                        attr: i32 = 0,
                        __reserved: [36]u8 = [_]u8{0} ** 36,
                    },
                    else => unreachable,
                },
                else => extern struct {
                    size: [56]u8 align(@alignOf(usize)) = [_]u8{0} ** 56,
                },
            },
            .fuchsia => extern struct {
                size: [56]u8 align(@alignOf(usize)) = [_]u8{0} ** 56,
            },
            .emscripten => extern struct {
                size: [32]u8 align(4) = [_]u8{0} ** 32,
            },
            .netbsd => extern struct {
                ptr_magic: c_uint = 0x99990009,
                ptr_interlock: switch (std.builtin.arch) {
                    .aarch64, .sparc, .x86_64, .i386 => u8,
                    .arm, .powerpc => c_int,
                    else => unreachable,
                } = 0,
                ptr_rblocked_first: ?*u8 = null,
                ptr_rblocked_last: ?*u8 = null,
                ptr_wblocked_first: ?*u8 = null,
                ptr_wblocked_last: ?*u8 = null,
                ptr_nreaders: c_uint = 0,
                ptr_owner: std.c.pthread_t = null,
                ptr_private: ?*c_void = null,
            },
            .haiku => extern struct {
                flags: u32 = 0,
                owner: i32 = -1,
                lock_sem: i32 = 0,
                lock_count: i32 = 0,
                reader_count: i32 = 0,
                writer_count: i32 = 0,
                waiters: [2]?*c_void = [_]?*c_void{ null, null },
            },
            .kfreebsd, .freebsd, .openbsd => extern struct {
                ptr: ?*c_void = null,
            },
            .hermit => extern struct {
                ptr: usize = std.math.maxInt(usize),
            },
            else => @compileError("pthread_rwlock_t not implemented for this platform"),
        };

        extern "c" fn pthread_rwlock_destroy(p: *pthread_rwlock_t) callconv(.C) c_int;
        extern "c" fn pthread_rwlock_rdlock(p: *pthread_rwlock_t) callconv(.C) c_int;
        extern "c" fn pthread_rwlock_wrlock(p: *pthread_rwlock_t) callconv(.C) c_int;
        extern "c" fn pthread_rwlock_tryrdlock(p: *pthread_rwlock_t) callconv(.C) c_int;
        extern "c" fn pthread_rwlock_trywrlock(p: *pthread_rwlock_t) callconv(.C) c_int;
        extern "c" fn pthread_rwlock_unlock(p: *pthread_rwlock_t) callconv(.C) c_int;
    }
else
    struct {
        /// https://github.com/bloomberg/rwl-bench/blob/master/bench11.cpp
        state: usize,
        mutex: Mutex,
        semaphore: Semaphore,

        // state layout: [ reader count | writer count | IS_WRITING bit ]
        const IS_WRITING: usize = 1;
        const WRITER: usize = 1 << 1;
        const READER: usize = 1 << (1 + std.meta.bitCount(Count));
        const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, WRITER);
        const READER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, READER);
        const Count = std.meta.Int(.unsigned, @divFloor(std.meta.bitCount(usize) - 1, 2));

        pub fn init() RwLock {
            return .{
                .state = 0,
                .mutex = Mutex.init(),
                .semaphore = Semaphore.init(0),
            };
        }

        pub fn deinit(self: *RwLock) void {
            self.semaphore.deinit();
            self.mutex.deinit();
            self.* = undefined;
        }

        pub fn tryLock(self: *RwLock) bool {
            if (self.mutex.tryLock()) {
                const state = @atomicLoad(usize, &self.state, .SeqCst);
                if (state & READER_MASK == 0) {
                    _ = @atomicRmw(usize, &self.state, .Or, IS_WRITING, .SeqCst);
                    return true;
                }

                // Readers still active: release the mutex and fail.
                self.mutex.unlock();
            }

            return false;
        }

        pub fn lock(self: *RwLock) void {
            _ = @atomicRmw(usize, &self.state, .Add, WRITER, .SeqCst);
            self.mutex.lock();

            const state = @atomicRmw(usize, &self.state, .Or, IS_WRITING, .SeqCst);
            // Wait for the last active reader to post the semaphore.
            if (state & READER_MASK != 0)
                self.semaphore.wait();
        }

        pub fn unlock(self: *RwLock) void {
            _ = @atomicRmw(usize, &self.state, .And, ~IS_WRITING, .SeqCst);
            self.mutex.unlock();
        }

        pub fn tryLockShared(self: *RwLock) bool {
            const state = @atomicLoad(usize, &self.state, .SeqCst);
            if (state & (IS_WRITING | WRITER_MASK) == 0) {
                _ = @cmpxchgStrong(
                    usize,
                    &self.state,
                    state,
                    state + READER,
                    .SeqCst,
                    .SeqCst,
                ) orelse return true;
            }

            if (self.mutex.tryLock()) {
                _ = @atomicRmw(usize, &self.state, .Add, READER, .SeqCst);
                self.mutex.unlock();
                return true;
            }

            return false;
        }

        pub fn lockShared(self: *RwLock) void {
            // Fast path: CAS the reader count up while no writer is active/waiting.
            var state = @atomicLoad(usize, &self.state, .SeqCst);
            while (state & (IS_WRITING | WRITER_MASK) == 0) {
                state = @cmpxchgWeak(
                    usize,
                    &self.state,
                    state,
                    state + READER,
                    .SeqCst,
                    .SeqCst,
                ) orelse return;
            }

            // Slow path: serialize against writers via the mutex.
            self.mutex.lock();
            _ = @atomicRmw(usize, &self.state, .Add, READER, .SeqCst);
            self.mutex.unlock();
        }

        pub fn unlockShared(self: *RwLock) void {
            const state = @atomicRmw(usize, &self.state, .Sub, READER, .SeqCst);

            // Last reader out while a writer is waiting: wake the writer.
            if ((state & READER_MASK == READER) and (state & IS_WRITING != 0))
                self.semaphore.post();
        }
    };
+
/// Counts outstanding units of work; `wait` blocks until the count hits zero.
pub const WaitGroup = struct {
    mutex: Mutex,
    cond: Condvar,
    active: usize,

    pub fn init() WaitGroup {
        return WaitGroup{
            .active = 0,
            .cond = Condvar.init(),
            .mutex = Mutex.init(),
        };
    }

    pub fn deinit(self: *WaitGroup) void {
        self.cond.deinit();
        self.mutex.deinit();
        self.* = undefined;
    }

    /// Register one more unit of outstanding work.
    pub fn add(self: *WaitGroup) void {
        self.mutex.lock();
        self.active += 1;
        self.mutex.unlock();
    }

    /// Mark one unit complete; signals waiters when the count reaches zero.
    pub fn done(self: *WaitGroup) void {
        self.mutex.lock();
        defer self.mutex.unlock();

        self.active -= 1;
        if (self.active != 0) return;
        self.cond.signal();
    }

    /// Block the caller until every registered unit has called `done`.
    pub fn wait(self: *WaitGroup) void {
        self.mutex.lock();
        defer self.mutex.unlock();

        while (true) {
            if (self.active == 0) return;
            self.cond.wait(&self.mutex);
        }
    }
};
+
/// Counting semaphore built from a mutex and a condition variable.
pub const Semaphore = struct {
    mutex: Mutex,
    cond: Condvar,
    permits: usize,

    pub fn init(permits: usize) Semaphore {
        return Semaphore{
            .permits = permits,
            .cond = Condvar.init(),
            .mutex = Mutex.init(),
        };
    }

    pub fn deinit(self: *Semaphore) void {
        self.cond.deinit();
        self.mutex.deinit();
        self.* = undefined;
    }

    /// Block until a permit is available, then take it.
    pub fn wait(self: *Semaphore) void {
        self.mutex.lock();
        defer self.mutex.unlock();

        while (self.permits == 0) {
            self.cond.wait(&self.mutex);
        }

        self.permits -= 1;
        // Pass the wakeup along if permits remain, since signal wakes one waiter.
        if (self.permits > 0) {
            self.cond.signal();
        }
    }

    /// Release one permit and wake a waiter, if any.
    pub fn post(self: *Semaphore) void {
        self.mutex.lock();
        self.permits += 1;
        self.cond.signal();
        self.mutex.unlock();
    }
};
+
+pub const Mutex = if (std.builtin.os.tag == .windows)
+ struct {
+ srwlock: SRWLOCK,
+
+ pub fn init() Mutex {
+ return .{ .srwlock = SRWLOCK_INIT };
+ }
+
+ pub fn deinit(self: *Mutex) void {
+ self.* = undefined;
+ }
+
+ pub fn tryLock(self: *Mutex) bool {
+ return TryAcquireSRWLockExclusive(&self.srwlock) != system.FALSE;
+ }
+
+ pub fn lock(self: *Mutex) void {
+ AcquireSRWLockExclusive(&self.srwlock);
+ }
+
+ pub fn unlock(self: *Mutex) void {
+ ReleaseSRWLockExclusive(&self.srwlock);
+ }
+
+ const SRWLOCK = usize;
+ const SRWLOCK_INIT: SRWLOCK = 0;
+
+ extern "kernel32" fn TryAcquireSRWLockExclusive(s: *SRWLOCK) callconv(system.WINAPI) system.BOOL;
+ extern "kernel32" fn AcquireSRWLockExclusive(s: *SRWLOCK) callconv(system.WINAPI) void;
+ extern "kernel32" fn ReleaseSRWLockExclusive(s: *SRWLOCK) callconv(system.WINAPI) void;
+ }
+else if (std.builtin.link_libc)
+ struct {
+ mutex: if (std.builtin.link_libc) std.c.pthread_mutex_t else void,
+
+ pub fn init() Mutex {
+ return .{ .mutex = std.c.PTHREAD_MUTEX_INITIALIZER };
+ }
+
+ pub fn deinit(self: *Mutex) void {
+ const safe_rc = switch (std.builtin.os.tag) {
+ .dragonfly, .netbsd => std.os.EAGAIN,
+ else => 0,
+ };
+
+ const rc = std.c.pthread_mutex_destroy(&self.mutex);
+ std.debug.assert(rc == 0 or rc == safe_rc);
+
+ self.* = undefined;
+ }
+
+ pub fn tryLock(self: *Mutex) bool {
+ return pthread_mutex_trylock(&self.mutex) == 0;
+ }
+
+ pub fn lock(self: *Mutex) void {
+ const rc = std.c.pthread_mutex_lock(&self.mutex);
+ std.debug.assert(rc == 0);
+ }
+
+ pub fn unlock(self: *Mutex) void {
+ const rc = std.c.pthread_mutex_unlock(&self.mutex);
+ std.debug.assert(rc == 0);
+ }
+
+ extern "c" fn pthread_mutex_trylock(m: *std.c.pthread_mutex_t) callconv(.C) c_int;
+ }
+else if (std.builtin.os.tag == .linux)
+    // Linux without libc: a word-sized, 3-state futex mutex. The uncontended
+    // path makes no syscalls.
+    struct {
+        state: State,
+
+        const State = enum(i32) {
+            unlocked,
+            // Held; no sleeping waiters known.
+            locked,
+            // Held; there may be sleepers that need a futex wake on unlock.
+            waiting,
+        };
+
+        pub fn init() Mutex {
+            return .{ .state = .unlocked };
+        }
+
+        pub fn deinit(self: *Mutex) void {
+            self.* = undefined;
+        }
+
+        // Single CAS unlocked -> locked; never blocks.
+        pub fn tryLock(self: *Mutex) bool {
+            return @cmpxchgStrong(
+                State,
+                &self.state,
+                .unlocked,
+                .locked,
+                .Acquire,
+                .Monotonic,
+            ) == null;
+        }
+
+        pub fn lock(self: *Mutex) void {
+            // Fast path: unconditional swap to .locked. If the previous value
+            // was anything but .unlocked, fall into the contended slow path.
+            switch (@atomicRmw(State, &self.state, .Xchg, .locked, .Acquire)) {
+                .unlocked => {},
+                else => |s| self.lockSlow(s),
+            }
+        }
+
+        fn lockSlow(self: *Mutex, current_state: State) void {
+            @setCold(true);
+
+            var new_state = current_state;
+            while (true) {
+                // Bounded spin phase: try to grab the lock before sleeping.
+                var spin: u8 = 0;
+                while (spin < 100) : (spin += 1) {
+                    const state = @cmpxchgWeak(
+                        State,
+                        &self.state,
+                        .unlocked,
+                        new_state,
+                        .Acquire,
+                        .Monotonic,
+                    ) orelse return; // null => CAS succeeded, lock acquired
+
+                    switch (state) {
+                        // Spurious cmpxchgWeak failure; just retry.
+                        .unlocked => {},
+                        .locked => {},
+                        // Others are already sleeping; stop spinning.
+                        .waiting => break,
+                    }
+
+                    // Back off proportionally to how long we've spun.
+                    var iter = spin + 1;
+                    while (iter > 0) : (iter -= 1)
+                        spinLoopHint();
+                }
+
+                // Mark the lock contended. If the swap finds .unlocked we own
+                // it (the leftover .waiting only forces a spare wake later).
+                new_state = .waiting;
+                switch (@atomicRmw(State, &self.state, .Xchg, new_state, .Acquire)) {
+                    .unlocked => return,
+                    else => {},
+                }
+
+                // Sleep until the state word changes from .waiting.
+                Futex.wait(
+                    @ptrCast(*const i32, &self.state),
+                    @enumToInt(new_state),
+                );
+            }
+        }
+
+        pub fn unlock(self: *Mutex) void {
+            switch (@atomicRmw(State, &self.state, .Xchg, .unlocked, .Release)) {
+                // Unlocking a mutex that isn't held is a caller bug.
+                .unlocked => unreachable,
+                .locked => {},
+                // Someone may be asleep on the futex; wake one.
+                .waiting => self.unlockSlow(),
+            }
+        }
+
+        fn unlockSlow(self: *Mutex) void {
+            @setCold(true);
+
+            Futex.wake(@ptrCast(*const i32, &self.state));
+        }
+    }
+else
+    // Portable last-resort mutex: a test-and-set spinlock over a single bool.
+    struct {
+        is_locked: bool,
+
+        pub fn init() Mutex {
+            return Mutex{ .is_locked = false };
+        }
+
+        pub fn deinit(self: *Mutex) void {
+            self.* = undefined;
+        }
+
+        // One atomic test-and-set; true when the lock was free and is now ours.
+        pub fn tryLock(self: *Mutex) bool {
+            const was_locked = @atomicRmw(bool, &self.is_locked, .Xchg, true, .Acquire);
+            return was_locked == false;
+        }
+
+        // Busy-wait (with a CPU relax hint) until the test-and-set succeeds.
+        pub fn lock(self: *Mutex) void {
+            while (true) {
+                if (self.tryLock()) return;
+                spinLoopHint();
+            }
+        }
+
+        // Publish the unlocked state with Release ordering.
+        pub fn unlock(self: *Mutex) void {
+            @atomicStore(bool, &self.is_locked, false, .Release);
+        }
+    };
+
+pub const Condvar = if (std.builtin.os.tag == .windows)
+    // Windows: native CONDITION_VARIABLE, paired with the SRWLOCK Mutex.
+    struct {
+        cond: CONDITION_VARIABLE,
+
+        pub fn init() Condvar {
+            return .{ .cond = CONDITION_VARIABLE_INIT };
+        }
+
+        pub fn deinit(self: *Condvar) void {
+            self.* = undefined;
+        }
+
+        pub fn wait(self: *Condvar, mutex: *Mutex) void {
+            // Releases mutex.srwlock, sleeps, and reacquires it before
+            // returning. The timeout is INFINITE, so FALSE here means a real
+            // error rather than a timeout.
+            const rc = SleepConditionVariableSRW(
+                &self.cond,
+                &mutex.srwlock,
+                system.INFINITE,
+                @as(system.ULONG, 0),
+            );
+
+            std.debug.assert(rc != system.FALSE);
+        }
+
+        // Wake one waiter.
+        pub fn signal(self: *Condvar) void {
+            WakeConditionVariable(&self.cond);
+        }
+
+        // Wake all waiters.
+        pub fn broadcast(self: *Condvar) void {
+            WakeAllConditionVariable(&self.cond);
+        }
+
+        // Local kernel32 type/function declarations, mirroring those in the
+        // Windows Mutex above.
+        const SRWLOCK = usize;
+        const CONDITION_VARIABLE = usize;
+        const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = 0;
+
+        extern "kernel32" fn WakeAllConditionVariable(c: *CONDITION_VARIABLE) callconv(system.WINAPI) void;
+        extern "kernel32" fn WakeConditionVariable(c: *CONDITION_VARIABLE) callconv(system.WINAPI) void;
+        extern "kernel32" fn SleepConditionVariableSRW(
+            c: *CONDITION_VARIABLE,
+            s: *SRWLOCK,
+            t: system.DWORD,
+            f: system.ULONG,
+        ) callconv(system.WINAPI) system.BOOL;
+    }
+else if (std.builtin.link_libc)
+    // POSIX: wrap pthread_cond_t; pairs with the pthread-based Mutex.
+    struct {
+        // Field type is guarded so this struct can be analyzed without libc.
+        cond: if (std.builtin.link_libc) std.c.pthread_cond_t else void,
+
+        pub fn init() Condvar {
+            return .{ .cond = std.c.PTHREAD_COND_INITIALIZER };
+        }
+
+        pub fn deinit(self: *Condvar) void {
+            // NOTE(review): EAGAIN tolerated on dragonfly/netbsd, mirroring
+            // Mutex.deinit — confirm against those platforms' pthreads.
+            const safe_rc = switch (std.builtin.os.tag) {
+                .dragonfly, .netbsd => std.os.EAGAIN,
+                else => 0,
+            };
+
+            const rc = std.c.pthread_cond_destroy(&self.cond);
+            std.debug.assert(rc == 0 or rc == safe_rc);
+
+            self.* = undefined;
+        }
+
+        // Atomically releases `mutex`, waits, and reacquires it on return.
+        pub fn wait(self: *Condvar, mutex: *Mutex) void {
+            const rc = std.c.pthread_cond_wait(&self.cond, &mutex.mutex);
+            std.debug.assert(rc == 0);
+        }
+
+        // Wake one waiter.
+        pub fn signal(self: *Condvar) void {
+            const rc = std.c.pthread_cond_signal(&self.cond);
+            std.debug.assert(rc == 0);
+        }
+
+        // Wake all waiters.
+        pub fn broadcast(self: *Condvar) void {
+            const rc = std.c.pthread_cond_broadcast(&self.cond);
+            std.debug.assert(rc == 0);
+        }
+    }
+else
+    // Portable fallback: a condvar built from the spinlock Mutex plus a list
+    // of per-waiter one-shot events (futex-backed on Linux, spinning
+    // elsewhere). Spurious wakeups are possible, as condvar semantics allow.
+    struct {
+        mutex: Mutex,
+        notified: bool,
+        waiters: std.SinglyLinkedList(Event),
+
+        pub fn init() Condvar {
+            return .{
+                .mutex = Mutex.init(),
+                .notified = false,
+                .waiters = .{},
+            };
+        }
+
+        pub fn deinit(self: *Condvar) void {
+            self.mutex.deinit();
+            self.* = undefined;
+        }
+
+        pub fn wait(self: *Condvar, mutex: *Mutex) void {
+            self.mutex.lock();
+
+            // Consume a notification that raced ahead of this waiter.
+            if (self.notified) {
+                self.notified = false;
+                self.mutex.unlock();
+                return;
+            }
+
+            // Enqueue a stack-allocated node, then block on its event. The
+            // node must be removed from self.waiters before this frame
+            // returns; signal()/broadcast() pop/steal nodes before set().
+            var wait_node = @TypeOf(self.waiters).Node{ .data = .{} };
+            self.waiters.prepend(&wait_node);
+            self.mutex.unlock();
+
+            mutex.unlock();
+            wait_node.data.wait();
+            mutex.lock();
+        }
+
+        pub fn signal(self: *Condvar) void {
+            self.mutex.lock();
+
+            // Pop one waiter; if there are none, remember the notification
+            // for the next wait() call.
+            const maybe_wait_node = self.waiters.popFirst();
+            if (maybe_wait_node == null)
+                self.notified = true;
+
+            self.mutex.unlock();
+
+            // Setting outside the lock is safe: the owner cannot return from
+            // wait() (and free the node) until set() stores 1.
+            if (maybe_wait_node) |wait_node|
+                wait_node.data.set();
+        }
+
+        pub fn broadcast(self: *Condvar) void {
+            self.mutex.lock();
+
+            // Steal the entire waiter list. BUG FIX: the list must also be
+            // reset here — the original left self.waiters pointing at nodes
+            // whose stack frames die once their owners wake, so a later
+            // signal() could pop a dangling node (use-after-return).
+            var waiters = self.waiters;
+            self.waiters = .{};
+            self.notified = true;
+
+            self.mutex.unlock();
+
+            while (waiters.popFirst()) |wait_node|
+                wait_node.data.set();
+        }
+
+        // One-shot binary event each waiter sleeps on.
+        const Event = struct {
+            futex: i32 = 0,
+
+            fn wait(self: *Event) void {
+                while (@atomicLoad(i32, &self.futex, .Acquire) == 0) {
+                    if (@hasDecl(Futex, "wait")) {
+                        Futex.wait(&self.futex, 0);
+                    } else {
+                        spinLoopHint();
+                    }
+                }
+            }
+
+            fn set(self: *Event) void {
+                @atomicStore(i32, &self.futex, 1, .Release);
+
+                if (@hasDecl(Futex, "wake"))
+                    Futex.wake(&self.futex);
+            }
+        };
+    };
+
+// Raw futex syscall wrappers; only available on Linux. Elsewhere this is
+// `void`, so call sites gate on @hasDecl(Futex, "wait"/"wake") or OS checks.
+const Futex = switch (std.builtin.os.tag) {
+    .linux => struct {
+        // Sleep while *ptr == cmp; returns on wake, on signal interruption
+        // (EINTR), or immediately if the value already changed (EAGAIN).
+        fn wait(ptr: *const i32, cmp: i32) void {
+            switch (system.getErrno(system.futex_wait(
+                ptr,
+                system.FUTEX_PRIVATE_FLAG | system.FUTEX_WAIT,
+                cmp,
+                null, // no timeout
+            ))) {
+                0 => {},
+                std.os.EINTR => {},
+                std.os.EAGAIN => {},
+                else => unreachable,
+            }
+        }
+
+        // Wake at most one waiter sleeping on ptr.
+        fn wake(ptr: *const i32) void {
+            switch (system.getErrno(system.futex_wake(
+                ptr,
+                system.FUTEX_PRIVATE_FLAG | system.FUTEX_WAKE,
+                @as(i32, 1),
+            ))) {
+                0 => {},
+                // NOTE(review): EFAULT tolerated — presumably because the
+                // waited-on memory (e.g. a stack wait node) may already be
+                // freed by the time wake runs; confirm intent.
+                std.os.EFAULT => {},
+                else => unreachable,
+            }
+        }
+    },
+    else => void,
+};
+
+// Emit the architecture's CPU "relax" instruction for busy-wait loops; a
+// no-op on architectures without one. The branch is resolved at comptime.
+fn spinLoopHint() void {
+    const arch = std.builtin.cpu.arch;
+    if (arch == .i386 or arch == .x86_64) {
+        asm volatile ("pause" ::: "memory");
+    } else if (arch == .arm or arch == .aarch64) {
+        asm volatile ("yield" ::: "memory");
+    }
+}
diff --git a/src/thread_safe_hash_map.zig b/src/thread_safe_hash_map.zig
new file mode 100644
index 000000000..321cb757f
--- /dev/null
+++ b/src/thread_safe_hash_map.zig
@@ -0,0 +1,35 @@
+const std = @import("std");
+const sync = @import("sync.zig");
+usingnamespace @import("global.zig");
+
+// Minimal thread-safe wrapper around std.StringHashMap: reads take a shared
+// (reader) lock, writes take the exclusive (writer) lock.
+pub fn ThreadSafeStringHashMap(comptime Value: type) type {
+    const HashMapType = std.StringHashMap(Value);
+    return struct {
+        backing: HashMapType,
+        lock: sync.RwLock,
+        pub const HashMap = @This();
+
+        // BUG FIX: the original allocated and returned the *inner*
+        // std.StringHashMap (HashMapType) while initializing it with the
+        // wrapper's fields (.backing/.lock), which cannot work. Allocate and
+        // return the wrapper type instead. Caller frees via deinit().
+        pub fn init(allocator: *std.mem.Allocator) !*HashMap {
+            var self = try allocator.create(HashMap);
+            self.* = HashMap{ .backing = HashMapType.init(allocator), .lock = sync.RwLock.init() };
+
+            return self;
+        }
+
+        // Look up `key` under the shared lock; returns a copy of the value.
+        pub fn get(self: *HashMap, key: string) ?Value {
+            self.lock.lockShared();
+            defer self.lock.unlockShared();
+            return self.backing.get(key);
+        }
+
+        // Free the backing map and the wrapper itself. `allocator` must be
+        // the allocator passed to init(); the original accepted it but never
+        // used it, leaking the wrapper allocated by init().
+        pub fn deinit(self: *HashMap, allocator: *std.mem.Allocator) void {
+            self.backing.deinit();
+            allocator.destroy(self);
+        }
+
+        // Insert or overwrite under the exclusive lock. Key bytes are not
+        // copied; the caller keeps them alive for the map's lifetime.
+        pub fn put(self: *HashMap, key: string, value: Value) !void {
+            self.lock.lock();
+            defer self.lock.unlock();
+            try self.backing.put(key, value);
+        }
+    };
+}