author Jarred Sumner <jarred@jarredsumner.com> 2021-05-10 20:05:53 -0700
committer Jarred Sumner <jarred@jarredsumner.com> 2021-05-10 20:05:53 -0700
commit fc75a0dea67aa55fa972b6244358c58ac03bd2d7 (patch)
tree d2a9fd4802e3f9a01aee1618da8d604653695c8d
parent 166c353ddbbd943d1bb49ad7e017a058b8f309ea (diff)
download bun-fc75a0dea67aa55fa972b6244358c58ac03bd2d7.tar.gz
download bun-fc75a0dea67aa55fa972b6244358c58ac03bd2d7.tar.zst
download bun-fc75a0dea67aa55fa972b6244358c58ac03bd2d7.zip
asdasdasdasd
Former-commit-id: 2b3c0584c623486d8ab5dc838bb7ba861b4395d7
-rw-r--r--   README.md                        66
-rw-r--r--   src/cache.zig                   187
-rw-r--r--   src/defines.zig                   9
-rw-r--r--   src/fs.zig                       29
-rw-r--r--   src/global.zig                    5
-rw-r--r--   src/js_ast.zig                    4
-rw-r--r--   src/js_parser/js_parser.zig      13
-rw-r--r--   src/logger.zig                   66
-rw-r--r--   src/main.zig                      2
-rw-r--r--   src/options.zig                  47
-rw-r--r--   src/resolver/data_url.zig       160
-rw-r--r--   src/resolver/package_json.zig   181
-rw-r--r--   src/resolver/resolve_path.zig    21
-rw-r--r--   src/resolver/resolver.zig       357
-rw-r--r--   src/resolver/tsconfig_json.zig  319
-rw-r--r--   src/string_mutable.zig            3
16 files changed, 1374 insertions(+), 95 deletions(-)
diff --git a/README.md b/README.md
index 4c5c2beb0..f423a6004 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# esdev
+# Speedy
Incredibly fast ECMAScript & TypeScript bundler designed for development.
@@ -8,7 +8,7 @@ JavaScript bundlers run very slow in web browsers.
## Purpose
-The purpose of esdev is to very quickly convert ECMAScript/TypeScript into something a web browser can execute.
+The purpose of Speedy is to very quickly convert ECMAScript/TypeScript into something a web browser can execute.
Goals:
@@ -59,33 +59,33 @@ There's a number of reasons for this:
#### Different constraints enable performance improvements
-If bundler means "merge N source files into 1 or few source file(s)", esdev is most definitely not a bundler. Unlike most bundlers today, esdev deliberately outputs
+If bundler means "merge N source files into 1 or few source file(s)", Speedy is most definitely not a bundler. Unlike most bundlers today, Speedy deliberately outputs
If bundler means "turn my development code into something a browser can run",
### Compatibility Table
-| Feature | esbuild | esdev |
-| ------------------------------------ | ------- | ----- |
-| JSX (transform) | ✅ | ✅ |
-| TypeScript (transform) | ✅ | ⌛ |
-| React Fast Refresh | ❌ | ⌛ |
-| Hot Module Reloading | ❌ | ⌛ |
-| Minification | ✅ | ❌ |
-| Tree Shaking | ✅ | ⌛ |
-| Incremental builds | ✅ | ⌛ |
-| CSS | ✅ | 🗓️ |
-| Expose CSS dependencies per file | ✅ | 🗓️ |
-| CommonJS, IIFE, UMD outputs | ✅ | ❌ |
-| Node.js build target | ✅ | ❌ |
-| Code Splitting | ✅ | ⌛ |
-| Browser build target | ✅ | ⌛ |
-| Bundling for production | ✅ | ❌ |
-| Support older browsers | ✅ | ❌ |
-| Plugins | ✅ | 🗓️ |
-| AST Plugins | ❌ | ❌ |
-| Filesystem Cache API (for plugins) | ❓ | 🗓️ |
-| Transform to ESM with `bundle` false | ❓ | ⌛ |
+| Feature | Speedy |
+| ------------------------------------ | ------ |
+| JSX (transform) | ✅ |
+| TypeScript (transform) | ⌛ |
+| React Fast Refresh | ⌛ |
+| Hot Module Reloading | ⌛ |
+| Minification | ❌ |
+| Tree Shaking | ⌛ |
+| Incremental builds | ⌛ |
+| CSS | 🗓️ |
+| Expose CSS dependencies per file | 🗓️ |
+| CommonJS, IIFE, UMD outputs | ❌ |
+| Node.js build target | ❌ |
+| Code Splitting | ⌛ |
+| Browser build target | ⌛ |
+| Bundling for production | ❌ |
+| Support older browsers | ❌ |
+| Plugins | 🗓️ |
+| AST Plugins | ❌ |
+| Filesystem Cache API (for plugins) | 🗓️ |
+| Transform to ESM with `bundle` false | ⌛ |
Key:
@@ -97,19 +97,29 @@ Key:
| 🗓️ | Planned but work has not started |
| ❓ | Unknown |
+### Compatibility Table (more info)
+
+| Feature | Speedy |
+| -------------------------------- | ------ |
+| `browser` in `package.json` | ⌛ |
+| main fields in `package.json` | ⌛ |
+| `exports` map in `package.json` | 🗓️ |
+| `sideEffects` in `package.json`  | 🗓️ |
+| `extends` in `tsconfig.json` | 🗓️ |
+
#### Notes
##### Hot Module Reloading & React Fast Refresh
-esdev exposes a runtime API to support Hot Module Reloading and React Fast Refresh. React Fast Refresh depends on Hot Module Reloading to work, but you can turn either of them off. esdev itself doesn't serve bundled files, it's up to the development server to provide that.
+Speedy exposes a runtime API to support Hot Module Reloading and React Fast Refresh. React Fast Refresh depends on Hot Module Reloading to work, but you can turn either of them off. Speedy itself doesn't serve bundled files; that's up to the development server.
##### Code Splitting
-esdev supports code splitting the way browsers do natively: through ES Modules. This works great for local development files. It doesn't work great for node_modules or for production due to the sheer number of network requests. There are plans to make this better, stay tuned.
+Speedy supports code splitting the way browsers do natively: through ES Modules. This works great for local development files. It doesn't work great for node_modules or for production due to the sheer number of network requests. There are plans to make this better; stay tuned.
##### Support older browsers
-To simplify the parser, esdev doesn't support lowering features to non-current browsers. This means if you run a development build with esdev with, for example, optional chaining, it won't work in Internet Explorer 11. If you want to support older browsers, use a different tool.
+To simplify the parser, Speedy doesn't support lowering features for older browsers. This means that if you run a development build with Speedy and use, for example, optional chaining, it won't work in Internet Explorer 11. If you want to support older browsers, use a different tool.
#### Implementation Notes
@@ -162,6 +172,6 @@ There are two ways to update references:
Either approach works.
-###### How it's implemented in esdev
+###### How it's implemented in Speedy
TODO: doc
diff --git a/src/cache.zig b/src/cache.zig
index 1f4abb91c..e0df61ae8 100644
--- a/src/cache.zig
+++ b/src/cache.zig
@@ -1,9 +1,188 @@
+usingnamespace @import("global.zig");
+
+const js_ast = @import("./js_ast.zig");
+const logger = @import("./logger.zig");
+const js_parser = @import("./js_parser/js_parser.zig");
+const json_parser = @import("./json_parser.zig");
+const options = @import("./options.zig");
+const Defines = @import("./defines.zig").Defines;
+const std = @import("std");
+const fs = @import("./fs.zig");
+
pub const Cache = struct {
- pub const Fs = struct {};
+ pub const Set = struct {
+ js: JavaScript,
+ fs: Fs,
+ json: Json,
+ };
+ pub const Fs = struct {
+ mutex: std.Thread.Mutex,
+ entries: std.StringHashMap(Entry),
+
+ pub const Entry = struct {
+ contents: string,
+ // Null means its not usable
+ mod_key: ?fs.FileSystem.Implementation.ModKey = null,
+
+ pub fn deinit(entry: *Entry, allocator: *std.mem.Allocator) void {
+ if (entry.contents.len > 0) {
+ allocator.free(entry.contents);
+ entry.contents = "";
+ }
+ }
+ };
+
+ pub fn deinit(c: *Fs) void {
+ var iter = c.entries.iterator();
+ while (iter.next()) |entry| {
+ entry.value.deinit(c.entries.allocator);
+ }
+ c.entries.deinit();
+ }
+
+ pub fn readFile(c: *Fs, _fs: fs.FileSystem, path: string) !*Entry {
+ const rfs: fs.FileSystem.RealFS = _fs.fs;
+
+ {
+ const hold = c.mutex.acquire();
+ defer hold.release();
+ if (c.entries.getEntry(path)) |cached| {
+ return &cached.value;
+ }
+ }
+
+ // If the file's modification key hasn't changed since it was cached, assume
+ // the contents of the file are also the same and skip reading the file.
+ var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKey(path) catch |err| blk: {
+ switch (err) {
+ error.FileNotFound, error.AccessDenied => {
+ return err;
+ },
+ else => {
+ if (isDebug) {
+ Output.printError("modkey error: {s}", .{@errorName(err)});
+ }
+ break :blk null;
+ },
+ }
+
+ const size = if (mod_key != null) mod_key.?.size else null;
+ const file = rfs.readFile(path, size) catch |err| {
+ if (isDebug) {
+ Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ }
+ return err;
+ };
+
+ const entry = Entry{
+ .contents = file.contents,
+ .mod_key = mod_key,
+ };
+
+ const hold = c.mutex.acquire();
+ defer hold.release();
+ var res = c.entries.getOrPut(path) catch unreachable;
+ if (res.found_existing) {
+ res.entry.value.deinit(c.entries.allocator);
+ }
+
+ res.entry.value = entry;
+ return &res.entry.value;
+ }
+ };
+
+ pub const Css = struct {
+ pub const Entry = struct {};
+ pub const Result = struct {
+ ok: bool,
+ value: void,
+ };
+ pub fn parse(cache: *@This(), log: *logger.Log, source: logger.Source) !Result {
+ Global.notimpl();
+ }
+ };
+
+ pub const JavaScript = struct {
+ pub const Entry = struct {
+ ast: js_ast.Ast,
+ source: logger.Source,
+ ok: bool,
+ msgs: []logger.Msg,
+ };
+ pub const Result = js_ast.Result;
+ // For now, we're not going to cache JavaScript ASTs.
+ // It's probably only relevant when bundling for production.
+ pub fn parse(cache: *@This(), allocator: *std.mem.Allocator, opts: options.TransformOptions, defines: Defines, log: *logger.Log, source: logger.Source) anyerror!?js_ast.Ast {
+ // note: appendTo() consumes temp_log on every path below, so no deinit here
+ var temp_log = logger.Log.init(allocator);
+
+ var parser = js_parser.Parser.init(opts, &temp_log, &source, defines, allocator) catch |err| {
+ temp_log.appendTo(log) catch {};
+ return null;
+ };
+ const result = parser.parse() catch |err| {
+ temp_log.appendTo(log) catch {};
+ return null;
+ };
+
+ temp_log.appendTo(log) catch {};
+ return if (result.ok) result.ast else null;
+ }
+ };
+
+ pub const Json = struct {
+ pub const Entry = struct {
+ is_tsconfig: bool = false,
+ source: logger.Source,
+ expr: ?js_ast.Expr = null,
+ ok: bool = false,
+ // msgs: []logger.Msg,
+ };
+ mutex: std.Thread.Mutex,
+ entries: std.StringHashMap(*Entry),
+ pub fn init(allocator: *std.mem.Allocator) Json {
+ return Json{
+ .mutex = std.Thread.Mutex{},
+ .entries = std.StringHashMap(*Entry).init(allocator),
+ };
+ }
+ fn parse(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator, is_tsconfig: bool, func: anytype) anyerror!?js_ast.Expr {
+ {
+ const hold = cache.mutex.acquire();
+ defer hold.release();
+ if (cache.entries.get(source.key_path.text)) |entry| {
+ return entry.expr;
+ }
+ }
- pub const Css = struct {};
+ var temp_log = logger.Log.init(allocator);
+ defer {
+ temp_log.appendTo(log) catch {};
+ }
+ const expr: ?js_ast.Expr = func(&source, &temp_log, allocator) catch null;
+ const entry = try allocator.create(Entry);
+ entry.* = Entry{
+ .is_tsconfig = is_tsconfig,
+ .source = source,
+ .expr = expr,
+ .ok = expr != null,
+ };
- pub const JavaScript = struct {};
+ const hold = cache.mutex.acquire();
+ defer hold.release();
+ std.debug.assert(source.key_path.text.len > 0); // missing key_path in source
+ try cache.entries.put(source.key_path.text, entry);
+ return entry.expr;
+ }
+ pub fn parseJSON(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
+ return @call(std.builtin.CallOptions{ .modifier = .always_tail }, parse, .{ cache, log, source, allocator, false, json_parser.ParseJSON });
+ }
- pub const Json = struct {};
+ pub fn parseTSConfig(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
+ return @call(std.builtin.CallOptions{ .modifier = .always_tail }, parse, .{ cache, log, source, allocator, true, json_parser.ParseTSConfig });
+ }
+ };
};
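
A minimal sketch of how the new Cache.Fs is meant to be used (hypothetical; this commit includes no init helper for Cache.Fs, so the struct is built by hand here, and `allocator` plus an initialized fs.FileSystem value `real_fs` are assumed):

    // First call stats the file via modKey(), reads it, and caches the entry;
    // the second call returns the same *Entry without touching the disk.
    var fs_cache = Cache.Fs{
        .mutex = std.Thread.Mutex{},
        .entries = std.StringHashMap(Cache.Fs.Entry).init(allocator),
    };
    defer fs_cache.deinit();

    const first = try fs_cache.readFile(real_fs, "/project/package.json");
    const again = try fs_cache.readFile(real_fs, "/project/package.json");
    std.debug.assert(first == again);
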
diff --git a/src/defines.zig b/src/defines.zig
index 020a0b8c9..48f5197bf 100644
--- a/src/defines.zig
+++ b/src/defines.zig
@@ -24,7 +24,7 @@ const Globals = struct {
pub const InfinityData = js_ast.Expr.Data{ .e_number = Globals.InfinityPtr };
};
-const defines_path = fs.Path.init("/tmp/internal/defines.json");
+const defines_path = fs.Path.initWithNamespace("defines.json", "internal");
pub const RawDefines = std.StringHashMap(string);
pub const UserDefines = std.StringHashMap(DefineData);
@@ -96,7 +96,12 @@ pub const DefineData = struct {
continue;
}
var _log = log;
- var source = logger.Source{ .contents = entry.value, .path = defines_path, .identifier_name = "" };
+ var source = logger.Source{
+ .contents = entry.value,
+ .path = defines_path,
+ .identifier_name = "defines",
+ .key_path = fs.Path.initWithNamespace("defines", "internal"),
+ };
var expr = try json_parser.ParseJSON(&source, _log, allocator);
var data: js_ast.Expr.Data = undefined;
switch (expr.data) {
diff --git a/src/fs.zig b/src/fs.zig
index 4952e3c18..c4dbbffa4 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -6,6 +6,8 @@ const alloc = @import("alloc.zig");
const expect = std.testing.expect;
const Mutex = std.Thread.Mutex;
+const resolvePath = @import("./resolver/resolve_path.zig").resolvePath;
+
// pub const FilesystemImplementation = @import("fs_impl.zig");
//
@@ -414,16 +416,15 @@ pub const FileSystem = struct {
return err;
}
- pub fn readFile(fs: *RealFS, path: string) !File {
+ pub fn readFile(fs: *RealFS, path: string, _size: ?usize) !File {
fs.limiter.before();
defer fs.limiter.after();
const file: std.fs.File = std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true, .write = false }) catch |err| return fs.readFileError(path, err);
defer file.close();
- // return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
- // TODO: this causes an extra call to .stat, do it manually and cache the results ourself.
- const size = try file.getEndPos() catch |err| return fs.readFileError(path, err);
+ // Skip the extra file.stat() call when possible
+ const size = _size orelse (file.getEndPos() catch |err| return fs.readFileError(path, err));
const file_contents: []u8 = file.readToEndAllocOptions(fs.allocator, size, size, @alignOf(u8), null) catch |err| return fs.readFileError(path, err);
if (fs.watcher) |watcher| {
@@ -596,14 +597,24 @@ pub const PathName = struct {
}
};
+threadlocal var normalize_buf: [1024]u8 = undefined;
+
pub const Path = struct {
pretty: string,
text: string,
- namespace: string,
+ namespace: string = "unspecified",
name: PathName,
- // TODO:
- pub fn normalize(str: string) string {
+ pub fn generateKey(p: *Path, allocator: *std.mem.Allocator) !string {
+ return try std.fmt.allocPrint(allocator, "{s}://{s}", .{ p.namespace, p.text });
+ }
+
+ // for now, assume you won't try to normalize a path longer than 1024 chars
+ pub fn normalize(str: string, allocator: *std.mem.Allocator) string {
+ if (str.len == 0 or (str.len == 1 and str[0] == ' ')) return ".";
+ if (resolvePath(&normalize_buf, str)) |out| {
+ return allocator.dupe(u8, out) catch unreachable;
+ }
return str;
}
@@ -611,6 +622,10 @@ pub const Path = struct {
return Path{ .pretty = text, .text = text, .namespace = "file", .name = PathName.init(text) };
}
+ pub fn initWithNamespace(text: string, namespace: string) Path {
+ return Path{ .pretty = text, .text = text, .namespace = namespace, .name = PathName.init(text) };
+ }
+
pub fn isBefore(a: *Path, b: Path) bool {
return a.namespace > b.namespace ||
(a.namespace == b.namespace and (a.text < b.text ||
diff --git a/src/global.zig b/src/global.zig
index 478035519..b93569b96 100644
--- a/src/global.zig
+++ b/src/global.zig
@@ -16,6 +16,8 @@ pub const isWasm = build_target == .wasm;
pub const isNative = build_target == .native;
pub const isWasi = build_target == .wasi;
+pub const isDebug = std.builtin.Mode.Debug == std.builtin.mode;
+
pub const Output = struct {
var source: *Source = undefined;
pub const Source = struct {
@@ -85,4 +87,7 @@ pub const Global = struct {
std.debug.panic(fmt, args);
}
}
+ pub fn notimpl() noreturn {
+ Global.panic("Not implemented yet!!!!!", .{});
+ }
};
diff --git a/src/js_ast.zig b/src/js_ast.zig
index 95dd3b96f..e7bf82dfa 100644
--- a/src/js_ast.zig
+++ b/src/js_ast.zig
@@ -1403,7 +1403,9 @@ pub const Expr = struct {
return if (key_str.isUTF8()) key_str.value else key_str.string(allocator);
}
- pub fn getBool(expr: *Expr, allocator: *std.mem.Allocator) ?bool {
+ pub fn getBool(expr: *const Expr) ?bool {
const obj: *E.Boolean = expr.data.e_boolean orelse return null;
return obj.value;
diff --git a/src/js_parser/js_parser.zig b/src/js_parser/js_parser.zig
index 962ffb810..9ecec4c4a 100644
--- a/src/js_parser/js_parser.zig
+++ b/src/js_parser/js_parser.zig
@@ -1245,7 +1245,7 @@ pub const Parser = struct {
p: ?*P,
pub const Options = struct {
- jsx: options.JSX,
+ jsx: options.JSX.Pragma,
ts: bool = false,
ascii_only: bool = true,
keep_names: bool = true,
@@ -1431,16 +1431,9 @@ pub const Parser = struct {
pub fn init(transform: options.TransformOptions, log: *logger.Log, source: *logger.Source, define: *Define, allocator: *std.mem.Allocator) !Parser {
const lexer = try js_lexer.Lexer.init(log, source, allocator);
+ const jsx = transform.jsx orelse options.JSX.Pragma{ .parse = false };
return Parser{
- .options = Options{
- .ts = transform.loader == .tsx or transform.loader == .ts,
- .jsx = options.JSX{
- .parse = transform.loader == .tsx or transform.loader == .jsx,
- .factory = transform.jsx_factory,
- .fragment = transform.jsx_fragment,
- .import_source = transform.jsx_import_source,
- },
- },
+ .options = Options{ .ts = transform.loader == .tsx or transform.loader == .ts, .jsx = jsx },
.allocator = allocator,
.lexer = lexer,
.define = define,
diff --git a/src/logger.zig b/src/logger.zig
index 1b286380f..cbf86b63c 100644
--- a/src/logger.zig
+++ b/src/logger.zig
@@ -59,6 +59,9 @@ pub const Location = struct {
suggestion: ?string = null,
offset: usize = 0,
+ // don't really know what's safe to deinit here!
+ pub fn deinit(l: *Location, allocator: *std.mem.Allocator) void {}
+
pub fn init(file: []u8, namespace: []u8, line: i32, column: i32, length: u32, line_text: ?[]u8, suggestion: ?[]u8) Location {
return Location{
.file = file,
@@ -104,12 +107,32 @@ pub const Location = struct {
}
};
-pub const Data = struct { text: string, location: ?Location = null };
+pub const Data = struct {
+ text: string,
+ location: ?Location = null,
+ pub fn deinit(d: *Data, allocator: *std.mem.Allocator) void {
+ if (d.location) |*loc| {
+ loc.deinit(allocator);
+ }
+
+ allocator.free(d.text);
+ }
+};
pub const Msg = struct {
kind: Kind = Kind.err,
data: Data,
notes: ?[]Data = null,
+
+ pub fn deinit(msg: *Msg, allocator: *std.mem.Allocator) void {
+ msg.data.deinit(allocator);
+ if (msg.notes) |notes| {
+ for (notes) |*note| {
+ note.deinit(allocator);
+ }
+ }
+ msg.notes = null;
+ }
pub fn doFormat(msg: *const Msg, to: anytype, formatterFunc: @TypeOf(std.fmt.format)) !void {
try formatterFunc(to, "\n\n{s}: {s}\n{s}\n{s}:{}:{} {d}", .{
msg.kind.string(),
@@ -157,6 +180,15 @@ pub const Log = struct {
warnings: usize = 0,
errors: usize = 0,
msgs: ArrayList(Msg),
+ level: Level = Level.debug,
+
+ pub const Level = enum {
+ verbose,
+ debug,
+ info,
+ warn,
+ err,
+ };
pub fn init(allocator: *std.mem.Allocator) Log {
return Log{
@@ -171,6 +203,17 @@ pub const Log = struct {
});
}
+ pub fn appendTo(self: *Log, other: *Log) !void {
+ try other.msgs.appendSlice(self.msgs.items);
+ other.warnings += self.warnings;
+ other.errors += self.errors;
+ self.msgs.deinit();
+ }
+
+ pub fn deinit(self: *Log) void {
+ self.msgs.deinit();
+ }
+
pub fn addVerboseWithNotes(source: ?*Source, loc: Loc, text: string, notes: []Data) !void {
try log.addMsg(Msg{
.kind = .verbose,
@@ -251,6 +294,15 @@ pub const Log = struct {
});
}
+ pub fn addRangeDebugWithNotes(log: *Log, source: ?*Source, r: Range, text: string, notes: []Data) !void {
+ try log.addMsg(Msg{
+ .kind = Kind.debug,
+ .data = rangeData(source, r, text),
+ .notes = notes,
+ });
+ }
+
pub fn addRangeErrorWithNotes(log: *Log, source: ?*Source, r: Range, text: string, notes: []Data) !void {
log.errors += 1;
try log.addMsg(Msg{
@@ -298,6 +350,7 @@ pub fn usize2Loc(loc: usize) Loc {
pub const Source = struct {
path: fs.Path,
+ key_path: fs.Path,
index: u32 = 0,
contents: string,
@@ -313,11 +366,18 @@ pub const Source = struct {
line_count: usize,
};
- pub fn initFile(file: fs.File, allocator: *std.mem.Allocator) Source {
+ pub fn initFile(file: fs.File, allocator: *std.mem.Allocator) !Source {
var name = file.path.name;
var identifier_name = name.nonUniqueNameString(allocator) catch unreachable;
- return Source{ .path = file.path, .identifier_name = identifier_name, .contents = file.contents };
+ var source = Source{
+ .path = file.path,
+ .key_path = fs.Path.init(file.path.text),
+ .identifier_name = identifier_name,
+ .contents = file.contents,
+ };
+ source.path.namespace = "file";
+ return source;
}
pub fn initPathString(pathString: string, contents: string) Source {
diff --git a/src/main.zig b/src/main.zig
index 1edb78dde..b0f98177e 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -54,7 +54,7 @@ pub fn main() anyerror!void {
const opts = try options.TransformOptions.initUncached(alloc.dynamic, entryPointName, code);
- var source = logger.Source.initFile(opts.entry_point, alloc.dynamic);
+ var source = try logger.Source.initFile(opts.entry_point, alloc.dynamic);
var ast: js_ast.Ast = undefined;
var raw_defines = RawDefines.init(alloc.static);
try raw_defines.put("process.env.NODE_ENV", "\"development\"");
diff --git a/src/options.zig b/src/options.zig
index f4c2a85c1..3917fdaa7 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -7,6 +7,17 @@ usingnamespace @import("global.zig");
const assert = std.debug.assert;
+pub const ModuleType = enum {
+ unknown,
+ cjs,
+ esm,
+
+ pub const List = std.ComptimeStringMap(ModuleType, .{
+ .{ "commonjs", ModuleType.cjs },
+ .{ "module", ModuleType.esm },
+ });
+};
+
pub const Platform = enum {
node,
browser,
@@ -14,7 +25,7 @@ pub const Platform = enum {
const MAIN_FIELD_NAMES = [_]string{ "browser", "module", "main" };
pub const DefaultMainFields: std.EnumArray(Platform, []string) = comptime {
- var array = std.EnumArray(Platform, []string);
+ var array = std.EnumArray(Platform, []string).initUndefined();
// Note that this means if a package specifies "module" and "main", the ES6
// module will not be selected. This means tree shaking will not work when
@@ -32,7 +43,8 @@ pub const Platform = enum {
// If you want to enable tree shaking when targeting node, you will have to
// configure the main fields to be "module" and then "main". Keep in mind
// that some packages may break if you do this.
- array.set(Platform.node, &([_]string{ MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[2] }));
+ var list = [_]string{ MAIN_FIELD_NAMES[2], MAIN_FIELD_NAMES[1] };
+ array.set(Platform.node, &list);
// Note that this means if a package specifies "main", "module", and
// "browser" then "browser" will win out over "module". This is the
@@ -41,7 +53,8 @@ pub const Platform = enum {
// This is deliberate because the presence of the "browser" field is a
// good signal that the "module" field may have non-browser stuff in it,
// which will crash or fail to be bundled when targeting the browser.
- array.set(Platform.browser, &([_]string{ MAIN_FIELD_NAMES[0], MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[2] }));
+ var listc = [_]string{ MAIN_FIELD_NAMES[0], MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[2] };
+ array.set(Platform.browser, &listc);
// The neutral platform is for people that don't want esbuild to try to
// pick good defaults for their platform. In that case, the list of main
@@ -60,6 +73,10 @@ pub const Loader = enum {
css,
file,
json,
+
+ pub fn isJSX(loader: Loader) bool {
+ return loader == .jsx or loader == .tsx;
+ }
};
pub const defaultLoaders = std.ComptimeStringMap(Loader, .{
@@ -82,6 +99,10 @@ pub const JSX = struct {
/// Set on a per file basis like this:
/// /** @jsxImportSource @emotion/core */
import_source: string = "react",
+ jsx: string = "jsxDEV",
+
+ development: bool = true,
+ parse: bool = true,
};
parse: bool = true,
@@ -106,24 +127,23 @@ pub const TransformOptions = struct {
footer: string = "",
banner: string = "",
define: std.StringHashMap(string),
- loader: Loader = Loader.tsx,
+ loader: Loader = Loader.js,
resolve_dir: string = "/",
- jsx_factory: string = "React.createElement",
- jsx_fragment: string = "Fragment",
- jsx_import_source: string = "react",
- ts: bool = true,
+ jsx: ?JSX.Pragma,
react_fast_refresh: bool = false,
inject: ?[]string = null,
public_url: string = "/",
- filesystem_cache: std.StringHashMap(fs.File),
+ preserve_symlinks: bool = false,
entry_point: fs.File,
resolve_paths: bool = false,
+ tsconfig_override: ?string = null,
+
+ platform: Platform = Platform.browser,
+ main_fields: []string = Platform.DefaultMainFields.get(Platform.browser),
pub fn initUncached(allocator: *std.mem.Allocator, entryPointName: string, code: string) !TransformOptions {
assert(entryPointName.len > 0);
- var filesystemCache = std.StringHashMap(fs.File).init(allocator);
-
var entryPoint = fs.File{
.path = fs.Path.init(entryPointName),
.contents = code,
@@ -139,16 +159,15 @@ pub const TransformOptions = struct {
loader = defaultLoader;
}
- assert(loader != .file);
assert(code.len > 0);
- try filesystemCache.put(entryPointName, entryPoint);
return TransformOptions{
.entry_point = entryPoint,
.define = define,
.loader = loader,
- .filesystem_cache = filesystemCache,
.resolve_dir = entryPoint.path.name.dir,
+ .main_fields = Platform.DefaultMainFields.get(Platform.browser),
+ .jsx = if (Loader.isJSX(loader)) JSX.Pragma{} else null,
};
}
};
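
To make the ordering above concrete, a small hypothetical sketch using only names defined in this file:

    // Browser builds prefer "browser", then "module", then "main";
    // node builds mirror require() and check "main" before "module".
    const browser_fields = Platform.DefaultMainFields.get(Platform.browser);
    const node_fields = Platform.DefaultMainFields.get(Platform.node);
    std.debug.assert(std.mem.eql(u8, browser_fields[0], "browser"));
    std.debug.assert(std.mem.eql(u8, node_fields[0], "main"));
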
diff --git a/src/resolver/data_url.zig b/src/resolver/data_url.zig
new file mode 100644
index 000000000..48076521b
--- /dev/null
+++ b/src/resolver/data_url.zig
@@ -0,0 +1,160 @@
+usingnamespace @import("../global.zig");
+
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+
+// https://github.com/Vexu/zuri/blob/master/src/zuri.zig#L61-L127
+pub const PercentEncoding = struct {
+ /// possible errors for decode and encode
+ pub const EncodeError = error{
+ InvalidCharacter,
+ OutOfMemory,
+ };
+
+ /// returns true if c is a hexadecimal digit
+ pub fn isHex(c: u8) bool {
+ return switch (c) {
+ '0'...'9', 'a'...'f', 'A'...'F' => true,
+ else => false,
+ };
+ }
+
+ /// returns true if str starts with a valid path character or a percent encoded octet
+ pub fn isPchar(str: []const u8) bool {
+ assert(str.len > 0);
+ return switch (str[0]) {
+ 'a'...'z', 'A'...'Z', '0'...'9', '-', '.', '_', '~', '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '@' => true,
+ '%' => str.len > 3 and isHex(str[1]) and isHex(str[2]),
+ else => false,
+ };
+ }
+
+ /// decode path if it is percent encoded
+ pub fn decode(allocator: *std.mem.Allocator, path: []const u8) EncodeError!?[]u8 {
+ var ret: ?[]u8 = null;
+ errdefer if (ret) |some| allocator.free(some);
+ var ret_index: usize = 0;
+ var i: usize = 0;
+
+ while (i < path.len) : (i += 1) {
+ if (path[i] == '%') {
+ if (!isPchar(path[i..])) {
+ return error.InvalidCharacter;
+ }
+ if (ret == null) {
+ ret = try allocator.alloc(u8, path.len);
+ mem.copy(u8, ret.?, path[0..i]);
+ ret_index = i;
+ }
+
+ // charToDigit can't fail because the chars are validated earlier
+ var new = (std.fmt.charToDigit(path[i + 1], 16) catch unreachable) << 4;
+ new |= std.fmt.charToDigit(path[i + 2], 16) catch unreachable;
+ ret.?[ret_index] = new;
+ ret_index += 1;
+ i += 2;
+ } else if (path[i] != '/' and !isPchar(path[i..])) {
+ return error.InvalidCharacter;
+ } else if (ret != null) {
+ ret.?[ret_index] = path[i];
+ ret_index += 1;
+ }
+ }
+
+ if (ret) |some| return allocator.shrink(some, ret_index);
+ return null;
+ }
+
+ /// percent encode if path contains characters not allowed in paths
+ pub fn encode(allocator: *std.mem.Allocator, path: []const u8) EncodeError!?[]u8 {
+ var ret: ?[]u8 = null;
+ var ret_index: usize = 0;
+ for (path) |c, i| {
+ if (c != '/' and !isPchar(path[i..])) {
+ if (ret == null) {
+ ret = try allocator.alloc(u8, path.len * 3);
+ mem.copy(u8, ret.?, path[0..i]);
+ ret_index = i;
+ }
+ const hex_digits = "0123456789ABCDEF";
+ ret.?[ret_index] = '%';
+ ret.?[ret_index + 1] = hex_digits[(c & 0xF0) >> 4];
+ ret.?[ret_index + 2] = hex_digits[c & 0x0F];
+ ret_index += 3;
+ } else if (ret != null) {
+ ret.?[ret_index] = c;
+ ret_index += 1;
+ }
+ }
+
+ if (ret) |some| return allocator.shrink(some, ret_index);
+ return null;
+ }
+};
+
+pub const MimeType = enum {
+ Unsupported,
+ TextCSS,
+ TextJavaScript,
+ ApplicationJSON,
+
+ pub const Map = std.ComptimeStringMap(MimeType, .{
+ .{ "text/css", MimeType.TextCSS },
+ .{ "text/javascript", MimeType.TextJavaScript },
+ .{ "application/json", MimeType.ApplicationJSON },
+ });
+
+ pub fn decode(str: string) MimeType {
+ // Remove things like ";charset=utf-8"
+ var mime_type = str;
+ if (strings.indexOfChar(mime_type, ';')) |semicolon| {
+ mime_type = mime_type[0..semicolon];
+ }
+
+ return Map.get(mime_type) orelse MimeType.Unsupported;
+ }
+};
+
+pub const DataURL = struct {
+ mime_type: string,
+ data: string,
+ is_base64: bool,
+
+ pub fn parse(url: string) ?DataURL {
+ if (!strings.startsWith(url, "data:")) {
+ return null;
+ }
+
+ const comma = strings.indexOfChar(url, ',') orelse return null;
+
+ var parsed = DataURL{
+ .mime_type = url["data:".len..comma],
+ .data = url[comma + 1 .. url.len],
+ .is_base64 = false,
+
+ if (strings.endsWith(parsed.mime_type, ";base64")) {
+ parsed.mime_type = parsed.mime_type[0..(parsed.mime_type.len - ";base64".len)];
+ parsed.is_base64 = true;
+ }
+
+ return parsed;
+ }
+
+ pub fn decode_mime_type(d: DataURL) MimeType {
+ return MimeType.decode(d.mime_type);
+ }
+
+ pub fn decode_data(d: *DataURL, allocator: *std.mem.Allocator, url: string) !string {
+ // Try to read base64 data
+ if (d.is_base64) {
+ const size = try std.base64.standard.Decoder.calcSizeForSlice(d.data);
+ var buf = try allocator.alloc(u8, size);
+ try std.base64.standard.Decoder.decode(buf, d.data);
+ return buf;
+ }
+
+ // Try to read percent-escaped data
+ return (try PercentEncoding.decode(allocator, url)) orelse url;
+ }
+};
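
A small hypothetical example of the data URL parser above:

    // The ";base64" suffix is stripped from the mime type and recorded in
    // is_base64; everything after the comma is the payload.
    if (DataURL.parse("data:text/javascript;base64,aGk=")) |data_url| {
        std.debug.assert(data_url.is_base64);
        std.debug.assert(data_url.decode_mime_type() == .TextJavaScript);
        std.debug.assert(std.mem.eql(u8, data_url.data, "aGk="));
    }
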
diff --git a/src/resolver/package_json.zig b/src/resolver/package_json.zig
new file mode 100644
index 000000000..3bab16ef2
--- /dev/null
+++ b/src/resolver/package_json.zig
@@ -0,0 +1,181 @@
+usingnamespace @import("../global.zig");
+const std = @import("std");
+const options = @import("../options.zig");
+const log = @import("../logger.zig");
+const cache = @import("../cache.zig");
+const logger = @import("../logger.zig");
+const js_ast = @import("../js_ast.zig");
+const alloc = @import("../alloc.zig");
+const fs = @import("../fs.zig");
+const resolver = @import("./resolver.zig");
+
+const MainFieldMap = std.StringHashMap(string);
+const BrowserMap = std.StringHashMap(string);
+var has_set_default_main_fields = false;
+
+pub const PackageJSON = struct {
+ source: logger.Source,
+ main_fields: MainFieldMap,
+ module_type: options.ModuleType,
+
+ // Present if the "browser" field is present. This field is intended to be
+ // used by bundlers and lets you redirect the paths of certain 3rd-party
+ // modules that don't work in the browser to other modules that shim that
+ // functionality. That way you don't have to rewrite the code for those 3rd-
+ // party modules. For example, you might remap the native "util" node module
+ // to something like https://www.npmjs.com/package/util so it works in the
+ // browser.
+ //
+ // This field contains a mapping of absolute paths to absolute paths. Mapping
+ // to an empty path indicates that the module is disabled. As far as I can
+ // tell, the official spec is an abandoned GitHub repo hosted by a user account:
+ // https://github.com/defunctzombie/package-browser-field-spec. The npm docs
+ // say almost nothing: https://docs.npmjs.com/files/package.json.
+ //
+ // Note that the non-package "browser" map has to be checked twice to match
+ // Webpack's behavior: once before resolution and once after resolution. It
+ // leads to some unintuitive failure cases that we must emulate around missing
+ // file extensions:
+ //
+ // * Given the mapping "./no-ext": "./no-ext-browser.js" the query "./no-ext"
+ // should match but the query "./no-ext.js" should NOT match.
+ //
+ // * Given the mapping "./ext.js": "./ext-browser.js" the query "./ext.js"
+ // should match and the query "./ext" should ALSO match.
+ //
+ browser_map: BrowserMap,
+
+ pub fn parse(r: *resolver.Resolver, input_path: string) ?*PackageJSON {
+ if (!has_set_default_main_fields) {
+ has_set_default_main_fields = true;
+ }
+
+ const parts = [_]string{ input_path, "package.json" };
+ const package_json_path = std.fs.path.join(r.allocator, &parts) catch unreachable;
+ errdefer r.allocator.free(package_json_path);
+
+ const entry: *cache.Cache.Fs.Entry = r.caches.fs.readFile(r.fs, package_json_path) catch |err| {
+ r.log.addErrorFmt(null, .empty, r.allocator, "Cannot read file \"{s}\": {s}", .{ r.prettyPath(fs.Path.init(input_path)), @errorName(err) }) catch unreachable;
+ return null;
+ };
+
+ if (r.debug_logs) |*debug| {
+ debug.addNoteFmt("The file \"{s}\" exists", .{package_json_path}) catch unreachable;
+ }
+
+ const key_path = fs.Path.init(r.allocator.dupe(u8, package_json_path) catch unreachable);
+
+ var json_source = logger.Source.initPathString(key_path.text, entry.contents);
+ json_source.path.pretty = r.prettyPath(json_source.path);
+
+ const json: js_ast.Expr = (r.caches.json.parseJSON(r.log, json_source, r.allocator) catch |err| {
+ if (isDebug) {
+ Output.printError("{s}: JSON parse error: {s}", .{ package_json_path, @errorName(err) });
+ }
+ return null;
+ } orelse return null);
+
+ var package_json = PackageJSON{
+ .source = json_source,
+ .module_type = options.ModuleType.unknown,
+ .browser_map = BrowserMap.init(r.allocator),
+ .main_fields = MainFieldMap.init(r.allocator),
+ };
+
+ if (json.getProperty("type")) |type_json| {
+ if ((type_json.expr.getString(r.allocator) catch null)) |type_str| {
+ switch (options.ModuleType.List.get(type_str) orelse options.ModuleType.unknown) {
+ .cjs => {
+ package_json.module_type = .cjs;
+ },
+ .esm => {
+ package_json.module_type = .esm;
+ },
+ .unknown => {
+ r.log.addRangeWarningFmt(
+ &json_source,
+ json_source.rangeOfString(type_json.loc),
+ r.allocator,
+ "\"{s}\" is not a valid value for \"type\" field (must be either \"commonjs\" or \"module\")",
+ .{type_str},
+ ) catch unreachable;
+ },
+ }
+ } else {
+ r.log.addWarning(&json_source, type_json.loc, "The value for \"type\" must be a string") catch unreachable;
+ }
+ }
+
+ // Read the "main" fields
+ for (r.opts.main_fields) |main| {
+ if (json.getProperty(main)) |main_json| {
+ const expr: js_ast.Expr = main_json.expr;
+
+ if ((expr.getString(r.allocator) catch null)) |str| {
+ if (str.len > 0) {
+ package_json.main_fields.put(main, str) catch unreachable;
+ }
+ }
+ }
+ }
+
+ // Read the "browser" property, but only when targeting the browser
+ if (r.opts.platform == .browser) {
+ // We both want the ability to have the option of CJS vs. ESM and the
+ // option of having node vs. browser. The way to do this is to use the
+ // object literal form of the "browser" field like this:
+ //
+ // "main": "dist/index.node.cjs.js",
+ // "module": "dist/index.node.esm.js",
+ // "browser": {
+ // "./dist/index.node.cjs.js": "./dist/index.browser.cjs.js",
+ // "./dist/index.node.esm.js": "./dist/index.browser.esm.js"
+ // },
+ //
+ if (json.getProperty("browser")) |browser_prop| {
+ switch (browser_prop.data) {
+ .e_object => |obj| {
+ // The value is an object
+
+ // Remap all files in the browser field
+ for (obj.properties) |prop| {
+ var _key_str = (prop.key orelse continue).getString(r.allocator) catch unreachable;
+ const value: js_ast.Expr = prop.value orelse continue;
+
+ // Normalize the path so we can compare against it without getting
+ // confused by "./". There is no distinction between package paths and
+ // relative paths for these values because some tools (i.e. Browserify)
+ // don't make such a distinction.
+ //
+ // This leads to weird things like a mapping for "./foo" matching an
+ // import of "foo", but that's actually not a bug. Or arguably it's a
+ // bug in Browserify but we have to replicate this bug because packages
+ // do this in the wild.
+ const key = fs.Path.normalize(_key_str, r.allocator);
+
+ switch (value.data) {
+ .e_string => |str| {
+ // If this is a string, it's a replacement package
+ package_json.browser_map.put(key, str.string(r.allocator) catch unreachable) catch unreachable;
+ },
+ .e_boolean => |boolean| {
+ if (!boolean.value) {
+ package_json.browser_map.put(key, "") catch unreachable;
+ }
+ },
+ else => {
+ r.log.addWarning(&json_source, value.loc, "Each \"browser\" mapping must be a string or boolean") catch unreachable;
+ },
+ }
+ }
+ },
+ else => {},
+ }
+ }
+ }
+
+ // TODO: side effects
+ // TODO: exports map
+
+ var result = r.allocator.create(PackageJSON) catch unreachable;
+ result.* = package_json;
+ return result;
+ }
+};
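
A hypothetical sketch of what parse produces for a package.json like the one quoted in the comment above (it assumes a configured resolver `r`; the package path is made up):

    if (PackageJSON.parse(r, "/node_modules/example")) |pkg| {
        // Values of the configured main fields, keyed by field name.
        const main = pkg.main_fields.get("main"); // e.g. "dist/index.node.cjs.js"
        // browser_map keys are run through fs.Path.normalize, so the
        // leading "./" is stripped; an empty value disables the module.
        const remap = pkg.browser_map.get("dist/index.node.cjs.js");
        _ = main;
        _ = remap;
    }
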
diff --git a/src/resolver/resolve_path.zig b/src/resolver/resolve_path.zig
index 81921e510..78f00cf96 100644
--- a/src/resolver/resolve_path.zig
+++ b/src/resolver/resolve_path.zig
@@ -4,19 +4,14 @@
const std = @import("std");
/// Resolves a unix-like path and removes all "." and ".." from it. Will not escape the root and can be used to sanitize inputs.
-pub fn resolvePath(buffer: []u8, src_path: []const u8) error{BufferTooSmall}![]u8 {
- if (buffer.len == 0)
- return error.BufferTooSmall;
- if (src_path.len == 0) {
- buffer[0] = '/';
- return buffer[0..1];
- }
-
+pub fn resolvePath(buffer: []u8, src_path: []const u8) ?[]u8 {
var end: usize = 0;
- buffer[0] = '/';
+ buffer[0] = '.';
var iter = std.mem.tokenize(src_path, "/");
while (iter.next()) |segment| {
+ if (end >= buffer.len) break;
+
if (std.mem.eql(u8, segment, ".")) {
continue;
} else if (std.mem.eql(u8, segment, "..")) {
@@ -39,10 +34,16 @@ pub fn resolvePath(buffer: []u8, src_path: []const u8) error{BufferTooSmall}![]u
}
}
- return if (end == 0)
+ const result = if (end == 0)
buffer[0 .. end + 1]
else
buffer[0..end];
+
+ if (std.mem.eql(u8, result, src_path)) {
+ return null;
+ }
+
+ return result;
}
fn testResolve(expected: []const u8, input: []const u8) !void {
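
A hedged usage sketch of the new contract (the middle of resolvePath is elided in this hunk, so the exact output shape is assumed, not shown):

    var buf: [1024]u8 = undefined;
    if (resolvePath(&buf, "foo/./bar/../baz")) |normalized| {
        // "." and ".." segments were collapsed; normalized points into buf.
        std.debug.print("normalized: {s}\n", .{normalized});
    } else {
        // null means the input was already in normal form, which is why
        // fs.Path.normalize above can fall back to the original string.
    }
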
diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig
index 138b1b72f..164002b80 100644
--- a/src/resolver/resolver.zig
+++ b/src/resolver/resolver.zig
@@ -4,7 +4,15 @@ const logger = @import("../logger.zig");
const options = @import("../options.zig");
const fs = @import("../fs.zig");
const std = @import("std");
+const cache = @import("../cache.zig");
+const TSConfigJSON = @import("./tsconfig_json.zig").TSConfigJSON;
+const PackageJSON = @import("./package_json.zig").PackageJSON;
+usingnamespace @import("./data_url.zig");
+
+const StringBoolMap = std.StringHashMap(bool);
+
+const Path = fs.Path;
pub const SideEffectsData = struct {
source: *logger.Source,
range: logger.Range,
@@ -39,11 +47,13 @@ pub const Resolver = struct {
debug_logs: ?DebugLogs = null,
+ caches: cache.Cache.Set,
+
// These are sets that represent various conditions for the "exports" field
// in package.json.
- esm_conditions_default: std.StringHashMap(bool),
- esm_conditions_import: std.StringHashMap(bool),
- esm_conditions_require: std.StringHashMap(bool),
+ // esm_conditions_default: std.StringHashMap(bool),
+ // esm_conditions_import: std.StringHashMap(bool),
+ // esm_conditions_require: std.StringHashMap(bool),
// A special filtered import order for CSS "@import" imports.
//
@@ -88,6 +98,8 @@ pub const Resolver = struct {
indent: MutableString,
notes: std.ArrayList(logger.Data),
+ pub const FlushMode = enum { fail, success };
+
pub fn init(allocator: *std.mem.Allocator) DebugLogs {
return .{
.indent = MutableString.init(allocator, 0),
@@ -121,11 +133,15 @@ pub const Resolver = struct {
try d.notes.append(logger.rangeData(null, logger.Range.None, text));
}
+
+ pub fn addNoteFmt(d: *DebugLogs, comptime fmt: string, args: anytype) !void {
+ return try d.addNote(try std.fmt.allocPrint(d.notes.allocator, fmt, args));
+ }
};
pub const PathPair = struct {
- primary: logger.Path,
- secondary: ?logger.Path = null,
+ primary: Path,
+ secondary: ?Path = null,
};
pub const Result = struct {
@@ -133,18 +149,253 @@ pub const Resolver = struct {
jsx: options.JSX.Pragma = options.JSX.Pragma{},
- // plugin_data: void
+ is_external: bool = false,
+
+ different_case: ?fs.FileSystem.Entry.Lookup.DifferentCase = null,
+
+ // If present, any ES6 imports to this file can be considered to have no side
+ // effects. This means they should be removed if unused.
+ primary_side_effects_data: ?SideEffectsData = null,
+
+ // If true, the class field transform should use Object.defineProperty().
+ use_define_for_class_fields_ts: ?bool = null,
+
+ // If true, unused imports are retained in TypeScript code. This matches the
+ // behavior of the "importsNotUsedAsValues" field in "tsconfig.json" when the
+ // value is not "remove".
+ preserve_unused_imports_ts: bool = false,
+
+ // This is the "type" field from "package.json"
+ module_type: options.ModuleType = options.ModuleType.unknown,
+
+ debug_meta: ?DebugMeta = null,
+
+ pub const DebugMeta = struct {
+ notes: std.ArrayList(logger.Data),
+ suggestion_text: string = "",
+ suggestion_message: string = "",
+
+ pub fn init(allocator: *std.mem.Allocator) DebugMeta {
+ return DebugMeta{ .notes = std.ArrayList(logger.Data).init(allocator) };
+ }
+
+ pub fn logErrorMsg(m: *DebugMeta, log: *logger.Log, _source: ?*const logger.Source, r: logger.Range, comptime fmt: string, args: anytype) !void {
+ if (_source != null and m.suggestion_message.len > 0) {
+ var data = logger.rangeData(_source.?, r, m.suggestion_message);
+ data.location.?.suggestion = m.suggestion_text;
+ try m.notes.append(data);
+ }
+
+ try log.addMsg(logger.Msg{
+ .kind = .err,
+ .data = logger.rangeData(_source, r, try std.fmt.allocPrint(m.notes.allocator, fmt, args)),
+ .notes = m.notes.toOwnedSlice(),
+ });
+ }
+ };
};
- pub fn resolve(r: *Resolver, source_dir: string, import_path: string, kind: ast.ImportKind) Result {}
+ pub fn isExternalPattern(r: *Resolver, import_path: string) bool {
+ Global.notimpl();
+ }
- fn dirInfoCached(r: *Resolver, path: string) !*DirInfo {
- // First, check the cache
- if (r.dir_cache.get(path)) |dir| {
- return dir;
+ pub fn flushDebugLogs(r: *Resolver, flush_mode: DebugLogs.FlushMode) !void {
+ if (r.debug_logs) |*debug| {
+ defer {
+ debug.deinit();
+ r.debug_logs = null;
+ }
+
+ if (flush_mode == .fail) {
+ try r.log.addRangeDebugWithNotes(null, .empty, debug.what, debug.notes.toOwnedSlice());
+ } else if (@enumToInt(r.log.level) <= @enumToInt(logger.Log.Level.verbose)) {
+ try r.log.addVerboseWithNotes(null, .empty, debug.what, debug.notes.toOwnedSlice());
+ }
+ }
+ }
+
+ pub fn resolve(r: *Resolver, source_dir: string, import_path: string, kind: ast.ImportKind) !?Result {
+ if (r.log.level == .verbose) {
+ if (r.debug_logs != null) {
+ r.debug_logs.?.deinit();
+ }
+
+ r.debug_logs = DebugLogs.init(r.allocator);
}
- const info = try r.dirInfoUncached(path);
+ // Certain types of URLs default to being external for convenience
+ if (r.isExternalPattern(import_path) or
+ // "fill: url(#filter);"
+ (kind.isFromCSS() and strings.startsWith(import_path, "#")) or
+
+ // "background: url(http://example.com/images/image.png);"
+ strings.startsWith(import_path, "http://") or
+
+ // "background: url(https://example.com/images/image.png);"
+ strings.startsWith(import_path, "https://") or
+
+ // "background: url(//example.com/images/image.png);"
+ strings.startsWith(import_path, "//"))
+ {
+ if (r.debug_logs) |*debug| {
+ try debug.addNote("Marking this path as implicitly external");
+ }
+ r.flushDebugLogs(.success) catch {};
+ return Result{ .path_pair = PathPair{
+ .primary = Path{ .text = import_path },
+ .is_external = true,
+ } };
+ }
+
+ if (DataURL.parse(import_path) catch null) |_data_url| {
+ const data_url: DataURL = _data_url;
+ // "import 'data:text/javascript,console.log(123)';"
+ // "@import 'data:text/css,body{background:white}';"
+ if (data_url.decode_mime_type() != .Unsupported) {
+ if (r.debug_logs) |*debug| {
+ debug.addNote("Putting this path in the \"dataurl\" namespace") catch {};
+ }
+ r.flushDebugLogs(.success) catch {};
+ return Resolver.Result{ .path_pair = PathPair{ .primary = Path{ .text = import_path, .namespace = "dataurl" } } };
+ }
+
+ // "background: url(data:image/png;base64,iVBORw0KGgo=);"
+ if (r.debug_logs) |*debug| {
+ debug.addNote("Marking this \"dataurl\" as external") catch {};
+ }
+ r.flushDebugLogs(.success) catch {};
+ return Resolver.Result{
+ .path_pair = PathPair{ .primary = Path{ .text = import_path, .namespace = "dataurl" } },
+ .is_external = true,
+ };
+ }
+
+ // Fail now if there is no directory to resolve in. This can happen for
+ // virtual modules (e.g. stdin) if a resolve directory is not specified.
+ if (source_dir.len == 0) {
+ if (r.debug_logs) |*debug| {
+ debug.addNote("Cannot resolve this path without a directory") catch {};
+ }
+ r.flushDebugLogs(.fail) catch {};
+ return null;
+ }
+
+ const hold = r.mutex.acquire();
+ defer hold.release();
+
+ return try r.resolveWithoutSymlinks(source_dir, import_path, kind);
+ }
+
+ pub fn resolveWithoutSymlinks(r: *Resolver, source_dir: string, import_path: string, kind: ast.ImportKind) !Result {
+ // This implements the module resolution algorithm from node.js, which is
+ // described here: https://nodejs.org/api/modules.html#modules_all_together
+ var result: Result = undefined;
+
+ // Return early if this is already an absolute path. In addition to asking
+ // the file system whether this is an absolute path, we also explicitly check
+ // whether it starts with a "/" and consider that an absolute path too. This
+ // is because relative paths can technically start with a "/" on Windows
+ // because it's not an absolute path on Windows. Then people might write code
+ // with imports that start with a "/" that works fine on Windows only to
+ // experience unexpected build failures later on other operating systems.
+ // Treating these paths as absolute paths on all platforms means Windows
+ // users will not be able to accidentally make use of these paths.
+ if (strings.startsWith(import_path, "/") or std.fs.path.isAbsolutePosix(import_path)) {
+ if (r.debug_logs) |*debug| {
+ debug.addNoteFmt("The import \"{s}\" is being treated as an absolute path", .{import_path}) catch {};
+ }
+
+ // First, check path overrides from the nearest enclosing TypeScript "tsconfig.json" file
+ if (try r.dirInfoCached(source_dir)) |_dir_info| {
+ const dir_info: *DirInfo = _dir_info;
+ if (dir_info.ts_config_json) |tsconfig| {
+ if (tsconfig.paths.count() > 0) {}
+ }
+ }
+ }
+ }
+
+ pub const TSConfigExtender = struct {
+ visited: *StringBoolMap,
+ file_dir: string,
+ r: *Resolver,
+
+ pub fn extends(ctx: *TSConfigExtender, extends_path: string, range: logger.Range) ?*TSConfigJSON {
+ Global.notimpl();
+ // if (isPackagePath(extends)) {
+ // // // If this is a package path, try to resolve it to a "node_modules"
+ // // // folder. This doesn't use the normal node module resolution algorithm
+ // // // both because it's different (e.g. we don't want to match a directory)
+ // // // and because it would deadlock since we're currently in the middle of
+ // // // populating the directory info cache.
+ // // var current = ctx.file_dir;
+ // // while (true) {
+ // // // Skip "node_modules" folders
+ // // if (!strings.eql(std.fs.path.basename(current), "node_modules")) {
+ // // var paths1 = [_]string{ current, "node_modules", extends };
+ // // var join1 = std.fs.path.join(ctx.r.allocator, &paths1) catch unreachable;
+ // // const res = ctx.r.parseTSConfig(join1, ctx.visited) catch |err| {
+ // // if (err == error.ENOENT) {
+ // // continue;
+ // // } else if (err == error.ParseErrorImportCycle) {} else if (err != error.ParseErrorAlreadyLogged) {}
+ // // return null;
+ // // };
+ // // return res;
+
+ // // }
+ // // }
+ // }
+ }
+ };
+
+ pub fn parseTSConfig(r: *Resolver, file: string, visited: *StringBoolMap) !?*TSConfigJSON {
+ if (visited.contains(file)) {
+ return error.ParseErrorImportCycle;
+ }
+ visited.put(file, true) catch unreachable;
+ const entry = try r.caches.fs.readFile(r.fs, file);
+ const key_path = Path.init(file);
+
+ var source = logger.Source{
+ .key_path = key_path,
+ .path = key_path,
+ .identifier_name = "",
+ .contents = entry.contents,
+ };
+ source.path.pretty = r.prettyPath(key_path);
+ const file_dir = std.fs.path.dirname(file) orelse return null;
+
+ var result = (try TSConfigJSON.parse(r.allocator, r.log, source, r.opts, &r.caches.json)) orelse return null;
+
+ if (result.base_url) |base| {
+ // this might leak
+ if (!std.fs.path.isAbsolute(base)) {
+ var paths = [_]string{ file_dir, base };
+ result.base_url = std.fs.path.join(r.allocator, &paths) catch unreachable;
+ }
+ }
+
+ if (result.paths.count() > 0 and (result.base_url_for_paths.len == 0 or !std.fs.path.isAbsolute(result.base_url_for_paths))) {
+ // this might leak
+ var paths = [_]string{ file_dir, result.base_url_for_paths };
+ result.base_url_for_paths = std.fs.path.join(r.allocator, &paths) catch unreachable;
+ }
+
+ return result;
+ }
+
+ // TODO:
+ pub fn prettyPath(r: *Resolver, path: Path) string {
+ return path.text;
+ }
+
+ pub fn parsePackageJSON(r: *Resolver, file: string) !?*PackageJSON {
+ return PackageJSON.parse(r, file);
+ }
+
+ pub fn isPackagePath(path: string) bool {
+ // this could probably be flattened into something more optimized
+ return path[0] != '/' and !strings.startsWith(path, "./") and !strings.startsWith(path, "../") and !strings.eql(path, ".") and !strings.eql(path, "..");
+ }
+
+ fn dirInfoCached(r: *Resolver, path: string) !*DirInfo {
+ const info = r.dir_cache.get(path) orelse try r.dirInfoUncached(path);
try r.dir_cache.put(path, info);
+ return info;
}
@@ -215,5 +466,87 @@ pub const Resolver = struct {
}
// Propagate the browser scope into child directories
+ if (parent) |parent_info| {
+ info.enclosing_browser_scope = parent_info.enclosing_browser_scope;
+
+ // Make sure "absRealPath" is the real path of the directory (resolving any symlinks)
+ if (!r.opts.preserve_symlinks) {
+ if (parent_info.entries.get(base)) |entry| {
+ var symlink = entry.symlink(rfs);
+ if (symlink.len > 0) {
+ if (r.debug_logs) |*logs| {
+ try logs.addNoteFmt("Resolved symlink \"{s}\" to \"{s}\"", .{ path, symlink });
+ }
+ info.abs_real_path = symlink;
+ } else if (parent_info.abs_real_path.len > 0) {
+ // this might leak a little i'm not sure
+ const parts = [_]string{ parent_info.abs_real_path, base };
+ symlink = try std.fs.path.join(r.allocator, &parts);
+ if (r.debug_logs) |*logs| {
+ try logs.addNoteFmt("Resolved symlink \"{s}\" to \"{s}\"", .{ path, symlink });
+ }
+ info.abs_real_path = symlink;
+ }
+ }
+ }
+ }
+
+ // Record if this directory has a package.json file
+ if (entries.get("package.json")) |entry| {
+ if (entry.kind(rfs) == .file) {
+ info.package_json = r.parsePackageJSON(path) catch null;
+
+ if (info.package_json) |pkg| {
+ if (pkg.browser_map.count() > 0) {
+ info.enclosing_browser_scope = info;
+ }
+
+ if (r.debug_logs) |*logs| {
+ try logs.addNoteFmt("Resolved package.json in \"{s}\"", .{path});
+ }
+ }
+ }
+ }
+
+ // Record if this directory has a tsconfig.json or jsconfig.json file
+ {
+ var tsconfig_path: ?string = null;
+ if (r.opts.tsconfig_override == null) {
+ if (entries.get("tsconfig.json")) |entry| {
+ if (entry.kind(rfs) == .file) {
+ const parts = [_]string{ path, "tsconfig.json" };
+ tsconfig_path = try std.fs.path.join(r.allocator, &parts);
+ }
+ } else if (entries.get("jsconfig.json")) |jsconfig| {
+ if (jsconfig.kind(rfs) == .file) {
+ const parts = [_]string{ path, "jsconfig.json" };
+ tsconfig_path = try std.fs.path.join(r.allocator, &parts);
+ }
+ }
+ } else if (parent == null) {
+ tsconfig_path = r.opts.tsconfig_override.?;
+ }
+
+ if (tsconfig_path) |tsconfigpath| {
+ var visited = std.StringHashMap(bool).init(r.allocator);
+ defer visited.deinit();
+ info.ts_config_json = r.parseTSConfig(tsconfigpath, &visited) catch |err| brk: {
+ const pretty = r.prettyPath(fs.Path.initWithNamespace(tsconfigpath, "file"));
+
+ if (err == error.ENOENT) {
+ r.log.addErrorFmt(null, .empty, r.allocator, "Cannot find tsconfig file \"{s}\"", .{pretty}) catch {};
+ } else if (err != error.ParseErrorAlreadyLogged) {
+ r.log.addErrorFmt(null, .empty, r.allocator, "Cannot read file \"{s}\": {s}", .{ pretty, @errorName(err) }) catch {};
+ }
+ break :brk null;
+ };
+ }
+ }
+
+ if (info.ts_config_json == null and parent != null) {
+ info.ts_config_json = parent.?.ts_config_json;
+ }
+
+ return info;
}
};
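
The behavior of isPackagePath follows directly from the predicate above; a few concrete cases:

    std.debug.assert(Resolver.isPackagePath("react")); // bare specifier
    std.debug.assert(Resolver.isPackagePath("react-dom/server"));
    std.debug.assert(!Resolver.isPackagePath("./util")); // relative
    std.debug.assert(!Resolver.isPackagePath("../util"));
    std.debug.assert(!Resolver.isPackagePath("/opt/app/util")); // absolute
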
diff --git a/src/resolver/tsconfig_json.zig b/src/resolver/tsconfig_json.zig
new file mode 100644
index 000000000..dd952b65f
--- /dev/null
+++ b/src/resolver/tsconfig_json.zig
@@ -0,0 +1,319 @@
+usingnamespace @import("../global.zig");
+const std = @import("std");
+const options = @import("../options.zig");
+const log = @import("../logger.zig");
+const cache = @import("../cache.zig");
+const logger = @import("../logger.zig");
+const js_ast = @import("../js_ast.zig");
+const alloc = @import("../alloc.zig");
+const js_lexer = @import("../js_lexer.zig");
+
+const PathsMap = std.StringHashMap([]string);
+
+pub const TSConfigJSON = struct {
+ abs_path: string,
+
+ // The absolute path of "compilerOptions.baseUrl"
+ base_url: ?string = null,
+
+ // This is used if "Paths" is non-nil. It's equal to "BaseURL" except if
+ // "BaseURL" is missing, in which case it is as if "BaseURL" was ".". This
+ // is to implement the "paths without baseUrl" feature from TypeScript 4.1.
+ // More info: https://github.com/microsoft/TypeScript/issues/31869
+ base_url_for_paths: string = "",
+
+ // The verbatim values of "compilerOptions.paths". The keys are patterns to
+ // match and the values are arrays of fallback paths to search. Each key and
+ // each fallback path can optionally have a single "*" wildcard character.
+ // If both the key and the value have a wildcard, the substring matched by
+ // the wildcard is substituted into the fallback path. The keys represent
+ // module-style path names and the fallback paths are relative to the
+ // "baseUrl" value in the "tsconfig.json" file.
+ paths: PathsMap,
+
+ jsx: options.JSX.Pragma = options.JSX.Pragma{},
+
+ use_define_for_class_fields: ?bool = null,
+
+ preserve_imports_not_used_as_values: bool = false,
+
+ pub const ImportsNotUsedAsValue = enum {
+ preserve,
+ err,
+ remove,
+ invalid,
+
+ pub const List = std.ComptimeStringMap(ImportsNotUsedAsValue, .{
+ .{ "preserve", ImportsNotUsedAsValue.preserve },
+ .{ "error", ImportsNotUsedAsValue.err },
+ .{ "remove", ImportsNotUsedAsValue.remove },
+ });
+ };
+
+ pub fn parse(
+ allocator: *std.mem.Allocator,
+ log: *logger.Log,
+ source: logger.Source,
+ opts: options.TransformOptions,
+ json_cache: *cache.Cache.Json,
+ ) anyerror!?*TSConfigJSON {
+ // Unfortunately "tsconfig.json" isn't actually JSON. It's some other
+ // format that appears to be defined by the implementation details of the
+ // TypeScript compiler.
+ //
+ // Attempt to parse it anyway by modifying the JSON parser, but just for
+ // these particular files. This is likely not a completely accurate
+ // emulation of what the TypeScript compiler does (e.g. string escape
+ // behavior may also be different).
+ const json: js_ast.Expr = (json_cache.parseTSConfig(log, source, allocator) catch null) orelse return null;
+
+ var result: TSConfigJSON = TSConfigJSON{ .abs_path = source.key_path.text, .paths = PathsMap.init(allocator) };
+ errdefer result.paths.deinit();
+ if (json.getProperty("extends")) |extends_value| {
+ log.addWarning(&source, extends_value.loc, "\"extends\" is not implemented yet") catch unreachable;
+ // if ((extends_value.expr.getString(allocator) catch null)) |str| {
+ // if (extends(str, source.rangeOfString(extends_value.loc))) |base| {
+ // result.jsx = base.jsx;
+ // result.base_url_for_paths = base.base_url_for_paths;
+ // result.use_define_for_class_fields = base.use_define_for_class_fields;
+ // result.preserve_imports_not_used_as_values = base.preserve_imports_not_used_as_values;
+ // // https://github.com/microsoft/TypeScript/issues/14527#issuecomment-284948808
+ // result.paths = base.paths;
+ // }
+ // }
+ }
+ }
+
+ // Parse "compilerOptions"
+ if (json.getProperty("compilerOptions")) |compiler_opts| {
+ // Parse "baseUrl"
+ if (compiler_opts.expr.getProperty("baseUrl")) |base_url_prop| {
+ // maybe we should add a warning when it exists but the value is an array or osmething invalid?
+ if ((base_url_prop.expr.getString(allocator) catch null)) |base_url| {
+ result.base_url = base_url;
+ }
+ }
+
+ // Parse "jsxFactory"
+ if (compiler_opts.expr.getProperty("jsxFactory")) |jsx_prop| {
+ if ((jsx_prop.expr.getString(allocator) catch null)) |str| {
+ result.jsx.factory = try parseMemberExpressionForJSX(log, &source, jsx_prop.loc, str, allocator);
+ }
+ }
+
+ // Parse "jsxFragmentFactory"
+ if (compiler_opts.expr.getProperty("jsxFactory")) |jsx_prop| {
+ if (jsx_prop.expr.getString(allocator)) |str| {
+ result.jsx.fragment = try parseMemberExpressionForJSX(log, source, jsx_prop.loc, str, allocator);
+ }
+ }
+
+ // Parse "jsxImportSource"
+ if (compiler_opts.expr.getProperty("jsxImportSource")) |jsx_factory_prop| {
+ if (jsx_prop.expr.getString(allocator)) |str| {
+ result.jsx.import_source = str;
+ }
+ }
+
+ // Parse "useDefineForClassFields"
+ if (compiler_opts.expr.getProperty("useDefineForClassFields")) |use_define_value_prop| {
+ if (use_define_value_prop.expr.getBool()) |val| {
+ result.use_define_for_class_fields = val;
+ }
+ }
+
+ // Parse "importsNotUsedAsValues"
+ if (compiler_opts.expr.getProperty("importsNotUsedAsValues")) |imports_not_used_as_values_prop| {
+ // This should never allocate since it will be utf8
+ if ((imports_not_used_as_values_prop.expr.getString(allocator) catch null)) |str| {
+ switch (ImportsNotUsedAsValue.List.get(str) orelse ImportsNotUsedAsValue.invalid) {
+ .preserve, .err => {
+ result.preserve_imports_not_used_as_values = true;
+ },
+ .remove => {},
+ else => {
+ log.addRangeWarningFmt(&source, source.rangeOfString(imports_not_used_as_values_prop.loc), allocator, "Invalid value \"{s}\" for \"importsNotUsedAsValues\"", .{str}) catch {};
+ },
+ }
+ }
+ }
+
+ // Parse "paths"
+ if (compiler_opts.expr.getProperty("paths")) |paths_prop| {
+ switch (paths_prop.expr.data) {
+ .e_object => |paths| {
+ result.base_url_for_paths = result.base_url orelse ".";
+ result.paths = PathsMap.init(allocator);
+ for (paths.properties) |property| {
+ const key_prop = property.key orelse continue;
+ const key = (key_prop.getString(allocator) catch null) orelse continue;
+
+ if (!TSConfigJSON.isValidTSConfigPathNoBaseURLPattern(key, log, &source, key_prop.loc, allocator)) {
+ continue;
+ }
+
+ const value_prop = property.value orelse continue;
+
+ // The "paths" field is an object which maps a pattern to an
+ // array of remapping patterns to try, in priority order. See
+ // the documentation for examples of how this is used:
+ // https://www.typescriptlang.org/docs/handbook/module-resolution.html#path-mapping.
+ //
+ // One particular example:
+ //
+ // {
+ // "compilerOptions": {
+ // "baseUrl": "projectRoot",
+ // "paths": {
+ // "*": [
+ // "*",
+ // "generated/*"
+ // ]
+ // }
+ // }
+ // }
+ //
+ // Matching "folder1/file2" should first check "projectRoot/folder1/file2"
+ // and then, if that didn't work, also check "projectRoot/generated/folder1/file2".
+ switch (value_prop.data) {
+ .e_array => |array| {
+ if (array.items.len > 0) {
+ // renamed from `paths` to avoid shadowing the e_object capture above
+ var values = allocator.alloc(string, array.items.len) catch unreachable;
+ errdefer allocator.free(values);
+ var count: usize = 0;
+ const has_base_url = result.base_url != null;
+ for (array.items) |expr| {
+ if ((expr.getString(allocator) catch null)) |str| {
+ if (TSConfigJSON.isValidTSConfigPathPattern(str, log, &source, expr.loc, allocator) and
+ (has_base_url or
+ TSConfigJSON.isValidTSConfigPathNoBaseURLPattern(
+ str,
+ log,
+ &source,
+ expr.loc,
+ allocator,
+ ))) {
+ values[count] = str;
+ count += 1;
+ }
+ }
+ }
+ if (count > 0) {
+ result.paths.put(
+ key,
+ values[0..count],
+ ) catch unreachable;
+ }
+ }
+ },
+ else => {
+ log.addRangeWarningFmt(
+ &source,
+ source.rangeOfString(key_prop.loc),
+ allocator,
+ "Substitutions for pattern \"{s}\" should be an array",
+ .{key},
+ ) catch {};
+ },
+ }
+ }
+ },
+ else => {},
+ }
+ }
+ }
+
+ var _result = allocator.create(TSConfigJSON) catch unreachable;
+ _result.* = result;
+ return _result;
+ }
+
+ pub fn isValidTSConfigPathPattern(text: string, log: *logger.Log, source: *logger.Source, loc: logger.Loc, allocator: *std.mem.Allocator) bool {
+ var found_asterisk = false;
+ for (text) |c| {
+ if (c == '*') {
+ if (found_asterisk) {
+ const r = source.rangeOfString(loc);
+ log.addRangeWarningFmt(source, r, allocator, "Invalid pattern \"{s}\", must have at most one \"*\" character", .{text}) catch {};
+ return false;
+ }
+ found_asterisk = true;
+ }
+ }
+
+ return true;
+ }
+
+ pub fn parseMemberExpressionForJSX(log: *logger.Log, source: *logger.Source, loc: logger.Loc, text: string, allocator: *std.mem.Allocator) ![]string {
+ if (text.len == 0) {
+ return &([_]string{});
+ }
+ // N separators yield N + 1 parts
+ const parts_count = std.mem.count(u8, text, ".") + 1;
+ const parts = allocator.alloc(string, parts_count) catch unreachable;
+ var iter = std.mem.tokenize(text, ".");
+ var i: usize = 0;
+ while (iter.next()) |part| {
+ if (!js_lexer.isIdentifier(part)) {
+ const warn = source.rangeOfString(loc);
+ log.addRangeWarningFmt(source, warn, allocator, "Invalid JSX member expression: \"{s}\"", .{part}) catch {};
+ return &([_]string{});
+ }
+ parts[i] = part;
+ i += 1;
+ }
+
+ return parts;
+ }
+
+ pub fn isSlash(c: u8) bool {
+ return c == '/' or c == '\\';
+ }
+
+ pub fn isValidTSConfigPathNoBaseURLPattern(text: string, log: *logger.Log, source: *logger.Source, loc: logger.Loc, allocator: *std.mem.Allocator) bool {
+ var c0: u8 = 0;
+ var c1: u8 = 0;
+ var c2: u8 = 0;
+ const n = text.len;
+
+ switch (n) {
+ 0 => {
+ return false;
+ },
+ // Relative "." or ".."
+
+ 1 => {
+ return text[0] == '.';
+ },
+ // "..", ".\", "./"
+ 2 => {
+ return text[0] == '.' and (text[1] == '.' or text[1] == '\\' or text[1] == '/');
+ },
+ else => {
+ c0 = text[0];
+ c1 = text[1];
+ c2 = text[2];
+ },
+ }
+
+ // Relative "./" or "../" or ".\\" or "..\\"
+ if (c0 == '.' and (TSConfigJSON.isSlash(c1) or (c1 == '.' and TSConfigJSON.isSlash(c2)))) {
+ return true;
+ }
+
+ // Absolute DOS "c:/" or "c:\\"
+ if (c1 == ':' and TSConfigJSON.isSlash(c2)) {
+ switch (c0) {
+ 'a'...'z', 'A'...'Z' => {
+ return true;
+ },
+ else => {},
+ }
+ }
+
+ const r = source.rangeOfString(loc);
+ log.addRangeWarningFmt(source, r, allocator, "Non-relative path \"{s}\" is not allowed when \"baseUrl\" is not set (did you forget a leading \"./\"?)", .{text}) catch {};
+ return false;
+ }
+};
+
+test "tsconfig.json" {
+ try alloc.setup(std.heap.c_allocator);
+}
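
A hypothetical sketch of how the resolver consumes the parsed result (`source`, `opts`, and `json_cache` are assumed to be set up as in Resolver.parseTSConfig above):

    if (try TSConfigJSON.parse(allocator, log, source, opts, json_cache)) |ts| {
        // Each "paths" key maps to an ordered list of fallbacks; keys and
        // values may each contain at most one "*" wildcard.
        if (ts.paths.get("@app/*")) |fallbacks| {
            for (fallbacks) |fallback| {
                std.debug.print("candidate: {s}\n", .{fallback});
            }
        }
    }
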
diff --git a/src/string_mutable.zig b/src/string_mutable.zig
index 610f35a0a..7e9ea5aa6 100644
--- a/src/string_mutable.zig
+++ b/src/string_mutable.zig
@@ -106,9 +106,6 @@ pub const MutableString = struct {
try self.list.ensureUnusedCapacity(self.allocator, amount);
}
- pub fn deinit(self: *MutableString) !void {
- self.list.deinit(self.allocator);
- }
pub fn appendChar(self: *MutableString, char: u8) callconv(.Inline) !void {
try self.list.append(self.allocator, char);
}