-rw-r--r--  README.md                                               2
-rw-r--r--  examples/next/package.json                              4
-rw-r--r--  packages/bun-framework-next/README.md                  22
-rw-r--r--  packages/bun-framework-next/client.development.tsx     78
-rw-r--r--  packages/bun-framework-next/page-loader.ts              4
-rw-r--r--  packages/bun-framework-next/polyfills.tsx               5
-rw-r--r--  packages/bun-framework-next/renderDocument.tsx         20
-rw-r--r--  packages/bun-framework-next/text-encoder-polyfill.js  302
-rw-r--r--  src/bundler.zig                                         7
-rw-r--r--  src/cli/bun_command.zig                                 1
-rw-r--r--  src/cli/create_command.zig                              2
-rw-r--r--  src/cli/dev_command.zig                                 4
-rw-r--r--  src/cli/run_command.zig                                 4
-rw-r--r--  src/global.zig                                          22
-rw-r--r--  src/import_record.zig                                   2
-rw-r--r--  src/js_parser/js_parser.zig                             10
-rw-r--r--  src/logger.zig                                          24
17 files changed, 466 insertions, 47 deletions
diff --git a/README.md b/README.md
index ca5000b60..3481ab903 100644
--- a/README.md
+++ b/README.md
@@ -76,7 +76,7 @@ Here's what doesn't work yet:
- locales, zones, `assetPrefix` (workaround: change `--origin "http://localhost:3000/assetPrefixInhere"`)
- `next/image` is polyfilled to a regular `<img src>` tag.
- `proxy` and anything else in `next.config.js`
-- API routes
+- API routes, middleware (middleware should be easier to support, since it uses a similar SSR API)
When using Next.js, Bun automatically reads configuration from `.env.local`, `.env.development`, and `.env` (in that order). `process.env.NEXT_PUBLIC_` and `process.env.NEXT_` are automatically replaced via `--define`.
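
As a concrete illustration of the `--define` replacement described above (the variable name and value here are hypothetical, not taken from this commit):

```ts
// Input source, assuming .env.local contains NEXT_PUBLIC_API_URL=https://api.example.com
console.log(process.env.NEXT_PUBLIC_API_URL);

// What the bundled output effectively contains after the --define substitution:
console.log("https://api.example.com");
```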
diff --git a/examples/next/package.json b/examples/next/package.json
index 55c57d6a9..6acde1e1e 100644
--- a/examples/next/package.json
+++ b/examples/next/package.json
@@ -10,8 +10,8 @@
},
"devDependencies": {
"@types/react": "^17.0.19",
- "bun-framework-next": "^0.0.0-21",
- "typescript": "^4.3.5"
+ "bun-framework-next": "^12",
+ "typescript": "^4.4.5"
},
"bun-create": {
"postinstall": [
diff --git a/packages/bun-framework-next/README.md b/packages/bun-framework-next/README.md
new file mode 100644
index 000000000..6c3d3e324
--- /dev/null
+++ b/packages/bun-framework-next/README.md
@@ -0,0 +1,22 @@
+# bun-framework-next
+
+This package lets you use Next.js 12 with Bun. This README assumes you have already installed Bun.
+
+To start a new project:
+
+```bash
+bun create next --open
+```
+
+To use Next.js 12 with an existing project:
+
+```bash
+npm install bun-framework-next
+bun bun --use next
+```
+
+Launch bun:
+
+```bash
+bun
+```
diff --git a/packages/bun-framework-next/client.development.tsx b/packages/bun-framework-next/client.development.tsx
index df58151f9..dcc1d915c 100644
--- a/packages/bun-framework-next/client.development.tsx
+++ b/packages/bun-framework-next/client.development.tsx
@@ -279,6 +279,10 @@ function AppContainer({
);
}
+let reactRoot: any = null;
+
+const USE_REACT_18 = "hydrateRoot" in ReactDOM;
+
export async function _boot(EntryPointNamespace, isError) {
NextRouteLoader.default.getClientBuildManifest = () => Promise.resolve({});
@@ -338,25 +342,31 @@ export async function _boot(EntryPointNamespace, isError) {
});
globalThis.next.router = router;
+ const domEl = document.querySelector("#__next");
+ const reactEl = (
+ <TopLevelRender
+ App={CachedApp}
+ Component={PageComponent}
+ props={hydrateProps}
+ />
+ );
- if (isError) {
- ReactDOM.render(
- <TopLevelRender
- App={CachedApp}
- Component={PageComponent}
- props={hydrateProps}
- />,
- document.querySelector("#__next")
- );
+ if (USE_REACT_18) {
+ if (!reactRoot) {
+ if (isError) {
+ // createRoot takes only the container; the element must be rendered explicitly
+ reactRoot = ReactDOM.createRoot(domEl);
+ reactRoot.render(reactEl);
+ } else {
+ // hydrateRoot renders during hydration, so no separate root.render() call is needed
+ reactRoot = ReactDOM.hydrateRoot(domEl, reactEl);
+ }
+ } else {
+ reactRoot.render(reactEl);
+ }
} else {
- ReactDOM.hydrate(
- <TopLevelRender
- App={CachedApp}
- Component={PageComponent}
- props={hydrateProps}
- />,
- document.querySelector("#__next")
- );
+ if (isError) {
+ ReactDOM.render(reactEl, domEl);
+ } else {
+ ReactDOM.hydrate(reactEl, domEl);
+ }
}
}
@@ -369,23 +379,41 @@ function TopLevelRender({ App, Component, props, scroll }) {
}
export function render(props) {
- ReactDOM.render(
- <TopLevelRender {...props} />,
- document.querySelector("#__next")
- );
+ if (USE_REACT_18) {
+ reactRoot.render(<TopLevelRender {...props} />);
+ } else {
+ ReactDOM.render(
+ <TopLevelRender {...props} />,
+ document.getElementById("__next")
+ );
+ }
}
export function renderError(e) {
- ReactDOM.render(
+ const reactEl = (
<AppContainer>
<App Component={<div>UH OH!!!!</div>} pageProps={data.props}></App>
- </AppContainer>,
- document.querySelector("#__next")
+ </AppContainer>
);
+
+ if (USE_REACT_18) {
+ if (!reactRoot) {
+ const domEl = document.querySelector("#__next");
+
+ // createRoot takes only the container; the error UI still needs an explicit render() call
+ reactRoot = ReactDOM.createRoot(domEl);
+ reactRoot.render(reactEl);
+ } else {
+ reactRoot.render(reactEl);
+ }
+ } else {
+ const domEl = document.querySelector("#__next");
+
+ ReactDOM.render(reactEl, domEl);
+ }
}
globalThis.next = {
- version: "11.1.2",
+ version: "12.0.2",
emitter,
render,
renderError,
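
The hydration changes above hinge on feature-detecting React 18 via `"hydrateRoot" in ReactDOM` and reusing a single root across renders. A minimal sketch of that pattern, with illustrative names (`root`, `mount`) not taken from this commit:

```tsx
import * as React from "react";
import * as ReactDOM from "react-dom";

let root: any = null;

// React 18 alphas (the versions Next.js 12 targeted) exposed hydrateRoot on
// "react-dom" itself; stable React 18 later moved it to "react-dom/client".
function mount(el: React.ReactElement, container: Element, hydrate: boolean) {
  if ("hydrateRoot" in ReactDOM) {
    const RD = ReactDOM as any;
    if (!root) {
      if (hydrate) {
        // hydrateRoot attaches to the server-rendered HTML and renders immediately
        root = RD.hydrateRoot(container, el);
      } else {
        // createRoot takes only the container; rendering is a separate call
        root = RD.createRoot(container);
        root.render(el);
      }
    } else {
      // reuse the existing root for subsequent client-side renders
      root.render(el);
    }
  } else {
    // React 17 and earlier: the legacy top-level APIs
    (hydrate ? ReactDOM.hydrate : ReactDOM.render)(el, container);
  }
}
```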
diff --git a/packages/bun-framework-next/page-loader.ts b/packages/bun-framework-next/page-loader.ts
index b0c69a1c2..7bb6c2a25 100644
--- a/packages/bun-framework-next/page-loader.ts
+++ b/packages/bun-framework-next/page-loader.ts
@@ -62,6 +62,10 @@ export default class PageLoader extends NextPageLoader {
return this.pageList;
}
+ async getMiddlewareList() {
+ return [];
+ }
+
cssQueue = [];
onImportCSS = (event) => {
diff --git a/packages/bun-framework-next/polyfills.tsx b/packages/bun-framework-next/polyfills.tsx
index b000c1f54..474ee6ec9 100644
--- a/packages/bun-framework-next/polyfills.tsx
+++ b/packages/bun-framework-next/polyfills.tsx
@@ -21,3 +21,8 @@ React.Children.only = function (children) {
return onlyChildPolyfill(children);
};
globalThis.URL = URL;
+
+import { TextEncoder, TextDecoder } from "./text-encoder-polyfill";
+
+globalThis.TextEncoder ||= TextEncoder;
+globalThis.TextDecoder ||= TextDecoder;
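
The `||=` assignments only run when the global is falsy, so a native `TextEncoder`/`TextDecoder` always takes precedence over the polyfill. Spelled out, the equivalent guard is:

```ts
import { TextEncoder, TextDecoder } from "./text-encoder-polyfill";

// Install the polyfill only where the platform lacks a native implementation.
if (!globalThis.TextEncoder) (globalThis as any).TextEncoder = TextEncoder;
if (!globalThis.TextDecoder) (globalThis as any).TextDecoder = TextDecoder;
```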
diff --git a/packages/bun-framework-next/renderDocument.tsx b/packages/bun-framework-next/renderDocument.tsx
index 463395863..615224f0d 100644
--- a/packages/bun-framework-next/renderDocument.tsx
+++ b/packages/bun-framework-next/renderDocument.tsx
@@ -243,6 +243,7 @@ function renderDocument(
scriptLoader,
locale,
disableOptimizedLoading,
+ useMaybeDeferContent,
...docProps,
};
return (
@@ -567,6 +568,17 @@ export async function render({
</AppContainer>
);
},
+ defaultGetInitialProps: async (
+ docCtx: DocumentContext
+ ): Promise<DocumentInitialProps> => {
+ const enhanceApp = (AppComp: any) => {
+ return (props: any) => <AppComp {...props} />;
+ };
+
+ const { html, head } = await docCtx.renderPage({ enhanceApp });
+ // const styles = jsxStyleRegistry.styles();
+ return { html, head };
+ },
};
var props = await loadGetInitialProps(AppComponent, {
@@ -789,6 +801,7 @@ export async function render({
isPreview: isPreview === true ? true : undefined,
autoExport: isAutoExport === true ? true : undefined,
nextExport: nextExport === true ? true : undefined,
+ useMaybeDeferContent,
});
const bodyRenderIdx = html.indexOf(BODY_RENDER_TARGET);
html =
@@ -812,3 +825,10 @@ export async function render({
);
}
}
+
+export function useMaybeDeferContent(
+ _name: string,
+ contentFn: () => JSX.Element
+): [boolean, JSX.Element] {
+ return [false, contentFn()];
+}
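
`useMaybeDeferContent` is the hook Next.js 12's `_document` calls to decide whether to defer rendering a subtree; the stub above always answers "render eagerly". A hypothetical caller, just to show the contract:

```tsx
import * as React from "react";
import { useMaybeDeferContent } from "./renderDocument";

function BodyContent() {
  // First element: whether the content was deferred; second: the rendered element.
  const [isDeferred, content] = useMaybeDeferContent("BODY", () => (
    <main>page body</main>
  ));
  // With the stub, isDeferred is always false, so the content renders inline.
  return isDeferred ? null : content;
}
```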
diff --git a/packages/bun-framework-next/text-encoder-polyfill.js b/packages/bun-framework-next/text-encoder-polyfill.js
new file mode 100644
index 000000000..3dd95009f
--- /dev/null
+++ b/packages/bun-framework-next/text-encoder-polyfill.js
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2017 Sam Thorogood. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+/**
+ * @fileoverview Polyfill for TextEncoder and TextDecoder.
+ *
+ * You probably want `text.min.js`, and not this file directly.
+ */
+
+// used for FastTextDecoder
+const validUtfLabels = ["utf-8", "utf8", "unicode-1-1-utf-8"];
+
+/**
+ * @constructor
+ */
+function FastTextEncoder() {
+ // This does not accept an encoding, and always uses UTF-8:
+ // https://www.w3.org/TR/encoding/#dom-textencoder
+}
+
+Object.defineProperty(FastTextEncoder.prototype, "encoding", {
+ value: "utf-8",
+});
+
+/**
+ * @param {string} string
+ * @param {{stream: boolean}=} options
+ * @return {!Uint8Array}
+ */
+FastTextEncoder.prototype["encode"] = function (
+ string,
+ options = { stream: false }
+) {
+ if (options.stream) {
+ throw new Error(`Failed to encode: the 'stream' option is unsupported.`);
+ }
+
+ let pos = 0;
+ const len = string.length;
+
+ let at = 0; // output position
+ let tlen = Math.max(32, len + (len >>> 1) + 7); // 1.5x size
+ let target = new Uint8Array((tlen >>> 3) << 3); // ... but at 8 byte offset
+
+ while (pos < len) {
+ let value = string.charCodeAt(pos++);
+ if (value >= 0xd800 && value <= 0xdbff) {
+ // high surrogate
+ if (pos < len) {
+ const extra = string.charCodeAt(pos);
+ if ((extra & 0xfc00) === 0xdc00) {
+ ++pos;
+ value = ((value & 0x3ff) << 10) + (extra & 0x3ff) + 0x10000;
+ }
+ }
+ if (value >= 0xd800 && value <= 0xdbff) {
+ continue; // drop lone surrogate
+ }
+ }
+
+ // expand the buffer if we couldn't write 4 bytes
+ if (at + 4 > target.length) {
+ tlen += 8; // minimum extra
+ tlen *= 1.0 + (pos / string.length) * 2; // take 2x the remaining
+ tlen = (tlen >>> 3) << 3; // 8 byte offset
+
+ const update = new Uint8Array(tlen);
+ update.set(target);
+ target = update;
+ }
+
+ if ((value & 0xffffff80) === 0) {
+ // 1-byte
+ target[at++] = value; // ASCII
+ continue;
+ } else if ((value & 0xfffff800) === 0) {
+ // 2-byte
+ target[at++] = ((value >>> 6) & 0x1f) | 0xc0;
+ } else if ((value & 0xffff0000) === 0) {
+ // 3-byte
+ target[at++] = ((value >>> 12) & 0x0f) | 0xe0;
+ target[at++] = ((value >>> 6) & 0x3f) | 0x80;
+ } else if ((value & 0xffe00000) === 0) {
+ // 4-byte
+ target[at++] = ((value >>> 18) & 0x07) | 0xf0;
+ target[at++] = ((value >>> 12) & 0x3f) | 0x80;
+ target[at++] = ((value >>> 6) & 0x3f) | 0x80;
+ } else {
+ continue; // out of range
+ }
+
+ target[at++] = (value & 0x3f) | 0x80;
+ }
+
+ // Use subarray if slice isn't supported (IE11). This will use more memory
+ // because the original array still exists.
+ return target.slice ? target.slice(0, at) : target.subarray(0, at);
+};
+
+/**
+ * @constructor
+ * @param {string=} utfLabel
+ * @param {{fatal: boolean}=} options
+ */
+function FastTextDecoder(utfLabel = "utf-8", options = { fatal: false }) {
+ if (validUtfLabels.indexOf(utfLabel.toLowerCase()) === -1) {
+ throw new RangeError(
+ `Failed to construct 'TextDecoder': The encoding label provided ('${utfLabel}') is invalid.`
+ );
+ }
+ if (options.fatal) {
+ throw new Error(
+ `Failed to construct 'TextDecoder': the 'fatal' option is unsupported.`
+ );
+ }
+}
+
+Object.defineProperty(FastTextDecoder.prototype, "encoding", {
+ value: "utf-8",
+});
+
+Object.defineProperty(FastTextDecoder.prototype, "fatal", { value: false });
+
+Object.defineProperty(FastTextDecoder.prototype, "ignoreBOM", {
+ value: false,
+});
+
+/**
+ * @param {!Uint8Array} bytes
+ * @return {string}
+ */
+function decodeBuffer(bytes) {
+ return Buffer.from(bytes.buffer, bytes.byteOffset, bytes.byteLength).toString(
+ "utf-8"
+ );
+}
+
+/**
+ * @param {!Uint8Array} bytes
+ * @return {string}
+ */
+function decodeSyncXHR(bytes) {
+ const b = new Blob([bytes], { type: "text/plain;charset=UTF-8" });
+ const u = URL.createObjectURL(b);
+
+ // This hack will fail in non-Edgium Edge because sync XHRs are disabled (and
+ // possibly in other places), so ensure there's a fallback call.
+ try {
+ const x = new XMLHttpRequest();
+ x.open("GET", u, false);
+ x.send();
+ return x.responseText;
+ } catch (e) {
+ return decodeFallback(bytes);
+ } finally {
+ URL.revokeObjectURL(u);
+ }
+}
+
+/**
+ * @param {!Uint8Array} bytes
+ * @return {string}
+ */
+function decodeFallback(bytes) {
+ let inputIndex = 0;
+
+  // Create a working buffer for UTF-16 code units, but don't generate one
+  // which is too large for small input sizes. UTF-8 to UTF-16 conversion is
+  // going to be at most 1:1, if all code points are ASCII. The other extreme
+  // is 4-byte UTF-8, which results in two UTF-16 code units, but this is
+  // still 50% fewer entries in the output.
+ const pendingSize = Math.min(256 * 256, bytes.length + 1);
+ const pending = new Uint16Array(pendingSize);
+ const chunks = [];
+ let pendingIndex = 0;
+
+ for (;;) {
+ const more = inputIndex < bytes.length;
+
+ // If there's no more data or there'd be no room for two UTF-16 values,
+ // create a chunk. This isn't done at the end by simply slicing the data
+ // into equal sized chunks as we might hit a surrogate pair.
+ if (!more || pendingIndex >= pendingSize - 1) {
+ // nb. .apply and friends are *really slow*. Low-hanging fruit is to
+ // expand this to literally pass pending[0], pending[1], ... etc, but
+ // the output code expands pretty fast in this case.
+ chunks.push(
+ String.fromCharCode.apply(null, pending.subarray(0, pendingIndex))
+ );
+
+ if (!more) {
+ return chunks.join("");
+ }
+
+ // Move the buffer forward and create another chunk.
+ bytes = bytes.subarray(inputIndex);
+ inputIndex = 0;
+ pendingIndex = 0;
+ }
+
+ // The native TextDecoder will generate "REPLACEMENT CHARACTER" where the
+ // input data is invalid. Here, we blindly parse the data even if it's
+ // wrong: e.g., if a 3-byte sequence doesn't have two valid continuations.
+
+ const byte1 = bytes[inputIndex++];
+ if ((byte1 & 0x80) === 0) {
+ // 1-byte or null
+ pending[pendingIndex++] = byte1;
+ } else if ((byte1 & 0xe0) === 0xc0) {
+ // 2-byte
+ const byte2 = bytes[inputIndex++] & 0x3f;
+ pending[pendingIndex++] = ((byte1 & 0x1f) << 6) | byte2;
+ } else if ((byte1 & 0xf0) === 0xe0) {
+ // 3-byte
+ const byte2 = bytes[inputIndex++] & 0x3f;
+ const byte3 = bytes[inputIndex++] & 0x3f;
+ pending[pendingIndex++] = ((byte1 & 0x1f) << 12) | (byte2 << 6) | byte3;
+ } else if ((byte1 & 0xf8) === 0xf0) {
+ // 4-byte
+ const byte2 = bytes[inputIndex++] & 0x3f;
+ const byte3 = bytes[inputIndex++] & 0x3f;
+ const byte4 = bytes[inputIndex++] & 0x3f;
+
+ // this can be > 0xffff, so possibly generate surrogates
+ let codepoint =
+ ((byte1 & 0x07) << 0x12) | (byte2 << 0x0c) | (byte3 << 0x06) | byte4;
+ if (codepoint > 0xffff) {
+ // codepoint &= ~0x10000;
+ codepoint -= 0x10000;
+ pending[pendingIndex++] = ((codepoint >>> 10) & 0x3ff) | 0xd800;
+ codepoint = 0xdc00 | (codepoint & 0x3ff);
+ }
+ pending[pendingIndex++] = codepoint;
+ } else {
+ // invalid initial byte
+ }
+ }
+}
+
+// Decoding a string is pretty slow, but use alternative options where possible.
+let decodeImpl = decodeFallback;
+if (typeof Buffer === "function" && Buffer.from) {
+  // Buffer.from was added in Node v5.10.0.
+ decodeImpl = decodeBuffer;
+} else if (
+ typeof Blob === "function" &&
+ typeof URL === "function" &&
+ typeof URL.createObjectURL === "function"
+) {
+ // Blob and URL.createObjectURL are available from IE10, Safari 6, Chrome 19
+ // (all released in 2012), Firefox 19 (2013), ...
+ decodeImpl = decodeSyncXHR;
+}
+
+/**
+ * @param {(!ArrayBuffer|!ArrayBufferView)} buffer
+ * @param {{stream: boolean}=} options
+ * @return {string}
+ */
+FastTextDecoder.prototype["decode"] = function (
+ buffer,
+ options = { stream: false }
+) {
+ if (options["stream"]) {
+ throw new Error(`Failed to decode: the 'stream' option is unsupported.`);
+ }
+
+ let bytes;
+
+ if (buffer instanceof Uint8Array) {
+ // Accept Uint8Array instances as-is.
+ bytes = buffer;
+ } else if (buffer.buffer instanceof ArrayBuffer) {
+ // Look for ArrayBufferView, which isn't a real type, but basically
+ // represents all the valid TypedArray types plus DataView. They all have
+ // ".buffer" as an instance of ArrayBuffer.
+ bytes = new Uint8Array(buffer.buffer);
+ } else {
+ // The only other valid argument here is that "buffer" is an ArrayBuffer.
+ // We also try to convert anything else passed to a Uint8Array, as this
+ // catches anything that's array-like. Native code would throw here.
+ bytes = new Uint8Array(buffer);
+ }
+
+ return decodeImpl(/** @type {!Uint8Array} */ (bytes));
+};
+
+export { FastTextEncoder as TextEncoder };
+export { FastTextDecoder as TextDecoder };
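
A quick round trip through the polyfill, useful as a smoke test (minimal sketch):

```ts
import { TextEncoder, TextDecoder } from "./text-encoder-polyfill";

const bytes = new TextEncoder().encode("héllo 🌍");
// "é" encodes to 2 bytes; "🌍" is a surrogate pair in JS and encodes to 4.
console.log(bytes.length); // 11
console.log(new TextDecoder().decode(bytes)); // "héllo 🌍"
```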
diff --git a/src/bundler.zig b/src/bundler.zig
index 97f975fe9..15d190b0c 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -1254,10 +1254,13 @@ pub const Bundler = struct {
var package_path = path.text;
var file_path = path.text;
- if (resolve_result.package_json) |pkg| {
- if (std.mem.indexOfScalar(u32, this.always_bundled_package_hashes, pkg.hash) != null) {
+ if (resolve_result.package_json) |pkg_| {
+ var pkg: *const PackageJSON = pkg_;
+ if (std.mem.indexOfScalar(u32, this.always_bundled_package_hashes, pkg.hash)) |pkg_i| {
+ pkg = this.always_bundled_package_jsons[pkg_i];
const key_path_source_dir = pkg.source.key_path.sourceDir();
const default_source_dir = pkg.source.path.sourceDir();
+
if (strings.startsWith(path.text, key_path_source_dir)) {
import_path = path.text[key_path_source_dir.len..];
} else if (strings.startsWith(path.text, default_source_dir)) {
diff --git a/src/cli/bun_command.zig b/src/cli/bun_command.zig
index 51c913d5d..a25b33c88 100644
--- a/src/cli/bun_command.zig
+++ b/src/cli/bun_command.zig
@@ -84,6 +84,7 @@ pub const BunCommand = struct {
pub fn exec(
ctx: Command.Context,
) !void {
+ Global.configureAllocator(.{ .long_running = true });
var allocator = ctx.allocator;
var log = ctx.log;
estimated_input_lines_of_code_ = 0;
diff --git a/src/cli/create_command.zig b/src/cli/create_command.zig
index fcdc92a66..c559c97f2 100644
--- a/src/cli/create_command.zig
+++ b/src/cli/create_command.zig
@@ -261,6 +261,8 @@ pub const CreateCommand = struct {
var client: HTTPClient = undefined;
pub fn exec(ctx: Command.Context, positionals_: []const []const u8) !void {
+ Global.configureAllocator(.{ .long_running = false });
+
var create_options = try CreateOptions.parse(ctx, false);
const positionals = create_options.positionals;
diff --git a/src/cli/dev_command.zig b/src/cli/dev_command.zig
index 91dee1bbc..46c4a0e3c 100644
--- a/src/cli/dev_command.zig
+++ b/src/cli/dev_command.zig
@@ -1,9 +1,9 @@
const Server = @import("../http.zig").Server;
const Command = @import("../cli.zig").Command;
-
+const Global = @import("../global.zig").Global;
pub const DevCommand = struct {
pub fn exec(ctx: Command.Context) !void {
+ Global.configureAllocator(.{ .long_running = true });
try Server.start(ctx.allocator, ctx.args, @TypeOf(ctx.debug), ctx.debug);
-
}
};
diff --git a/src/cli/run_command.zig b/src/cli/run_command.zig
index 4593f5f6a..c4c9e5f10 100644
--- a/src/cli/run_command.zig
+++ b/src/cli/run_command.zig
@@ -487,6 +487,7 @@ pub const RunCommand = struct {
if (shebang.len > 2 and strings.eqlComptimeIgnoreLen(shebang[0..2], "#!")) {
break :possibly_open_with_bun_js;
}
+ Global.configureAllocator(.{ .long_running = true });
Run.boot(ctx, file, ctx.allocator.dupe(u8, file_path) catch unreachable) catch |err| {
if (Output.enable_ansi_colors) {
@@ -512,6 +513,9 @@ pub const RunCommand = struct {
}
}
}
+
+ Global.configureAllocator(.{ .long_running = false });
+
var args = ctx.args;
args.node_modules_bundle_path = null;
args.node_modules_bundle_path_server = null;
diff --git a/src/global.zig b/src/global.zig
index 90a5ca7b5..5d2370b53 100644
--- a/src/global.zig
+++ b/src/global.zig
@@ -1,7 +1,9 @@
const std = @import("std");
pub const Environment = @import("env.zig");
-pub const default_allocator: *std.mem.Allocator = if (isTest)
+const use_mimalloc = !Environment.isTest and Environment.isNative;
+
+pub const default_allocator: *std.mem.Allocator = if (!use_mimalloc)
std.heap.c_allocator
else
@import("./memory_allocator.zig").c_allocator;
@@ -77,6 +79,7 @@ pub const Output = struct {
}
}
};
+
pub var enable_ansi_colors = isNative;
pub var enable_buffering = true;
pub var is_stdout_piped = false;
@@ -406,6 +409,23 @@ pub const Global = struct {
else
std.fmt.comptimePrint("0.0.{d}", .{build_id});
+ pub const AllocatorConfiguration = struct {
+ verbose: bool = false,
+ long_running: bool = false,
+ };
+
+ // Enabling huge pages slows down Bun by 8x or so
+ // Keeping this code for:
+ // 1. documentation that an attempt was made
+ // 2. if I want to configure allocator later
+ pub inline fn configureAllocator(config: AllocatorConfiguration) void {
+ // if (comptime !use_mimalloc) return;
+ // const Mimalloc = @import("./allocators/mimalloc.zig");
+ // Mimalloc.mi_option_set_enabled(Mimalloc.mi_option_verbose, config.verbose);
+ // Mimalloc.mi_option_set_enabled(Mimalloc.mi_option_large_os_pages, config.long_running);
+ // if (!config.long_running) Mimalloc.mi_option_set(Mimalloc.mi_option_reset_delay, 0);
+ }
+
pub fn panic(comptime fmt: string, args: anytype) noreturn {
@setCold(true);
if (comptime isWasm) {
diff --git a/src/import_record.zig b/src/import_record.zig
index adb6ea625..aa82e4731 100644
--- a/src/import_record.zig
+++ b/src/import_record.zig
@@ -97,6 +97,8 @@ pub const ImportRecord = struct {
// If true, this was originally written as a bare "import 'file'" statement
was_originally_bare_import: bool = false,
+ was_originally_require: bool = false,
+
kind: ImportKind,
pub const PrintMode = enum {
diff --git a/src/js_parser/js_parser.zig b/src/js_parser/js_parser.zig
index 84e6b0b13..11836bd6e 100644
--- a/src/js_parser/js_parser.zig
+++ b/src/js_parser/js_parser.zig
@@ -428,9 +428,9 @@ pub const ImportScanner = struct {
// This is a breaking change though. We can make it an option with some guardrail
// so maybe if it errors, it shows a suggestion "retry without trimming unused imports"
if (p.options.ts and found_imports and is_unused_in_typescript and !p.options.preserve_unused_imports_ts) {
- // Ignore import records with a pre-filled source index. These are
- // for injected files and we definitely do not want to trim these.
- if (!record.is_internal) {
+ // internal imports are presumed to be always used
+ // require statements cannot be stripped
+ if (!record.is_internal and !record.was_originally_require) {
record.is_unused = true;
continue;
}
@@ -3211,6 +3211,10 @@ pub fn NewParser(
if (!p.options.transform_require_to_import) {
return p.e(E.Require{ .import_record_index = import_record_index }, arg.loc);
}
+
+ p.import_records.items[import_record_index].was_originally_require = true;
+ p.import_records.items[import_record_index].contains_import_star = true;
+
const symbol_name = p.import_records.items[import_record_index].path.name.nonUniqueNameString(p.allocator);
const cjs_import_name = std.fmt.allocPrint(
p.allocator,
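
The new `was_originally_require` flag matters because TypeScript-style unused-import elision (which this parser performs) is only safe for ESM import syntax; a `require()` call is an expression that executes module code, so it must be kept even when its result is never used. In TypeScript terms, assuming CommonJS interop:

```ts
// Safe for the parser to strip when `a` is never referenced:
// under TS's elision rules, an unused ESM import binding can be dropped.
import a from "./side-effects";

// Not safe to strip even if `b` is unused: require() runs the module,
// and its side effects may be the entire point.
const b = require("./side-effects");
```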
diff --git a/src/logger.zig b/src/logger.zig
index 931889aab..b9d2b62ec 100644
--- a/src/logger.zig
+++ b/src/logger.zig
@@ -990,25 +990,26 @@ pub const Source = struct {
}
pub fn initErrorPosition(self: *const Source, _offset: Loc) ErrorPosition {
- var prev_code_point: u21 = 0;
+ var prev_code_point: i32 = 0;
var offset: usize = std.math.min(if (_offset.start < 0) 0 else @intCast(usize, _offset.start), @maximum(self.contents.len, 1) - 1);
const contents = self.contents;
- var iter = unicode.Utf8Iterator{
+ var iter_ = strings.CodepointIterator{
.bytes = self.contents[0..offset],
.i = 0,
};
+ var iter = strings.CodepointIterator.Cursor{};
var line_start: usize = 0;
var line_count: usize = 1;
var column_number: usize = 1;
- while (iter.nextCodepoint()) |code_point| {
- switch (code_point) {
+ while (iter_.next(&iter)) {
+ switch (iter.c) {
'\n' => {
column_number = 1;
- line_start = iter.i + 1;
+ line_start = iter.width + iter.i;
if (prev_code_point != '\r') {
line_count += 1;
}
@@ -1016,12 +1017,12 @@ pub const Source = struct {
'\r' => {
column_number = 0;
- line_start = iter.i + 1;
+ line_start = iter.width + iter.i;
line_count += 1;
},
0x2028, 0x2029 => {
- line_start = iter.i + 3; // These take three bytes to encode in UTF-8
+ line_start = iter.width + iter.i; // These take three bytes to encode in UTF-8
line_count += 1;
column_number = 1;
},
@@ -1030,19 +1031,20 @@ pub const Source = struct {
},
}
- prev_code_point = code_point;
+ prev_code_point = iter.c;
}
- iter = unicode.Utf8Iterator{
+ iter_ = strings.CodepointIterator{
.bytes = self.contents[offset..],
.i = 0,
};
+ iter = strings.CodepointIterator.Cursor{};
// Scan to the end of the line (or end of file if this is the last line)
var line_end: usize = contents.len;
- loop: while (iter.nextCodepoint()) |code_point| {
- switch (code_point) {
+ loop: while (iter_.next(&iter)) {
+ switch (iter.c) {
'\r', '\n', 0x2028, 0x2029 => {
line_end = offset + iter.i;
break :loop;