about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/bun.js/api/JSBundler.zig7
-rw-r--r--src/bun.js/api/JSTranspiler.zig7
-rw-r--r--src/bun.js/javascript.zig1
-rw-r--r--src/bun.js/node/types.zig18
-rw-r--r--src/bun.js/webcore/blob.zig49
-rw-r--r--src/bun.js/webcore/response.zig323
-rw-r--r--src/bun_js.zig20
-rw-r--r--src/bundler.zig1
-rw-r--r--src/bundler/bundle_v2.zig1
-rw-r--r--src/bunfig.zig9
-rw-r--r--src/cli.zig9
-rw-r--r--src/cli/build_command.zig10
-rw-r--r--src/deps/libuwsockets.cpp5
-rw-r--r--src/deps/uws.zig19
-rw-r--r--src/feature_flags.zig2
-rw-r--r--src/fs.zig73
-rw-r--r--src/http.zig11
-rw-r--r--src/http_client_async.zig174
-rw-r--r--src/js_parser.zig25
-rw-r--r--src/linker.zig6
-rw-r--r--src/options.zig9
-rw-r--r--src/runtime.zig2
-rw-r--r--src/url.zig14
23 files changed, 680 insertions, 115 deletions
diff --git a/src/bun.js/api/JSBundler.zig b/src/bun.js/api/JSBundler.zig
index 741d956bf..8e85f1190 100644
--- a/src/bun.js/api/JSBundler.zig
+++ b/src/bun.js/api/JSBundler.zig
@@ -61,6 +61,7 @@ pub const JSBundler = struct {
code_splitting: bool = false,
minify: Minify = .{},
server_components: ServerComponents = ServerComponents{},
+ no_macros: bool = false,
names: Names = .{},
external: bun.StringSet = bun.StringSet.init(bun.default_allocator),
@@ -188,6 +189,12 @@ pub const JSBundler = struct {
}
}
+ if (config.getTruthy(globalThis, "macros")) |macros_flag| {
+ if (!macros_flag.coerce(bool, globalThis)) {
+ this.no_macros = true;
+ }
+ }
+
if (try config.getOptionalEnum(globalThis, "target", options.Target)) |target| {
this.target = target;
}
diff --git a/src/bun.js/api/JSTranspiler.zig b/src/bun.js/api/JSTranspiler.zig
index 8a59f59e7..c58029c5e 100644
--- a/src/bun.js/api/JSTranspiler.zig
+++ b/src/bun.js/api/JSTranspiler.zig
@@ -75,6 +75,7 @@ const TranspilerOptions = struct {
minify_whitespace: bool = false,
minify_identifiers: bool = false,
minify_syntax: bool = false,
+ no_macros: bool = false,
};
// Mimalloc gets unstable if we try to move this to a different thread
@@ -479,6 +480,10 @@ fn transformOptionsFromJSC(globalObject: JSC.C.JSContextRef, temp_allocator: std
if (object.getIfPropertyExists(globalThis, "macro")) |macros| {
macros: {
if (macros.isUndefinedOrNull()) break :macros;
+ if (macros.isBoolean()) {
+ transpiler.no_macros = !macros.asBoolean();
+ break :macros;
+ }
const kind = macros.jsType();
const is_object = kind.isObject();
if (!(kind.isStringLike() or is_object)) {
@@ -775,7 +780,7 @@ pub fn constructor(
globalThis.throwError(err, "Error creating transpiler");
return null;
};
-
+ bundler.options.no_macros = transpiler_options.no_macros;
bundler.configureLinkerWithAutoJSX(false);
bundler.options.env.behavior = .disable;
bundler.configureDefines() catch |err| {
diff --git a/src/bun.js/javascript.zig b/src/bun.js/javascript.zig
index 4a1fcbcb1..5c158a4fb 100644
--- a/src/bun.js/javascript.zig
+++ b/src/bun.js/javascript.zig
@@ -392,6 +392,7 @@ pub const VirtualMachine = struct {
macros: MacroMap,
macro_entry_points: std.AutoArrayHashMap(i32, *MacroEntryPoint),
macro_mode: bool = false,
+ no_macros: bool = false,
has_any_macro_remappings: bool = false,
is_from_devserver: bool = false,
diff --git a/src/bun.js/node/types.zig b/src/bun.js/node/types.zig
index bbe2ea654..f090b3a12 100644
--- a/src/bun.js/node/types.zig
+++ b/src/bun.js/node/types.zig
@@ -1775,17 +1775,19 @@ pub const Path = struct {
var path_slice: JSC.ZigString.Slice = args_ptr[0].toSlice(globalThis, heap_allocator);
defer path_slice.deinit();
var path = path_slice.slice();
- var path_name = Fs.PathName.init(path);
- var root = JSC.ZigString.init(path_name.dir);
- const is_absolute = (isWindows and isZigStringAbsoluteWindows(root)) or (!isWindows and path_name.dir.len > 0 and path_name.dir[0] == '/');
-
+ var path_name = Fs.NodeJSPathName.init(path);
var dir = JSC.ZigString.init(path_name.dir);
+ const is_absolute = (isWindows and isZigStringAbsoluteWindows(dir)) or (!isWindows and path.len > 0 and path[0] == '/');
+
+    // if it's not absolute, root must be empty
+ var root = JSC.ZigString.Empty;
if (is_absolute) {
- root = JSC.ZigString.Empty;
- if (path_name.dir.len == 0)
- dir = JSC.ZigString.init(if (isWindows) std.fs.path.sep_str_windows else std.fs.path.sep_str_posix);
+ root = JSC.ZigString.init(if (isWindows) std.fs.path.sep_str_windows else std.fs.path.sep_str_posix);
+        // if it is absolute and dir is empty, then dir = root
+ if (path_name.dir.len == 0) {
+ dir = root;
+ }
}
-
var base = JSC.ZigString.init(path_name.base);
var name_ = JSC.ZigString.init(path_name.filename);
var ext = JSC.ZigString.init(path_name.ext);
diff --git a/src/bun.js/webcore/blob.zig b/src/bun.js/webcore/blob.zig
index 591150e12..a5d3c968d 100644
--- a/src/bun.js/webcore/blob.zig
+++ b/src/bun.js/webcore/blob.zig
@@ -85,6 +85,7 @@ pub const Blob = struct {
store: ?*Store = null,
content_type: string = "",
content_type_allocated: bool = false,
+ content_type_was_set: bool = false,
/// JavaScriptCore strings are either latin1 or UTF-16
/// When UTF-16, they're nearly always due to non-ascii characters
@@ -111,6 +112,10 @@ pub const Blob = struct {
return bun.FormData.AsyncFormData.init(this.allocator orelse bun.default_allocator, encoding) catch unreachable;
}
+ pub fn hasContentTypeFromUser(this: *const Blob) bool {
+ return this.content_type_was_set or (this.store != null and this.store.?.data == .file);
+ }
+
const FormDataContext = struct {
allocator: std.mem.Allocator,
joiner: StringJoiner,
@@ -228,6 +233,7 @@ pub const Blob = struct {
var blob = Blob.initWithStore(store, globalThis);
blob.content_type = store.mime_type.value;
+ blob.content_type_was_set = true;
return blob;
}
@@ -268,6 +274,7 @@ pub const Blob = struct {
var blob = Blob.initWithStore(store, globalThis);
blob.content_type = std.fmt.allocPrint(allocator, "multipart/form-data; boundary=\"{s}\"", .{boundary}) catch unreachable;
blob.content_type_allocated = true;
+ blob.content_type_was_set = true;
return blob;
}
@@ -288,7 +295,7 @@ pub const Blob = struct {
export fn Blob__dupe(ptr: *anyopaque) *Blob {
var this = bun.cast(*Blob, ptr);
var new = bun.default_allocator.create(Blob) catch unreachable;
- new.* = this.dupe();
+ new.* = this.dupeWithContentType(true);
new.allocator = bun.default_allocator;
return new;
}
@@ -2527,6 +2534,7 @@ pub const Blob = struct {
blob.content_type = content_type;
blob.content_type_allocated = content_type_was_allocated;
+ blob.content_type_was_set = this.content_type_was_set or content_type_was_allocated;
var blob_ = allocator.create(Blob) catch unreachable;
blob_.* = blob;
@@ -2548,13 +2556,13 @@ pub const Blob = struct {
) callconv(.C) JSValue {
if (this.content_type.len > 0) {
if (this.content_type_allocated) {
- return ZigString.init(this.content_type).toValue(globalThis);
+ return ZigString.init(this.content_type).toValueGC(globalThis);
}
return ZigString.init(this.content_type).toValueGC(globalThis);
}
if (this.store) |store| {
- return ZigString.init(store.mime_type.value).toValue(globalThis);
+ return ZigString.init(store.mime_type.value).toValueGC(globalThis);
}
return ZigString.Empty.toValue(globalThis);
@@ -2754,6 +2762,8 @@ pub const Blob = struct {
if (!strings.isAllASCII(slice)) {
break :inner;
}
+ blob.content_type_was_set = true;
+
if (globalThis.bunVM().mimeType(slice)) |mime| {
blob.content_type = mime.value;
break :inner;
@@ -2769,6 +2779,7 @@ pub const Blob = struct {
if (blob.content_type.len == 0) {
blob.content_type = "";
+ blob.content_type_was_set = false;
}
},
}
@@ -2870,8 +2881,33 @@ pub const Blob = struct {
/// This creates a new view
/// and increment the reference count
pub fn dupe(this: *const Blob) Blob {
+ return this.dupeWithContentType(false);
+ }
+
+ pub fn dupeWithContentType(this: *const Blob, include_content_type: bool) Blob {
if (this.store != null) this.store.?.ref();
var duped = this.*;
+ if (duped.content_type_allocated and duped.allocator != null and !include_content_type) {
+
+ // for now, we just want to avoid a use-after-free here
+ if (JSC.VirtualMachine.get().mimeType(duped.content_type)) |mime| {
+ duped.content_type = mime.value;
+ } else {
+            // TODO: fix this
+            // this is a bug: when the content type is not a registered
+            // MIME type, duplicating the blob silently drops it.
+ duped.content_type = "";
+ }
+
+ duped.content_type_allocated = false;
+ duped.content_type_was_set = false;
+ if (this.content_type_was_set) {
+ duped.content_type_was_set = duped.content_type.len > 0;
+ }
+ } else if (duped.content_type_allocated and duped.allocator != null and include_content_type) {
+ duped.content_type = bun.default_allocator.dupe(u8, this.content_type) catch @panic("Out of memory");
+ }
+
duped.allocator = null;
return duped;
}
@@ -3477,6 +3513,13 @@ pub const AnyBlob = union(enum) {
// InlineBlob: InlineBlob,
InternalBlob: InternalBlob,
+ pub fn hasContentTypeFromUser(this: AnyBlob) bool {
+ return switch (this) {
+ .Blob => this.Blob.hasContentTypeFromUser(),
+ .InternalBlob => false,
+ };
+ }
+
pub fn toJSON(this: *AnyBlob, global: *JSGlobalObject, comptime lifetime: JSC.WebCore.Lifetime) JSValue {
switch (this.*) {
.Blob => return this.Blob.toJSON(global, lifetime),
diff --git a/src/bun.js/webcore/response.zig b/src/bun.js/webcore/response.zig
index 8d1bfb961..ad3857685 100644
--- a/src/bun.js/webcore/response.zig
+++ b/src/bun.js/webcore/response.zig
@@ -629,7 +629,7 @@ pub const Fetch = struct {
result: HTTPClient.HTTPClientResult = .{},
javascript_vm: *VirtualMachine = undefined,
global_this: *JSGlobalObject = undefined,
- request_body: AnyBlob = undefined,
+ request_body: HTTPRequestBody = undefined,
response_buffer: MutableString = undefined,
request_headers: Headers = Headers{ .allocator = undefined },
promise: JSC.JSPromise.Strong,
@@ -647,6 +647,38 @@ pub const Fetch = struct {
abort_reason: JSValue = JSValue.zero,
// Custom Hostname
hostname: ?[]u8 = null,
+
+ pub const HTTPRequestBody = union(enum) {
+ AnyBlob: AnyBlob,
+ Sendfile: HTTPClient.Sendfile,
+
+ pub fn store(this: *HTTPRequestBody) ?*JSC.WebCore.Blob.Store {
+ return switch (this.*) {
+ .AnyBlob => this.AnyBlob.store(),
+ else => null,
+ };
+ }
+
+ pub fn slice(this: *const HTTPRequestBody) []const u8 {
+ return switch (this.*) {
+ .AnyBlob => this.AnyBlob.slice(),
+ else => "",
+ };
+ }
+
+ pub fn detach(this: *HTTPRequestBody) void {
+ switch (this.*) {
+ .AnyBlob => this.AnyBlob.detach(),
+ .Sendfile => {
+ if (@max(this.Sendfile.offset, this.Sendfile.remain) > 0)
+ _ = JSC.Node.Syscall.close(this.Sendfile.fd);
+ this.Sendfile.offset = 0;
+ this.Sendfile.remain = 0;
+ },
+ }
+ }
+ };
+
pub fn init(_: std.mem.Allocator) anyerror!FetchTasklet {
return FetchTasklet{};
}
@@ -850,12 +882,26 @@ pub const Fetch = struct {
proxy = jsc_vm.bundler.env.getHttpProxy(fetch_options.url);
}
- fetch_tasklet.http.?.* = HTTPClient.AsyncHTTP.init(allocator, fetch_options.method, fetch_options.url, fetch_options.headers.entries, fetch_options.headers.buf.items, &fetch_tasklet.response_buffer, fetch_tasklet.request_body.slice(), fetch_options.timeout, HTTPClient.HTTPClientResult.Callback.New(
- *FetchTasklet,
- FetchTasklet.callback,
- ).init(
- fetch_tasklet,
- ), proxy, if (fetch_tasklet.signal != null) &fetch_tasklet.aborted else null, fetch_options.hostname, fetch_options.redirect_type);
+ fetch_tasklet.http.?.* = HTTPClient.AsyncHTTP.init(
+ allocator,
+ fetch_options.method,
+ fetch_options.url,
+ fetch_options.headers.entries,
+ fetch_options.headers.buf.items,
+ &fetch_tasklet.response_buffer,
+ fetch_tasklet.request_body.slice(),
+ fetch_options.timeout,
+ HTTPClient.HTTPClientResult.Callback.New(
+ *FetchTasklet,
+ FetchTasklet.callback,
+ ).init(
+ fetch_tasklet,
+ ),
+ proxy,
+ if (fetch_tasklet.signal != null) &fetch_tasklet.aborted else null,
+ fetch_options.hostname,
+ fetch_options.redirect_type,
+ );
if (fetch_options.redirect_type != FetchRedirect.follow) {
fetch_tasklet.http.?.client.remaining_redirect_count = 0;
@@ -865,6 +911,12 @@ pub const Fetch = struct {
fetch_tasklet.http.?.client.verbose = fetch_options.verbose;
fetch_tasklet.http.?.client.disable_keepalive = fetch_options.disable_keepalive;
+ if (fetch_tasklet.request_body == .Sendfile) {
+ std.debug.assert(fetch_options.url.isHTTP());
+ std.debug.assert(fetch_options.proxy == null);
+ fetch_tasklet.http.?.request_body = .{ .sendfile = fetch_tasklet.request_body.Sendfile };
+ }
+
if (fetch_tasklet.signal) |signal| {
fetch_tasklet.signal = signal.listen(FetchTasklet, fetch_tasklet, FetchTasklet.abortListener);
}
@@ -886,7 +938,7 @@ pub const Fetch = struct {
const FetchOptions = struct {
method: Method,
headers: Headers,
- body: AnyBlob,
+ body: HTTPRequestBody,
timeout: usize,
disable_timeout: bool,
disable_keepalive: bool,
@@ -961,6 +1013,14 @@ pub const Fetch = struct {
var url = ZigURL{};
var first_arg = args.nextEat().?;
+
+    // We must always get the Body before the Headers. That way, we can set
+ // the Content-Type header from the Blob if no Content-Type header is
+ // set in the Headers
+ //
+ // which is important for FormData.
+ // https://github.com/oven-sh/bun/issues/2264
+ //
var body: AnyBlob = AnyBlob{
.Blob = .{},
};
@@ -988,46 +1048,45 @@ pub const Fetch = struct {
method = request.method;
}
+ if (options.fastGet(ctx.ptr(), .body)) |body__| {
+ if (Body.Value.fromJS(ctx.ptr(), body__)) |body_const| {
+ var body_value = body_const;
+ // TODO: buffer ReadableStream?
+ // we have to explicitly check for InternalBlob
+ body = body_value.useAsAnyBlob();
+ } else {
+ // clean hostname if any
+ if (hostname) |host| {
+ bun.default_allocator.free(host);
+ }
+ // an error was thrown
+ return JSC.JSValue.jsUndefined();
+ }
+ } else {
+ body = request.body.value.useAsAnyBlob();
+ }
+
if (options.fastGet(ctx.ptr(), .headers)) |headers_| {
if (headers_.as(FetchHeaders)) |headers__| {
if (headers__.fastGet(JSC.FetchHeaders.HTTPHeaderName.Host)) |_hostname| {
hostname = _hostname.toOwnedSliceZ(bun.default_allocator) catch unreachable;
}
- headers = Headers.from(headers__, bun.default_allocator) catch unreachable;
+ headers = Headers.from(headers__, bun.default_allocator, .{ .body = &body }) catch unreachable;
// TODO: make this one pass
} else if (FetchHeaders.createFromJS(ctx.ptr(), headers_)) |headers__| {
if (headers__.fastGet(JSC.FetchHeaders.HTTPHeaderName.Host)) |_hostname| {
hostname = _hostname.toOwnedSliceZ(bun.default_allocator) catch unreachable;
}
- headers = Headers.from(headers__, bun.default_allocator) catch unreachable;
+ headers = Headers.from(headers__, bun.default_allocator, .{ .body = &body }) catch unreachable;
headers__.deref();
} else if (request.headers) |head| {
if (head.fastGet(JSC.FetchHeaders.HTTPHeaderName.Host)) |_hostname| {
hostname = _hostname.toOwnedSliceZ(bun.default_allocator) catch unreachable;
}
- headers = Headers.from(head, bun.default_allocator) catch unreachable;
+ headers = Headers.from(head, bun.default_allocator, .{ .body = &body }) catch unreachable;
}
} else if (request.headers) |head| {
- headers = Headers.from(head, bun.default_allocator) catch unreachable;
- }
-
- if (options.fastGet(ctx.ptr(), .body)) |body__| {
- if (Body.Value.fromJS(ctx.ptr(), body__)) |body_const| {
- var body_value = body_const;
- // TODO: buffer ReadableStream?
- // we have to explicitly check for InternalBlob
-
- body = body_value.useAsAnyBlob();
- } else {
- // clean hostname if any
- if (hostname) |host| {
- bun.default_allocator.free(host);
- }
- // an error was thrown
- return JSC.JSValue.jsUndefined();
- }
- } else {
- body = request.body.value.useAsAnyBlob();
+ headers = Headers.from(head, bun.default_allocator, .{ .body = &body }) catch unreachable;
}
if (options.get(ctx, "timeout")) |timeout_value| {
@@ -1100,13 +1159,13 @@ pub const Fetch = struct {
}
} else {
method = request.method;
+ body = request.body.value.useAsAnyBlob();
if (request.headers) |head| {
if (head.fastGet(JSC.FetchHeaders.HTTPHeaderName.Host)) |_hostname| {
hostname = _hostname.toOwnedSliceZ(bun.default_allocator) catch unreachable;
}
- headers = Headers.from(head, bun.default_allocator) catch unreachable;
+ headers = Headers.from(head, bun.default_allocator, .{ .body = &body }) catch unreachable;
}
- body = request.body.value.useAsAnyBlob();
// no proxy only url
url = ZigURL.parse(getAllocator(ctx).dupe(u8, request.url) catch unreachable);
url_proxy_buffer = url.href;
@@ -1124,19 +1183,35 @@ pub const Fetch = struct {
method = Method.which(slice_.slice()) orelse .GET;
}
+ if (options.fastGet(ctx.ptr(), .body)) |body__| {
+ if (Body.Value.fromJS(ctx.ptr(), body__)) |body_const| {
+ var body_value = body_const;
+ // TODO: buffer ReadableStream?
+ // we have to explicitly check for InternalBlob
+ body = body_value.useAsAnyBlob();
+ } else {
+ // clean hostname if any
+ if (hostname) |host| {
+ bun.default_allocator.free(host);
+ }
+ // an error was thrown
+ return JSC.JSValue.jsUndefined();
+ }
+ }
+
if (options.fastGet(ctx.ptr(), .headers)) |headers_| {
if (headers_.as(FetchHeaders)) |headers__| {
if (headers__.fastGet(JSC.FetchHeaders.HTTPHeaderName.Host)) |_hostname| {
hostname = _hostname.toOwnedSliceZ(bun.default_allocator) catch unreachable;
}
- headers = Headers.from(headers__, bun.default_allocator) catch unreachable;
+ headers = Headers.from(headers__, bun.default_allocator, .{ .body = &body }) catch unreachable;
// TODO: make this one pass
} else if (FetchHeaders.createFromJS(ctx.ptr(), headers_)) |headers__| {
defer headers__.deref();
if (headers__.fastGet(JSC.FetchHeaders.HTTPHeaderName.Host)) |_hostname| {
hostname = _hostname.toOwnedSliceZ(bun.default_allocator) catch unreachable;
}
- headers = Headers.from(headers__, bun.default_allocator) catch unreachable;
+ headers = Headers.from(headers__, bun.default_allocator, .{ .body = &body }) catch unreachable;
} else {
// Converting the headers failed; return null and
// let the set exception get thrown
@@ -1144,22 +1219,6 @@ pub const Fetch = struct {
}
}
- if (options.fastGet(ctx.ptr(), .body)) |body__| {
- if (Body.Value.fromJS(ctx.ptr(), body__)) |body_const| {
- var body_value = body_const;
- // TODO: buffer ReadableStream?
- // we have to explicitly check for InternalBlob
- body = body_value.useAsAnyBlob();
- } else {
- // clean hostname if any
- if (hostname) |host| {
- bun.default_allocator.free(host);
- }
- // an error was thrown
- return JSC.JSValue.jsUndefined();
- }
- }
-
if (options.get(ctx, "timeout")) |timeout_value| {
if (timeout_value.isBoolean()) {
disable_timeout = !timeout_value.asBoolean();
@@ -1324,6 +1383,125 @@ pub const Fetch = struct {
return JSPromise.rejectedPromiseValue(globalThis, err);
}
+ if (headers == null and body.size() > 0 and body.hasContentTypeFromUser()) {
+ headers = Headers.from(
+ null,
+ bun.default_allocator,
+ .{ .body = &body },
+ ) catch unreachable;
+ }
+
+ var http_body = FetchTasklet.HTTPRequestBody{
+ .AnyBlob = body,
+ };
+
+ if (body.needsToReadFile()) {
+ prepare_body: {
+ const opened_fd_res: JSC.Node.Maybe(bun.FileDescriptor) = switch (body.Blob.store.?.data.file.pathlike) {
+ .fd => |fd| JSC.Node.Maybe(bun.FileDescriptor).errnoSysFd(JSC.Node.Syscall.system.dup(fd), .open, fd) orelse .{ .result = fd },
+ .path => |path| JSC.Node.Syscall.open(path.sliceZ(&globalThis.bunVM().nodeFS().sync_error_buf), std.os.O.RDONLY | std.os.O.NOCTTY, 0),
+ };
+
+ const opened_fd = switch (opened_fd_res) {
+ .err => |err| {
+ bun.default_allocator.free(url_proxy_buffer);
+
+ const rejected_value = JSPromise.rejectedPromiseValue(globalThis, err.toJSC(globalThis));
+ body.detach();
+ if (headers) |*headers_| {
+ headers_.buf.deinit(bun.default_allocator);
+ headers_.entries.deinit(bun.default_allocator);
+ }
+
+ return rejected_value;
+ },
+ .result => |fd| fd,
+ };
+
+ if (proxy == null and bun.HTTP.Sendfile.isEligible(url)) {
+ use_sendfile: {
+ const stat: std.os.Stat = switch (JSC.Node.Syscall.fstat(opened_fd)) {
+ .result => |result| result,
+ // bail out for any reason
+ .err => break :use_sendfile,
+ };
+
+ if (Environment.isMac) {
+ // macOS only supports regular files for sendfile()
+ if (!std.os.S.ISREG(stat.mode)) {
+ break :use_sendfile;
+ }
+ }
+
+ // if it's < 32 KB, it's not worth it
+ if (stat.size < 32 * 1024) {
+ break :use_sendfile;
+ }
+
+ const original_size = body.Blob.size;
+ const stat_size = @intCast(Blob.SizeType, stat.size);
+ const blob_size = if (std.os.S.ISREG(stat.mode))
+ stat_size
+ else
+ @min(original_size, stat_size);
+
+ http_body = .{
+ .Sendfile = .{
+ .fd = opened_fd,
+ .remain = body.Blob.offset + original_size,
+ .offset = body.Blob.offset,
+ .content_size = blob_size,
+ },
+ };
+
+ if (std.os.S.ISREG(stat.mode)) {
+ http_body.Sendfile.offset = @min(http_body.Sendfile.offset, stat_size);
+ http_body.Sendfile.remain = @min(@max(http_body.Sendfile.remain, http_body.Sendfile.offset), stat_size) -| http_body.Sendfile.offset;
+ }
+ body.detach();
+
+ break :prepare_body;
+ }
+ }
+
+ // TODO: make this async + lazy
+ const res = JSC.Node.NodeFS.readFile(
+ globalThis.bunVM().nodeFS(),
+ .{
+ .encoding = .buffer,
+ .path = .{ .fd = opened_fd },
+ .offset = body.Blob.offset,
+ .max_size = body.Blob.size,
+ },
+ .sync,
+ );
+
+ if (body.Blob.store.?.data.file.pathlike == .path) {
+ _ = JSC.Node.Syscall.close(opened_fd);
+ }
+
+ switch (res) {
+ .err => |err| {
+ bun.default_allocator.free(url_proxy_buffer);
+
+ const rejected_value = JSPromise.rejectedPromiseValue(globalThis, err.toJSC(globalThis));
+ body.detach();
+ if (headers) |*headers_| {
+ headers_.buf.deinit(bun.default_allocator);
+ headers_.entries.deinit(bun.default_allocator);
+ }
+
+ return rejected_value;
+ },
+ .result => |result| {
+ body.detach();
+ body.from(std.ArrayList(u8).fromOwnedSlice(bun.default_allocator, @constCast(result.slice())));
+ http_body = .{ .AnyBlob = body };
+ },
+ }
+ }
+ }
+
// Only create this after we have validated all the input.
// or else we will leak it
var promise = JSPromise.Strong.init(globalThis);
@@ -1340,7 +1518,7 @@ pub const Fetch = struct {
.headers = headers orelse Headers{
.allocator = bun.default_allocator,
},
- .body = body,
+ .body = http_body,
.timeout = std.time.ns_per_hour,
.disable_keepalive = disable_keepalive,
.disable_timeout = disable_timeout,
@@ -1376,15 +1554,31 @@ pub const Headers = struct {
"";
}
- pub fn from(headers_ref: *FetchHeaders, allocator: std.mem.Allocator) !Headers {
+ pub const Options = struct {
+ body: ?*const AnyBlob = null,
+ };
+
+ pub fn from(fetch_headers_ref: ?*FetchHeaders, allocator: std.mem.Allocator, options: Options) !Headers {
var header_count: u32 = 0;
var buf_len: u32 = 0;
- headers_ref.count(&header_count, &buf_len);
+ if (fetch_headers_ref) |headers_ref|
+ headers_ref.count(&header_count, &buf_len);
var headers = Headers{
.entries = .{},
.buf = .{},
.allocator = allocator,
};
+ const buf_len_before_content_type = buf_len;
+ const needs_content_type = brk: {
+ if (options.body) |body| {
+ if (body.hasContentTypeFromUser() and (fetch_headers_ref == null or !fetch_headers_ref.?.fastHas(.ContentType))) {
+ header_count += 1;
+ buf_len += @truncate(u32, body.contentType().len + "Content-Type".len);
+ break :brk true;
+ }
+ }
+ break :brk false;
+ };
headers.entries.ensureTotalCapacity(allocator, header_count) catch unreachable;
headers.entries.len = header_count;
headers.buf.ensureTotalCapacityPrecise(allocator, buf_len) catch unreachable;
@@ -1392,7 +1586,24 @@ pub const Headers = struct {
var sliced = headers.entries.slice();
var names = sliced.items(.name);
var values = sliced.items(.value);
- headers_ref.copyTo(names.ptr, values.ptr, headers.buf.items.ptr);
+ if (fetch_headers_ref) |headers_ref|
+ headers_ref.copyTo(names.ptr, values.ptr, headers.buf.items.ptr);
+
+ // TODO: maybe we should send Content-Type header first instead of last?
+ if (needs_content_type) {
+ bun.copy(u8, headers.buf.items[buf_len_before_content_type..], "Content-Type");
+ names[header_count - 1] = .{
+ .offset = buf_len_before_content_type,
+ .length = "Content-Type".len,
+ };
+
+ bun.copy(u8, headers.buf.items[buf_len_before_content_type + "Content-Type".len ..], options.body.?.contentType());
+ values[header_count - 1] = .{
+ .offset = buf_len_before_content_type + @as(u32, "Content-Type".len),
+ .length = @truncate(u32, options.body.?.contentType().len),
+ };
+ }
+
return headers;
}
};
@@ -1567,7 +1778,7 @@ pub const FetchEvent = struct {
var content_length: ?usize = null;
if (response.body.init.headers) |headers_ref| {
- var headers = Headers.from(headers_ref, request_context.allocator) catch unreachable;
+ var headers = Headers.from(headers_ref, request_context.allocator, .{}) catch unreachable;
var i: usize = 0;
while (i < headers.entries.len) : (i += 1) {
diff --git a/src/bun_js.zig b/src/bun_js.zig
index 12876cae8..fd124a8ac 100644
--- a/src/bun_js.zig
+++ b/src/bun_js.zig
@@ -90,8 +90,14 @@ pub const Run = struct {
// b.options.minify_syntax = ctx.bundler_options.minify_syntax;
- if (ctx.debug.macros) |macros| {
- b.options.macro_remap = macros;
+ switch (ctx.debug.macros) {
+ .disable => {
+ b.options.no_macros = true;
+ },
+ .map => |macros| {
+ b.options.macro_remap = macros;
+ },
+ .unspecified => {},
}
b.configureRouter(false) catch {
@@ -175,8 +181,14 @@ pub const Run = struct {
// b.options.minify_syntax = ctx.bundler_options.minify_syntax;
- if (ctx.debug.macros) |macros| {
- b.options.macro_remap = macros;
+ switch (ctx.debug.macros) {
+ .disable => {
+ b.options.no_macros = true;
+ },
+ .map => |macros| {
+ b.options.macro_remap = macros;
+ },
+ .unspecified => {},
}
b.configureRouter(false) catch {
diff --git a/src/bundler.zig b/src/bundler.zig
index f3296134e..ea8222870 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -1388,6 +1388,7 @@ pub const Bundler = struct {
opts.features.trim_unused_imports = bundler.options.trim_unused_imports orelse loader.isTypeScript();
opts.features.should_fold_typescript_constant_expressions = loader.isTypeScript() or target.isBun() or bundler.options.minify_syntax;
opts.features.dynamic_require = target.isBun();
+ opts.features.no_macros = bundler.options.no_macros;
opts.transform_only = bundler.options.transform_only;
// @bun annotation
diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig
index 967bfaa36..2a97b98ed 100644
--- a/src/bundler/bundle_v2.zig
+++ b/src/bundler/bundle_v2.zig
@@ -1599,6 +1599,7 @@ pub const BundleV2 = struct {
completion.env,
);
bundler.options.jsx = config.jsx;
+ bundler.options.no_macros = config.no_macros;
bundler.options.react_server_components = config.server_components.client.items.len > 0 or config.server_components.server.items.len > 0;
bundler.options.loaders = try options.loadersFromTransformOptions(allocator, config.loaders, config.target);
bundler.options.entry_naming = config.names.entry_point.data;
diff --git a/src/bunfig.zig b/src/bunfig.zig
index af3842451..9df2978b0 100644
--- a/src/bunfig.zig
+++ b/src/bunfig.zig
@@ -579,8 +579,13 @@ pub const Bunfig = struct {
}
if (json.get("macros")) |expr| {
- // technical debt
- this.ctx.debug.macros = PackageJSON.parseMacrosJSON(allocator, expr, this.log, this.source);
+ if (expr.data == .e_boolean) {
+ if (expr.data.e_boolean.value == false) {
+ this.ctx.debug.macros = .{ .disable = {} };
+ }
+ } else {
+ this.ctx.debug.macros = .{ .map = PackageJSON.parseMacrosJSON(allocator, expr, this.log, this.source) };
+ }
Analytics.Features.macros = true;
}
diff --git a/src/cli.zig b/src/cli.zig
index 0ad948ac7..dc1ae0cdc 100644
--- a/src/cli.zig
+++ b/src/cli.zig
@@ -157,6 +157,7 @@ pub const Arguments = struct {
clap.parseParam("--minify-syntax Minify syntax and inline data (experimental)") catch unreachable,
clap.parseParam("--minify-whitespace Minify whitespace (experimental)") catch unreachable,
clap.parseParam("--minify-identifiers Minify identifiers") catch unreachable,
+ clap.parseParam("--no-macros Disable macros from being executed in the bundler, transpiler and runtime") catch unreachable,
clap.parseParam("--target <STR> The intended execution environment for the bundle. \"browser\", \"bun\" or \"node\"") catch unreachable,
clap.parseParam("<POS>... ") catch unreachable,
};
@@ -744,6 +745,10 @@ pub const Arguments = struct {
ctx.log.level = logger.Log.default_log_level;
}
+ if (args.flag("--no-macros")) {
+ ctx.debug.macros = .{ .disable = {} };
+ }
+
opts.output_dir = output_dir;
if (output_file != null)
ctx.debug.output_file = output_file.?;
@@ -898,7 +903,7 @@ pub const Command = struct {
loaded_bunfig: bool = false,
// technical debt
- macros: ?MacroMap = null,
+ macros: MacroOptions = MacroOptions.unspecified,
editor: string = "",
package_bundle_map: bun.StringArrayHashMapUnmanaged(options.BundlePackage) = bun.StringArrayHashMapUnmanaged(options.BundlePackage){},
@@ -906,6 +911,8 @@ pub const Command = struct {
output_file: []const u8 = "",
};
+ pub const MacroOptions = union(enum) { unspecified: void, disable: void, map: MacroMap };
+
pub const HotReload = enum {
none,
hot,
diff --git a/src/cli/build_command.zig b/src/cli/build_command.zig
index 52b45c493..14414c7de 100644
--- a/src/cli/build_command.zig
+++ b/src/cli/build_command.zig
@@ -207,8 +207,14 @@ pub const BuildCommand = struct {
this_bundler.options.jsx.development = !this_bundler.options.production;
this_bundler.resolver.opts.jsx.development = this_bundler.options.jsx.development;
- if (ctx.debug.macros) |macros| {
- this_bundler.options.macro_remap = macros;
+ switch (ctx.debug.macros) {
+ .disable => {
+ this_bundler.options.no_macros = true;
+ },
+ .map => |macros| {
+ this_bundler.options.macro_remap = macros;
+ },
+ .unspecified => {},
}
// var env_loader = this_bundler.env;
diff --git a/src/deps/libuwsockets.cpp b/src/deps/libuwsockets.cpp
index ae6443cba..aa4889892 100644
--- a/src/deps/libuwsockets.cpp
+++ b/src/deps/libuwsockets.cpp
@@ -1572,4 +1572,9 @@ extern "C"
return uwsRes->getNativeHandle();
}
}
+
+ void us_socket_sendfile_needs_more(us_socket_t *s) {
+ s->context->loop->data.last_write_failed = 1;
+ us_poll_change(&s->p, s->context->loop, LIBUS_SOCKET_READABLE | LIBUS_SOCKET_WRITABLE);
+ }
}
diff --git a/src/deps/uws.zig b/src/deps/uws.zig
index c9f350a37..538756b71 100644
--- a/src/deps/uws.zig
+++ b/src/deps/uws.zig
@@ -43,6 +43,23 @@ pub fn NewSocketHandler(comptime ssl: bool) type {
pub fn getNativeHandle(this: ThisSocket) *NativeSocketHandleType(ssl) {
return @ptrCast(*NativeSocketHandleType(ssl), us_socket_get_native_handle(comptime ssl_int, this.socket).?);
}
+
+ pub fn fd(this: ThisSocket) i32 {
+ if (comptime ssl) {
+ @compileError("SSL sockets do not have a file descriptor accessible this way");
+ }
+
+ return @intCast(i32, @ptrToInt(us_socket_get_native_handle(0, this.socket)));
+ }
+
+ pub fn markNeedsMoreForSendfile(this: ThisSocket) void {
+ if (comptime ssl) {
+ @compileError("SSL sockets do not support sendfile yet");
+ }
+
+ us_socket_sendfile_needs_more(this.socket);
+ }
+
pub fn ext(this: ThisSocket, comptime ContextType: type) ?*ContextType {
const alignment = if (ContextType == *anyopaque)
@sizeOf(usize)
@@ -1882,3 +1899,5 @@ pub const State = enum(i32) {
return @enumToInt(this) & @enumToInt(State.HTTP_CONNECTION_CLOSE) != 0;
}
};
+
+extern fn us_socket_sendfile_needs_more(socket: *Socket) void;
diff --git a/src/feature_flags.zig b/src/feature_flags.zig
index cdfeacb10..0a0c920a4 100644
--- a/src/feature_flags.zig
+++ b/src/feature_flags.zig
@@ -170,3 +170,5 @@ pub const source_map_debug_id = true;
pub const alignment_tweak = false;
pub const export_star_redirect = false;
+
+pub const streaming_file_uploads_for_http_client = true;
diff --git a/src/fs.zig b/src/fs.zig
index 6e1da47f4..c0f3cd9dd 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -1190,13 +1190,84 @@ pub const FileSystem = struct {
pub const Directory = struct { path: Path, contents: []string };
pub const File = struct { path: Path, contents: string };
-pub const PathName = struct {
+pub const NodeJSPathName = struct {
base: string,
dir: string,
/// includes the leading .
ext: string,
filename: string,
+ pub fn init(_path: string) NodeJSPathName {
+ var path = _path;
+ var base = path;
+ // ext must be empty if not detected
+ var ext: string = "";
+ var dir = path;
+ var is_absolute = true;
+ var _i = strings.lastIndexOfChar(path, '/');
+ var first = true;
+ while (_i) |i| {
+
+ // Stop if we found a non-trailing slash
+ if (i + 1 != path.len) {
+ base = path[i + 1 ..];
+ dir = path[0..i];
+ is_absolute = false;
+ break;
+ }
+
+ // If the path starts with a slash and it's the only slash, it's absolute
+ if (i == 0 and first) {
+ base = path[1..];
+ dir = &([_]u8{});
+ break;
+ }
+
+ first = false;
+ // Ignore trailing slashes
+
+ path = path[0..i];
+
+ _i = strings.lastIndexOfChar(path, '/');
+ }
+
+ // clean trailing slashes
+ if (base.len > 1 and base[base.len - 1] == '/') {
+ base = base[0 .. base.len - 1];
+ }
+
+ // filename is base without extension
+ var filename = base;
+
+ // if the filename is only one character, ext = "" even when that character is "."
+ if (filename.len > 1) {
+ // Strip off the extension
+ var _dot = strings.lastIndexOfChar(filename, '.');
+ if (_dot) |dot| {
+ ext = filename[dot..];
+ filename = filename[0..dot];
+ }
+ }
+
+ if (is_absolute) {
+ dir = &([_]u8{});
+ }
+
+ return NodeJSPathName{
+ .dir = dir,
+ .base = base,
+ .ext = ext,
+ .filename = filename,
+ };
+ }
+};
+
+pub const PathName = struct {
+ base: string,
+ dir: string,
+ /// includes the leading .
+ ext: string,
+ filename: string,
pub fn nonUniqueNameStringBase(self: *const PathName) string {
// /bar/foo/index.js -> foo
if (self.dir.len > 0 and strings.eqlComptime(self.base, "index")) {
diff --git a/src/http.zig b/src/http.zig
index c54f4ea9c..f26a0e985 100644
--- a/src/http.zig
+++ b/src/http.zig
@@ -1484,6 +1484,7 @@ pub const RequestContext = struct {
std.debug.assert(JavaScript.VirtualMachine.isLoaded());
javascript_vm = vm;
vm.bundler.options.origin = handler.origin;
+ vm.bundler.options.no_macros = handler.client_bundler.options.no_macros;
const boot = vm.bundler.options.framework.?.server.path;
std.debug.assert(boot.len > 0);
errdefer vm.deinit();
@@ -3972,7 +3973,15 @@ pub const Server = struct {
http_editor_context.name = debug.editor;
- server.bundler.options.macro_remap = debug.macros orelse .{};
+ switch (debug.macros) {
+ .disable => {
+ server.bundler.options.no_macros = true;
+ },
+ .map => |macros| {
+ server.bundler.options.macro_remap = macros;
+ },
+ .unspecified => {},
+ }
if (debug.fallback_only or server.bundler.env.map.get("BUN_DISABLE_BUN_JS") != null) {
RequestContext.fallback_only = true;
diff --git a/src/http_client_async.zig b/src/http_client_async.zig
index 4e0926baa..fe5f34f48 100644
--- a/src/http_client_async.zig
+++ b/src/http_client_async.zig
@@ -71,6 +71,89 @@ pub const FetchRedirect = enum(u8) {
});
};
+pub const HTTPRequestBody = union(enum) {
+ bytes: []const u8,
+ sendfile: Sendfile,
+
+ pub fn len(this: *const HTTPRequestBody) usize {
+ return switch (this.*) {
+ .bytes => this.bytes.len,
+ .sendfile => this.sendfile.content_size,
+ };
+ }
+};
+
+pub const Sendfile = struct {
+ fd: bun.FileDescriptor,
+ remain: usize = 0,
+ offset: usize = 0,
+ content_size: usize = 0,
+
+ pub fn isEligible(url: bun.URL) bool {
+ return url.isHTTP() and url.href.len > 0 and FeatureFlags.streaming_file_uploads_for_http_client;
+ }
+
+ pub fn write(
+ this: *Sendfile,
+ socket: NewHTTPContext(false).HTTPSocket,
+ ) Status {
+ const adjusted_count_temporary = @min(@as(u64, this.remain), @as(u63, std.math.maxInt(u63)));
+ // TODO we should not need this int cast; improve the return type of `@min`
+ const adjusted_count = @intCast(u63, adjusted_count_temporary);
+
+ if (Environment.isLinux) {
+ var signed_offset = @intCast(i64, this.offset);
+ const begin = this.offset;
+ const val =
+ // this does the syscall directly, without libc
+ std.os.linux.sendfile(socket.fd(), this.fd, &signed_offset, this.remain);
+ this.offset = @intCast(u64, signed_offset);
+
+ const errcode = std.os.linux.getErrno(val);
+
+ this.remain -|= @intCast(u64, this.offset -| begin);
+
+ if (errcode != .SUCCESS or this.remain == 0 or val == 0) {
+ if (errcode == .SUCCESS) {
+ return .{ .done = {} };
+ }
+
+ return .{ .err = AsyncIO.asError(errcode) };
+ }
+ } else {
+ var sbytes: std.os.off_t = adjusted_count;
+ const signed_offset = @bitCast(i64, @as(u64, this.offset));
+ const errcode = std.c.getErrno(std.c.sendfile(
+ this.fd,
+ socket.fd(),
+
+ signed_offset,
+ &sbytes,
+ null,
+ 0,
+ ));
+ const wrote = @intCast(u64, sbytes);
+ this.offset +|= wrote;
+ this.remain -|= wrote;
+ if (errcode != .AGAIN or this.remain == 0 or sbytes == 0) {
+ if (errcode == .SUCCESS) {
+ return .{ .done = {} };
+ }
+
+ return .{ .err = AsyncIO.asError(errcode) };
+ }
+ }
+
+ return .{ .again = {} };
+ }
+
+ pub const Status = union(enum) {
+ done: void,
+ err: anyerror,
+ again: void,
+ };
+};
+
const ProxySSLData = struct {
buffer: std.ArrayList(u8),
partial: bool,
@@ -738,7 +821,7 @@ pub fn onClose(
if (client.allow_retry) {
client.allow_retry = false;
- client.start(client.state.request_body, client.state.body_out_str.?);
+ client.start(client.state.original_request_body, client.state.body_out_str.?);
return;
}
@@ -915,14 +998,16 @@ pub const InternalState = struct {
compressed_body: MutableString = undefined,
body_size: usize = 0,
request_body: []const u8 = "",
+ original_request_body: HTTPRequestBody = .{ .bytes = "" },
request_sent_len: usize = 0,
fail: anyerror = error.NoError,
request_stage: HTTPStage = .pending,
response_stage: HTTPStage = .pending,
- pub fn init(body: []const u8, body_out_str: *MutableString) InternalState {
+ pub fn init(body: HTTPRequestBody, body_out_str: *MutableString) InternalState {
return .{
- .request_body = body,
+ .original_request_body = body,
+ .request_body = if (body == .bytes) body.bytes else "",
.compressed_body = MutableString{ .allocator = default_allocator, .list = .{} },
.response_message_buffer = MutableString{ .allocator = default_allocator, .list = .{} },
.body_out_str = body_out_str,
@@ -942,6 +1027,7 @@ pub const InternalState = struct {
.body_out_str = body_msg,
.compressed_body = MutableString{ .allocator = default_allocator, .list = .{} },
.response_message_buffer = MutableString{ .allocator = default_allocator, .list = .{} },
+ .original_request_body = .{ .bytes = "" },
.request_body = "",
};
}
@@ -1191,7 +1277,7 @@ pub const AsyncHTTP = struct {
request_headers: Headers.Entries = Headers.Entries{},
response_headers: Headers.Entries = Headers.Entries{},
response_buffer: *MutableString,
- request_body: []const u8 = "",
+ request_body: HTTPRequestBody = .{ .bytes = "" },
allocator: std.mem.Allocator,
request_header_buf: string = "",
method: Method = Method.GET,
@@ -1278,7 +1364,18 @@ pub const AsyncHTTP = struct {
hostname: ?[]u8,
redirect_type: FetchRedirect,
) AsyncHTTP {
- var this = AsyncHTTP{ .allocator = allocator, .url = url, .method = method, .request_headers = headers, .request_header_buf = headers_buf, .request_body = request_body, .response_buffer = response_buffer, .completion_callback = callback, .http_proxy = http_proxy, .async_http_id = if (signal != null) async_http_id.fetchAdd(1, .Monotonic) else 0 };
+ var this = AsyncHTTP{
+ .allocator = allocator,
+ .url = url,
+ .method = method,
+ .request_headers = headers,
+ .request_header_buf = headers_buf,
+ .request_body = .{ .bytes = request_body },
+ .response_buffer = response_buffer,
+ .completion_callback = callback,
+ .http_proxy = http_proxy,
+ .async_http_id = if (signal != null) async_http_id.fetchAdd(1, .Monotonic) else 0,
+ };
this.client = HTTPClient.init(allocator, method, url, headers, headers_buf, signal, hostname);
this.client.async_http_id = this.async_http_id;
@@ -1648,7 +1745,7 @@ pub fn doRedirect(this: *HTTPClient) void {
if (this.aborted != null) {
_ = socket_async_http_abort_tracker.swapRemove(this.async_http_id);
}
- return this.start("", body_out_str);
+ return this.start(.{ .bytes = "" }, body_out_str);
}
pub fn isHTTPS(this: *HTTPClient) bool {
if (this.http_proxy) |proxy| {
@@ -1662,7 +1759,7 @@ pub fn isHTTPS(this: *HTTPClient) bool {
}
return false;
}
-pub fn start(this: *HTTPClient, body: []const u8, body_out_str: *MutableString) void {
+pub fn start(this: *HTTPClient, body: HTTPRequestBody, body_out_str: *MutableString) void {
body_out_str.reset();
std.debug.assert(this.state.response_message_buffer.list.capacity == 0);
@@ -1730,7 +1827,7 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
this.setTimeout(socket, 60);
- const request = this.buildRequest(this.state.request_body.len);
+ const request = this.buildRequest(this.state.original_request_body.len());
if (this.http_proxy) |_| {
if (this.url.isHTTPS()) {
@@ -1784,7 +1881,10 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
std.debug.assert(!socket.isShutdown());
std.debug.assert(!socket.isClosed());
}
- const amount = socket.write(to_send, false);
+ const amount = socket.write(
+ to_send,
+ false,
+ );
if (comptime is_first_call) {
if (amount == 0) {
// don't worry about it
@@ -1804,7 +1904,10 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
this.state.request_body = this.state.request_body[this.state.request_sent_len - headers_len ..];
}
- const has_sent_body = this.state.request_body.len == 0;
+ const has_sent_body = if (this.state.original_request_body == .bytes)
+ this.state.request_body.len == 0
+ else
+ false;
if (has_sent_headers and has_sent_body) {
this.state.request_stage = .done;
@@ -1813,7 +1916,11 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
if (has_sent_headers) {
this.state.request_stage = .body;
- std.debug.assert(this.state.request_body.len > 0);
+ std.debug.assert(
+ // we should have leftover data OR we use sendfile()
+ (this.state.original_request_body == .bytes and this.state.request_body.len > 0) or
+ this.state.original_request_body == .sendfile,
+ );
// we sent everything, but there's some body leftover
if (amount == @intCast(c_int, to_send.len)) {
@@ -1826,19 +1933,42 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
.body => {
this.setTimeout(socket, 60);
- const to_send = this.state.request_body;
- const amount = socket.write(to_send, true);
- if (amount < 0) {
- this.closeAndFail(error.WriteFailed, is_ssl, socket);
- return;
- }
+ switch (this.state.original_request_body) {
+ .bytes => {
+ const to_send = this.state.request_body;
+ const amount = socket.write(to_send, true);
+ if (amount < 0) {
+ this.closeAndFail(error.WriteFailed, is_ssl, socket);
+ return;
+ }
- this.state.request_sent_len += @intCast(usize, amount);
- this.state.request_body = this.state.request_body[@intCast(usize, amount)..];
+ this.state.request_sent_len += @intCast(usize, amount);
+ this.state.request_body = this.state.request_body[@intCast(usize, amount)..];
- if (this.state.request_body.len == 0) {
- this.state.request_stage = .done;
- return;
+ if (this.state.request_body.len == 0) {
+ this.state.request_stage = .done;
+ return;
+ }
+ },
+ .sendfile => |*sendfile| {
+ if (comptime is_ssl) {
+ @panic("sendfile is only supported without SSL. This code should never have been reached!");
+ }
+
+ switch (sendfile.write(socket)) {
+ .done => {
+ this.state.request_stage = .done;
+ return;
+ },
+ .err => |err| {
+ this.closeAndFail(err, false, socket);
+ return;
+ },
+ .again => {
+ socket.markNeedsMoreForSendfile();
+ },
+ }
+ },
}
},
.proxy_body => {
diff --git a/src/js_parser.zig b/src/js_parser.zig
index 0fc4f794a..a9cd4379c 100644
--- a/src/js_parser.zig
+++ b/src/js_parser.zig
@@ -12750,7 +12750,7 @@ fn NewParser_(
// Only continue if we have started
if ((optional_start orelse .ccontinue) == .start) {
- optional_start = .ccontinue;
+ optional_chain = .ccontinue;
}
},
.t_no_substitution_template_literal => {
@@ -15361,6 +15361,18 @@ fn NewParser_(
if (p.is_control_flow_dead) {
return p.newExpr(E.Undefined{}, e_.tag.?.loc);
}
+
+ // this ordering is in case someone wants to use a macro in a node_module conditionally
+ if (p.options.features.no_macros) {
+ p.log.addError(p.source, tag.loc, "Macros are disabled") catch unreachable;
+ return p.newExpr(E.Undefined{}, e_.tag.?.loc);
+ }
+
+ if (p.source.path.isNodeModule()) {
+ p.log.addError(p.source, expr.loc, "For security reasons, macros cannot be run from node_modules.") catch unreachable;
+ return p.newExpr(E.Undefined{}, expr.loc);
+ }
+
p.macro_call_count += 1;
const record = &p.import_records.items[import_record_id];
// We must visit it to convert inline_identifiers and record usage
@@ -16510,6 +16522,17 @@ fn NewParser_(
if (p.is_control_flow_dead) {
return p.newExpr(E.Undefined{}, e_.target.loc);
}
+
+ if (p.options.features.no_macros) {
+ p.log.addError(p.source, expr.loc, "Macros are disabled") catch unreachable;
+ return p.newExpr(E.Undefined{}, expr.loc);
+ }
+
+ if (p.source.path.isNodeModule()) {
+ p.log.addError(p.source, expr.loc, "For security reasons, macros cannot be run from node_modules.") catch unreachable;
+ return p.newExpr(E.Undefined{}, expr.loc);
+ }
+
const name = p.symbols.items[ref.innerIndex()].original_name;
const record = &p.import_records.items[import_record_id];
const copied = Expr{ .loc = expr.loc, .data = .{ .e_call = e_ } };
diff --git a/src/linker.zig b/src/linker.zig
index ca57e2f85..a2f1dab71 100644
--- a/src/linker.zig
+++ b/src/linker.zig
@@ -1016,10 +1016,8 @@ pub const Linker = struct {
.napi => {
import_record.print_mode = .napi_module;
},
- .wasm => {
- import_record.print_mode = .import_path;
- },
- .file => {
+
+ .wasm, .file => {
// if we're building for web/node, always print as import path
// if we're building for bun
diff --git a/src/options.zig b/src/options.zig
index 4133e95f7..a39c9fc6c 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -616,6 +616,7 @@ pub const Target = enum {
array.set(
Target.bun_macro,
&[_]string{
+ "macro",
"bun",
"worker",
"module",
@@ -624,13 +625,6 @@ pub const Target = enum {
"browser",
},
);
- // array.set(Target.bun_macro, [_]string{ "bun_macro", "browser", "default", },);
-
- // Original comment:
- // The neutral target is for people that don't want esbuild to try to
- // pick good defaults for their platform. In that case, the list of main
- // fields is empty by default. You must explicitly configure it yourself.
- // array.set(Target.neutral, &listc);
break :brk array;
};
@@ -1436,6 +1430,7 @@ pub const BundleOptions = struct {
rewrite_jest_for_tests: bool = false,
macro_remap: MacroRemap = MacroRemap{},
+ no_macros: bool = false,
conditions: ESMConditions = undefined,
tree_shaking: bool = false,
diff --git a/src/runtime.zig b/src/runtime.zig
index f09e16378..7312aa4bd 100644
--- a/src/runtime.zig
+++ b/src/runtime.zig
@@ -283,6 +283,8 @@ pub const Runtime = struct {
inject_jest_globals: bool = false,
+ no_macros: bool = false,
+
commonjs_named_exports: bool = true,
minify_syntax: bool = false,
diff --git a/src/url.zig b/src/url.zig
index 1e1b284b5..d0fcdab38 100644
--- a/src/url.zig
+++ b/src/url.zig
@@ -997,6 +997,9 @@ pub const FormData = struct {
defer blob.detach();
var filename = bun.JSC.ZigString.initUTF8(filename_str);
const content_type: []const u8 = brk: {
+ if (!field.content_type.isEmpty()) {
+ break :brk field.content_type.slice(buf);
+ }
if (filename_str.len > 0) {
if (bun.HTTP.MimeType.byExtensionNoDefault(std.fs.path.extension(filename_str))) |mime| {
break :brk mime.value;
@@ -1011,8 +1014,15 @@ pub const FormData = struct {
};
if (content_type.len > 0) {
- blob.content_type = content_type;
- blob.content_type_allocated = false;
+ if (!field.content_type.isEmpty()) {
+ blob.content_type_allocated = true;
+ blob.content_type = bun.default_allocator.dupe(u8, content_type) catch @panic("failed to allocate memory for blob content type");
+ blob.content_type_was_set = true;
+ } else {
+ blob.content_type = content_type;
+ blob.content_type_was_set = false;
+ blob.content_type_allocated = false;
+ }
}
wrap.form.appendBlob(wrap.globalThis, &key, &blob, &filename);