Diffstat (limited to 'src/bun.js/node')
-rw-r--r-- | src/bun.js/node/buffer.js | 97
-rw-r--r-- | src/bun.js/node/buffer.zig | 94
-rw-r--r-- | src/bun.js/node/dir_iterator.zig | 347
-rw-r--r-- | src/bun.js/node/node_fs.zig | 3657
-rw-r--r-- | src/bun.js/node/node_fs_binding.zig | 423
-rw-r--r-- | src/bun.js/node/node_fs_constant.zig | 204
-rw-r--r-- | src/bun.js/node/nodejs_error_code.zig | 1097
-rw-r--r-- | src/bun.js/node/syscall.zig | 622
-rw-r--r-- | src/bun.js/node/types.zig | 2702
9 files changed, 9243 insertions, 0 deletions
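
The diff below adds Bun's initial Node.js filesystem and buffer bindings: a JS-facing Buffer shim plus Zig code that parses arguments and performs the syscalls. The `Arguments` structs in node_fs.zig mirror the standard Node `fs` call signatures, so the new code is ultimately reached through ordinary `fs` calls. A minimal sketch of that mapping, assuming Bun's Node-compatible `fs` module; the file names are hypothetical and only illustrate which struct each call feeds:

    const fs = require("fs");

    // writeFileSync -> Arguments.WriteFile { file, data, encoding, flag, mode }
    fs.writeFileSync("example.txt", "hello", { encoding: "utf8", mode: 0o644 });

    // readFileSync -> Arguments.ReadFile { path, encoding, flag }
    const text = fs.readFileSync("example.txt", "utf8");

    // renameSync -> Arguments.Rename { old_path, new_path }
    fs.renameSync("example.txt", "renamed.txt");

    // mkdirSync -> Arguments.Mkdir { path, recursive, mode }
    fs.mkdirSync("nested/dir", { recursive: true });
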
diff --git a/src/bun.js/node/buffer.js b/src/bun.js/node/buffer.js new file mode 100644 index 000000000..faee19655 --- /dev/null +++ b/src/bun.js/node/buffer.js @@ -0,0 +1,97 @@ +"use strict"; + +function createBuffer(BufferPrototype, BufferStatic, Realm) { + "use strict"; + + var Uint8ArraySubarray = Realm.Uint8Array.prototype.subarray; + var isUint8Array = (value) => value instanceof Realm.Uint8Array; + var SymbolToPrimitive = Realm.Symbol.toPrimitive; + var isArray = Realm.Array.isArray; + var isArrayBufferLike = + "SharedArrayBuffer" in Realm + ? () => + value instanceof Realm.ArrayBuffer || + value instanceof Realm.SharedArrayBuffer + : () => value instanceof Realm.ArrayBuffer; + + var BufferInstance = class BufferInstance extends Realm.Uint8Array { + constructor(bufferOrLength, byteOffset, length) { + super(bufferOrLength, byteOffset, length); + } + + static isBuffer(obj) { + return obj instanceof BufferInstance; + } + + static from(value, encodingOrOffset, length) { + switch (typeof value) { + case "string": { + return BufferStatic.fromString(value, encodingOrOffset, length); + } + case "object": { + if (isUint8Array(value)) { + return BufferStatic.fromUint8Array(value, encodingOrOffset, length); + } + + if (isArrayBufferLike(value)) { + return new BufferInstance(value, 0, length); + } + + const valueOf = value.valueOf && value.valueOf(); + if ( + valueOf != null && + valueOf !== value && + (typeof valueOf === "string" || typeof valueOf === "object") + ) { + return BufferInstance.from(valueOf, encodingOrOffset, length); + } + + if (typeof value[SymbolToPrimitive] === "function") { + const primitive = value[SymbolToPrimitive]("string"); + if (typeof primitive === "string") { + return BufferStatic.fromString(primitive, encodingOrOffset); + } + } + + if (isArray(value)) { + return BufferStatic.fromArray(value, encodingOrOffset, length); + } + } + } + + throw new TypeError( + "First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object." 
+ ); + } + + slice(start, end) { + return Uint8ArraySubarray.call(this, start, end); + } + + static get poolSize() { + return BufferStatic._poolSize; + } + + static set poolSize(value) { + BufferStatic._poolSize = value; + } + + get parent() { + return this.buffer; + } + + get offset() { + return this.byteOffset; + } + }; + + Object.assign(BufferInstance, BufferStatic); + Object.assign(BufferInstance.prototype, BufferPrototype); + Object.defineProperty(BufferInstance, "name", { + value: "Buffer", + configurable: false, + enumerable: false, + }); + + return BufferInstance; +} diff --git a/src/bun.js/node/buffer.zig b/src/bun.js/node/buffer.zig new file mode 100644 index 000000000..412c61722 --- /dev/null +++ b/src/bun.js/node/buffer.zig @@ -0,0 +1,94 @@ +const std = @import("std"); +const bun = @import("../../global.zig"); +const strings = bun.strings; +const string = bun.string; +const AsyncIO = @import("io"); +const JSC = @import("../../jsc.zig"); +const PathString = JSC.PathString; +const Environment = bun.Environment; +const C = bun.C; +const Syscall = @import("./syscall.zig"); +const os = std.os; + +const JSGlobalObject = JSC.JSGlobalObject; +const ArgumentsSlice = JSC.Node.ArgumentsSlice; + +pub const BufferVectorized = struct { + extern fn memset_pattern16(b: *anyopaque, pattern16: *const anyopaque, len: usize) void; + + pub fn fill( + globalObject: *JSGlobalObject, + this: *JSC.ArrayBuffer, + str: *JSC.ZigString, + start: u32, + end: u32, + encoding: JSC.Node.Encoding, + ) callconv(.C) void { + const allocator = JSC.VirtualMachine.vm.allocator; + var stack_fallback = std.heap.stackFallback(512, allocator); + var stack_fallback_allocator = stack_fallback.get(); + var input_string = str.toSlice(stack_fallback_allocator); + if (input_string.len == 0) return; + + defer input_string.deinit(); + + var buf = this.slice()[start..end]; + + var slice = input_string.slice(); + switch (encoding) { + JSC.Node.Encoding.utf8, + JSC.Node.Encoding.ascii, + JSC.Node.Encoding.latin1, + JSC.Node.Encoding.buffer, + => { + switch (slice.len) { + 0 => unreachable, + 1 => { + @memset(buf.ptr, slice[0], 1); + return; + }, + 2...16 => { + if (comptime Environment.isMac) { + var pattern: [16]u8 = undefined; + var remain: []u8 = pattern[0..]; + + while (remain.len > 0) { + for (slice[0..]) |a| { + remain[0] = a; + remain = remain[1..]; + } + } + + memset_pattern16(buf.ptr, &pattern, buf.len); + return; + } + }, + else => {}, + } + + var in_there = @minimum(slice.len, buf.len); + @memcpy(buf.ptr, slice.ptr, in_there); + if (in_there < slice.len) { + return; + } + + // var ptr = buf.ptr + @as(usize, start) + slice.len; + + // const fill_length = @as(usize, end) - @as(usize, start); + + // // while (in_there < fill_length - in_there) { + // // std.mem.copy(ptr) + // // ptr += in_there; + // // in_there *= 2; + // // } + }, + else => {}, + } + } +}; + +comptime { + if (!JSC.is_bindgen) { + @export(BufferVectorized, .{ .name = "Bun__Buffer__fill" }); + } +} diff --git a/src/bun.js/node/dir_iterator.zig b/src/bun.js/node/dir_iterator.zig new file mode 100644 index 000000000..19db4177d --- /dev/null +++ b/src/bun.js/node/dir_iterator.zig @@ -0,0 +1,347 @@ +// This is copied from std.fs.Dir.Iterator +// The differences are: +// - it returns errors in the expected format +// - doesn't mark BADF as unreachable +// - It uses PathString instead of []const u8 + +const builtin = @import("builtin"); +const std = @import("std"); +const os = std.os; + +const Dir = std.fs.Dir; +const JSC = @import("../../jsc.zig"); +const 
PathString = JSC.PathString; + +const IteratorError = error{ AccessDenied, SystemResources } || os.UnexpectedError; +const mem = std.mem; +const strings = @import("../../global.zig").strings; +const Maybe = JSC.Maybe; +const File = std.fs.File; +const Result = Maybe(?Entry); + +const Entry = JSC.Node.DirEnt; + +pub const Iterator = switch (builtin.os.tag) { + .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => struct { + dir: Dir, + seek: i64, + buf: [8192]u8, // TODO align(@alignOf(os.system.dirent)), + index: usize, + end_index: usize, + + const Self = @This(); + + pub const Error = IteratorError; + + /// Memory such as file names referenced in this returned entry becomes invalid + /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. + const next = switch (builtin.os.tag) { + .macos, .ios => nextDarwin, + // .freebsd, .netbsd, .dragonfly, .openbsd => nextBsd, + // .solaris => nextSolaris, + else => @compileError("unimplemented"), + }; + + fn nextDarwin(self: *Self) Result { + start_over: while (true) { + if (self.index >= self.end_index) { + const rc = os.system.__getdirentries64( + self.dir.fd, + &self.buf, + self.buf.len, + &self.seek, + ); + + if (rc < 1) { + if (rc == 0) return Result{ .result = null }; + if (Result.errnoSys(rc, .getdirentries64)) |err| { + return err; + } + } + + self.index = 0; + self.end_index = @intCast(usize, rc); + } + const darwin_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]); + const next_index = self.index + darwin_entry.reclen(); + self.index = next_index; + + const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen]; + + if (strings.eqlComptime(name, ".") or strings.eqlComptime(name, "..") or (darwin_entry.d_ino == 0)) { + continue :start_over; + } + + const entry_kind = switch (darwin_entry.d_type) { + os.DT.BLK => Entry.Kind.BlockDevice, + os.DT.CHR => Entry.Kind.CharacterDevice, + os.DT.DIR => Entry.Kind.Directory, + os.DT.FIFO => Entry.Kind.NamedPipe, + os.DT.LNK => Entry.Kind.SymLink, + os.DT.REG => Entry.Kind.File, + os.DT.SOCK => Entry.Kind.UnixDomainSocket, + os.DT.WHT => Entry.Kind.Whiteout, + else => Entry.Kind.Unknown, + }; + return .{ + .result = Entry{ + .name = PathString.init(name), + .kind = entry_kind, + }, + }; + } + } + }, + + .linux => struct { + dir: Dir, + // The if guard is solely there to prevent compile errors from missing `linux.dirent64` + // definition when compiling for other OSes. It doesn't do anything when compiling for Linux. + buf: [8192]u8 align(if (builtin.os.tag != .linux) 1 else @alignOf(linux.dirent64)), + index: usize, + end_index: usize, + + const Self = @This(); + const linux = os.linux; + + pub const Error = IteratorError; + + /// Memory such as file names referenced in this returned entry becomes invalid + /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. + pub fn next(self: *Self) Result { + start_over: while (true) { + if (self.index >= self.end_index) { + const rc = linux.getdents64(self.dir.fd, &self.buf, self.buf.len); + if (Result.errnoSys(rc, .getdents64)) |err| return err; + if (rc == 0) return .{ .result = null }; + self.index = 0; + self.end_index = rc; + } + const linux_entry = @ptrCast(*align(1) linux.dirent64, &self.buf[self.index]); + const next_index = self.index + linux_entry.reclen(); + self.index = next_index; + + const name = mem.sliceTo(@ptrCast([*:0]u8, &linux_entry.d_name), 0); + + // skip . and .. 
entries + if (strings.eqlComptime(name, ".") or strings.eqlComptime(name, "..")) { + continue :start_over; + } + + const entry_kind = switch (linux_entry.d_type) { + linux.DT.BLK => Entry.Kind.BlockDevice, + linux.DT.CHR => Entry.Kind.CharacterDevice, + linux.DT.DIR => Entry.Kind.Directory, + linux.DT.FIFO => Entry.Kind.NamedPipe, + linux.DT.LNK => Entry.Kind.SymLink, + linux.DT.REG => Entry.Kind.File, + linux.DT.SOCK => Entry.Kind.UnixDomainSocket, + else => Entry.Kind.Unknown, + }; + return .{ + .result = Entry{ + .name = PathString.init(name), + .kind = entry_kind, + }, + }; + } + } + }, + .windows => struct { + dir: Dir, + buf: [8192]u8 align(@alignOf(os.windows.FILE_BOTH_DIR_INFORMATION)), + index: usize, + end_index: usize, + first: bool, + name_data: [256]u8, + + const Self = @This(); + + pub const Error = IteratorError; + + /// Memory such as file names referenced in this returned entry becomes invalid + /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. + pub fn next(self: *Self) Result { + while (true) { + const w = os.windows; + if (self.index >= self.end_index) { + var io: w.IO_STATUS_BLOCK = undefined; + const rc = w.ntdll.NtQueryDirectoryFile( + self.dir.fd, + null, + null, + null, + &io, + &self.buf, + self.buf.len, + .FileBothDirectoryInformation, + w.FALSE, + null, + if (self.first) @as(w.BOOLEAN, w.TRUE) else @as(w.BOOLEAN, w.FALSE), + ); + self.first = false; + if (io.Information == 0) return .{ .result = null }; + self.index = 0; + self.end_index = io.Information; + switch (rc) { + .SUCCESS => {}, + .ACCESS_DENIED => return error.AccessDenied, // Double-check that the Dir was opened with iteration ability + + else => return w.unexpectedStatus(rc), + } + } + + const aligned_ptr = @alignCast(@alignOf(w.FILE_BOTH_DIR_INFORMATION), &self.buf[self.index]); + const dir_info = @ptrCast(*w.FILE_BOTH_DIR_INFORMATION, aligned_ptr); + if (dir_info.NextEntryOffset != 0) { + self.index += dir_info.NextEntryOffset; + } else { + self.index = self.buf.len; + } + + const name_utf16le = @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2]; + + if (mem.eql(u16, name_utf16le, &[_]u16{'.'}) or mem.eql(u16, name_utf16le, &[_]u16{ '.', '.' })) + continue; + // Trust that Windows gives us valid UTF-16LE + const name_utf8_len = std.unicode.utf16leToUtf8(self.name_data[0..], name_utf16le) catch unreachable; + const name_utf8 = self.name_data[0..name_utf8_len]; + const kind = blk: { + const attrs = dir_info.FileAttributes; + if (attrs & w.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk Entry.Kind.Directory; + if (attrs & w.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk Entry.Kind.SymLink; + break :blk Entry.Kind.File; + }; + return .{ + .result = Entry{ + .name = PathString.init(name_utf8), + .kind = kind, + }, + }; + } + } + }, + .wasi => struct { + dir: Dir, + buf: [8192]u8, // TODO align(@alignOf(os.wasi.dirent_t)), + cookie: u64, + index: usize, + end_index: usize, + + const Self = @This(); + + pub const Error = IteratorError; + + /// Memory such as file names referenced in this returned entry becomes invalid + /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. + pub fn next(self: *Self) Result { + // We intentinally use fd_readdir even when linked with libc, + // since its implementation is exactly the same as below, + // and we avoid the code complexity here. 
+ const w = os.wasi; + start_over: while (true) { + if (self.index >= self.end_index) { + var bufused: usize = undefined; + switch (w.fd_readdir(self.dir.fd, &self.buf, self.buf.len, self.cookie, &bufused)) { + .SUCCESS => {}, + .BADF => unreachable, // Dir is invalid or was opened without iteration ability + .FAULT => unreachable, + .NOTDIR => unreachable, + .INVAL => unreachable, + .NOTCAPABLE => return error.AccessDenied, + else => |err| return os.unexpectedErrno(err), + } + if (bufused == 0) return null; + self.index = 0; + self.end_index = bufused; + } + const entry = @ptrCast(*align(1) w.dirent_t, &self.buf[self.index]); + const entry_size = @sizeOf(w.dirent_t); + const name_index = self.index + entry_size; + const name = mem.span(self.buf[name_index .. name_index + entry.d_namlen]); + + const next_index = name_index + entry.d_namlen; + self.index = next_index; + self.cookie = entry.d_next; + + // skip . and .. entries + if (strings.eqlComptime(name, ".") or strings.eqlComptime(name, "..")) { + continue :start_over; + } + + const entry_kind = switch (entry.d_type) { + .BLOCK_DEVICE => Entry.Kind.BlockDevice, + .CHARACTER_DEVICE => Entry.Kind.CharacterDevice, + .DIRECTORY => Entry.Kind.Directory, + .SYMBOLIC_LINK => Entry.Kind.SymLink, + .REGULAR_FILE => Entry.Kind.File, + .SOCKET_STREAM, .SOCKET_DGRAM => Entry.Kind.UnixDomainSocket, + else => Entry.Kind.Unknown, + }; + return Entry{ + .name = name, + .kind = entry_kind, + }; + } + } + }, + else => @compileError("unimplemented"), +}; + +const WrappedIterator = struct { + iter: Iterator, + const Self = @This(); + + pub const Error = IteratorError; + + pub inline fn next(self: *Self) Result { + return self.iter.next(); + } +}; + +pub fn iterate(self: Dir) WrappedIterator { + return WrappedIterator{ + .iter = _iterate(self), + }; +} + +fn _iterate(self: Dir) Iterator { + switch (builtin.os.tag) { + .macos, + .ios, + .freebsd, + .netbsd, + .dragonfly, + .openbsd, + .solaris, + => return Iterator{ + .dir = self, + .seek = 0, + .index = 0, + .end_index = 0, + .buf = undefined, + }, + .linux, .haiku => return Iterator{ + .dir = self, + .index = 0, + .end_index = 0, + .buf = undefined, + }, + .windows => return Iterator{ + .dir = self, + .index = 0, + .end_index = 0, + .first = true, + .buf = undefined, + .name_data = undefined, + }, + .wasi => return Iterator{ + .dir = self, + .cookie = os.wasi.DIRCOOKIE_START, + .index = 0, + .end_index = 0, + .buf = undefined, + }, + else => @compileError("unimplemented"), + } +} diff --git a/src/bun.js/node/node_fs.zig b/src/bun.js/node/node_fs.zig new file mode 100644 index 000000000..3cacb57b2 --- /dev/null +++ b/src/bun.js/node/node_fs.zig @@ -0,0 +1,3657 @@ +// This file contains the underlying implementation for sync & async functions +// for interacting with the filesystem from JavaScript. 
+// The top-level functions assume the arguments are already validated +const std = @import("std"); +const bun = @import("../../global.zig"); +const strings = bun.strings; +const string = bun.string; +const AsyncIO = @import("io"); +const JSC = @import("../../jsc.zig"); +const PathString = JSC.PathString; +const Environment = bun.Environment; +const C = bun.C; +const Flavor = JSC.Node.Flavor; +const system = std.os.system; +const Maybe = JSC.Maybe; +const Encoding = JSC.Node.Encoding; +const Syscall = @import("./syscall.zig"); +const Constants = @import("./node_fs_constant.zig").Constants; +const builtin = @import("builtin"); +const os = @import("std").os; +const darwin = os.darwin; +const linux = os.linux; +const PathOrBuffer = JSC.Node.PathOrBuffer; +const PathLike = JSC.Node.PathLike; +const PathOrFileDescriptor = JSC.Node.PathOrFileDescriptor; +const FileDescriptor = JSC.Node.FileDescriptor; +const DirIterator = @import("./dir_iterator.zig"); +const Path = @import("../../resolver/resolve_path.zig"); +const FileSystem = @import("../../fs.zig").FileSystem; +const StringOrBuffer = JSC.Node.StringOrBuffer; +const ArgumentsSlice = JSC.Node.ArgumentsSlice; +const TimeLike = JSC.Node.TimeLike; +const Mode = JSC.Node.Mode; + +const uid_t = std.os.uid_t; +const gid_t = std.os.gid_t; + +/// u63 to allow one null bit +const ReadPosition = u63; + +const Stats = JSC.Node.Stats; +const BigIntStats = JSC.Node.BigIntStats; +const DirEnt = JSC.Node.DirEnt; + +pub const FlavoredIO = struct { + io: *AsyncIO, +}; + +pub const default_permission = Syscall.S.IRUSR | + Syscall.S.IWUSR | + Syscall.S.IRGRP | + Syscall.S.IWGRP | + Syscall.S.IROTH | + Syscall.S.IWOTH; + +const ArrayBuffer = JSC.MarkedArrayBuffer; +const Buffer = JSC.Buffer; +const FileSystemFlags = JSC.Node.FileSystemFlags; + +// TODO: to improve performance for all of these +// The tagged unions for each type should become regular unions +// and the tags should be passed in as comptime arguments to the functions performing the syscalls +// This would reduce stack size, at the cost of instruction cache misses +const Arguments = struct { + pub const Rename = struct { + old_path: PathLike, + new_path: PathLike, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Rename { + const old_path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "oldPath must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + const new_path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "newPath must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + return Rename{ .old_path = old_path, .new_path = new_path }; + } + }; + + pub const Truncate = struct { + /// Passing a file descriptor is deprecated and may result in an error being thrown in the future. 
+ path: PathOrFileDescriptor, + len: JSC.WebCore.Blob.SizeType = 0, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Truncate { + const path = PathOrFileDescriptor.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + const len: JSC.WebCore.Blob.SizeType = brk: { + const len_value = arguments.next() orelse break :brk 0; + + if (len_value.isNumber()) { + arguments.eat(); + break :brk len_value.to(JSC.WebCore.Blob.SizeType); + } + + break :brk 0; + }; + + return Truncate{ .path = path, .len = len }; + } + }; + + pub const FTruncate = struct { + fd: FileDescriptor, + len: ?JSC.WebCore.Blob.SizeType = null, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?FTruncate { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + + if (exception.* != null) return null; + + const len: JSC.WebCore.Blob.SizeType = brk: { + const len_value = arguments.next() orelse break :brk 0; + if (len_value.isNumber()) { + arguments.eat(); + break :brk len_value.to(JSC.WebCore.Blob.SizeType); + } + + break :brk 0; + }; + + return FTruncate{ .fd = fd, .len = len }; + } + }; + + pub const Chown = struct { + path: PathLike, + uid: uid_t = 0, + gid: gid_t = 0, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Chown { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + const uid: uid_t = brk: { + const uid_value = arguments.next() orelse break :brk { + if (exception.* == null) { + JSC.throwInvalidArguments( + "uid is required", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + break :brk @intCast(uid_t, uid_value.toInt32()); + }; + + const gid: gid_t = brk: { + const gid_value = arguments.next() orelse break :brk { + if (exception.* == null) { + JSC.throwInvalidArguments( + "gid is required", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + break :brk @intCast(gid_t, gid_value.toInt32()); + }; + + return Chown{ .path = path, .uid = uid, .gid = gid }; + } + }; + + pub const Fchown = struct { + fd: FileDescriptor, + uid: uid_t, + gid: gid_t, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Fchown { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const uid: uid_t = brk: { + const uid_value = arguments.next() orelse break :brk { + if (exception.* == null) { + JSC.throwInvalidArguments( + "uid is required", 
+ .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + break :brk @intCast(uid_t, uid_value.toInt32()); + }; + + const gid: gid_t = brk: { + const gid_value = arguments.next() orelse break :brk { + if (exception.* == null) { + JSC.throwInvalidArguments( + "gid is required", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + break :brk @intCast(gid_t, gid_value.toInt32()); + }; + + return Fchown{ .fd = fd, .uid = uid, .gid = gid }; + } + }; + + pub const LChown = Chown; + + pub const Lutimes = struct { + path: PathLike, + atime: TimeLike, + mtime: TimeLike, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Lutimes { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + const atime = JSC.Node.timeLikeFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "atime is required", + .{}, + ctx, + exception, + ); + } + + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "atime must be a number or a Date", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + + const mtime = JSC.Node.timeLikeFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mtime is required", + .{}, + ctx, + exception, + ); + } + + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mtime must be a number or a Date", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + + return Lutimes{ .path = path, .atime = atime, .mtime = mtime }; + } + }; + + pub const Chmod = struct { + path: PathLike, + mode: Mode = 0x777, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Chmod { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + const mode: Mode = JSC.Node.modeFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mode is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mode must be a string or integer", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + + return Chmod{ .path = path, .mode = mode }; + } + }; + + pub const FChmod = struct { + fd: FileDescriptor, + mode: Mode = 0x777, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?FChmod { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + arguments.eat(); + + const mode: Mode = JSC.Node.modeFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mode is required", + .{}, + ctx, + exception, + ); + } + return null; + }, 
exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mode must be a string or integer", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + + return FChmod{ .fd = fd, .mode = mode }; + } + }; + + pub const LCHmod = Chmod; + + pub const Stat = struct { + path: PathLike, + big_int: bool = false, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Stat { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const big_int = brk: { + if (arguments.next()) |next_val| { + if (next_val.isObject()) { + if (next_val.isCallable(ctx.ptr().vm())) break :brk false; + arguments.eat(); + + if (next_val.getIfPropertyExists(ctx.ptr(), "bigint")) |big_int| { + break :brk big_int.toBoolean(); + } + } + } + break :brk false; + }; + + if (exception.* != null) return null; + + return Stat{ .path = path, .big_int = big_int }; + } + }; + + pub const Fstat = struct { + fd: FileDescriptor, + big_int: bool = false, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Fstat { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "file descriptor must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const big_int = brk: { + if (arguments.next()) |next_val| { + if (next_val.isObject()) { + if (next_val.isCallable(ctx.ptr().vm())) break :brk false; + arguments.eat(); + + if (next_val.getIfPropertyExists(ctx.ptr(), "bigint")) |big_int| { + break :brk big_int.toBoolean(); + } + } + } + break :brk false; + }; + + if (exception.* != null) return null; + + return Fstat{ .fd = fd, .big_int = big_int }; + } + }; + + pub const Lstat = Stat; + + pub const Link = struct { + old_path: PathLike, + new_path: PathLike, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Link { + const old_path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "oldPath must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const new_path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "newPath must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return Link{ .old_path = old_path, .new_path = new_path }; + } + }; + + pub const Symlink = struct { + old_path: PathLike, + new_path: PathLike, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Symlink { + const old_path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "target must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const new_path = PathLike.fromJS(ctx, arguments, exception) orelse { + if 
(exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + if (arguments.next()) |next_val| { + // The type argument is only available on Windows and + // ignored on other platforms. It can be set to 'dir', + // 'file', or 'junction'. If the type argument is not set, + // Node.js will autodetect target type and use 'file' or + // 'dir'. If the target does not exist, 'file' will be used. + // Windows junction points require the destination path to + // be absolute. When using 'junction', the target argument + // will automatically be normalized to absolute path. + if (next_val.isString()) { + comptime if (Environment.isWindows) @compileError("Add support for type argument on Windows"); + arguments.eat(); + } + } + + return Symlink{ .old_path = old_path, .new_path = new_path }; + } + }; + + pub const Readlink = struct { + path: PathLike, + encoding: Encoding = Encoding.utf8, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Readlink { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + var encoding = Encoding.utf8; + if (arguments.next()) |val| { + arguments.eat(); + + switch (val.jsType()) { + JSC.JSValue.JSType.String, JSC.JSValue.JSType.StringObject, JSC.JSValue.JSType.DerivedStringObject => { + encoding = Encoding.fromStringValue(val, ctx.ptr()) orelse Encoding.utf8; + }, + else => { + if (val.isObject()) { + if (val.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding_| { + encoding = Encoding.fromStringValue(encoding_, ctx.ptr()) orelse Encoding.utf8; + } + } + }, + } + } + + return Readlink{ .path = path, .encoding = encoding }; + } + }; + + pub const Realpath = struct { + path: PathLike, + encoding: Encoding = Encoding.utf8, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Realpath { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + var encoding = Encoding.utf8; + if (arguments.next()) |val| { + arguments.eat(); + + switch (val.jsType()) { + JSC.JSValue.JSType.String, JSC.JSValue.JSType.StringObject, JSC.JSValue.JSType.DerivedStringObject => { + encoding = Encoding.fromStringValue(val, ctx.ptr()) orelse Encoding.utf8; + }, + else => { + if (val.isObject()) { + if (val.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding_| { + encoding = Encoding.fromStringValue(encoding_, ctx.ptr()) orelse Encoding.utf8; + } + } + }, + } + } + + return Realpath{ .path = path, .encoding = encoding }; + } + }; + + pub const Unlink = struct { + path: PathLike, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Unlink { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return Unlink{ + .path = path, + }; + } + }; + + pub const Rm = struct { + path: PathLike, + force: bool = 
false, + max_retries: u32 = 0, + recursive: bool = false, + retry_delay: c_uint = 100, + }; + + pub const RmDir = struct { + path: PathLike, + + max_retries: u32 = 0, + recursive: bool = false, + retry_delay: c_uint = 100, + }; + + /// https://github.com/nodejs/node/blob/master/lib/fs.js#L1285 + pub const Mkdir = struct { + path: PathLike, + /// Indicates whether parent folders should be created. + /// If a folder was created, the path to the first created folder will be returned. + /// @default false + recursive: bool = false, + /// A file mode. If a string is passed, it is parsed as an octal integer. If not specified + /// @default + mode: Mode = 0o777, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Mkdir { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + var recursive = false; + var mode: Mode = 0o777; + + if (arguments.next()) |val| { + arguments.eat(); + + if (val.isObject()) { + if (val.getIfPropertyExists(ctx.ptr(), "recursive")) |recursive_| { + recursive = recursive_.toBoolean(); + } + + if (val.getIfPropertyExists(ctx.ptr(), "mode")) |mode_| { + mode = JSC.Node.modeFromJS(ctx, mode_, exception) orelse mode; + } + } + } + + return Mkdir{ + .path = path, + .recursive = recursive, + .mode = mode, + }; + } + }; + + const MkdirTemp = struct { + prefix: string = "", + encoding: Encoding = Encoding.utf8, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?MkdirTemp { + const prefix_value = arguments.next() orelse return MkdirTemp{}; + + var prefix = JSC.ZigString.Empty; + prefix_value.toZigString(&prefix, ctx.ptr()); + + if (exception.* != null) return null; + + arguments.eat(); + + var encoding = Encoding.utf8; + + if (arguments.next()) |val| { + arguments.eat(); + + switch (val.jsType()) { + JSC.JSValue.JSType.String, JSC.JSValue.JSType.StringObject, JSC.JSValue.JSType.DerivedStringObject => { + encoding = Encoding.fromStringValue(val, ctx.ptr()) orelse Encoding.utf8; + }, + else => { + if (val.isObject()) { + if (val.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding_| { + encoding = Encoding.fromStringValue(encoding_, ctx.ptr()) orelse Encoding.utf8; + } + } + }, + } + } + + return MkdirTemp{ + .prefix = prefix.slice(), + .encoding = encoding, + }; + } + }; + + pub const Readdir = struct { + path: PathLike, + encoding: Encoding = Encoding.utf8, + with_file_types: bool = false, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Readdir { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + var encoding = Encoding.utf8; + var with_file_types = false; + + if (arguments.next()) |val| { + arguments.eat(); + + switch (val.jsType()) { + JSC.JSValue.JSType.String, JSC.JSValue.JSType.StringObject, JSC.JSValue.JSType.DerivedStringObject => { + encoding = Encoding.fromStringValue(val, ctx.ptr()) orelse Encoding.utf8; + }, + else => { + if (val.isObject()) { + if (val.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding_| { + encoding = Encoding.fromStringValue(encoding_, ctx.ptr()) orelse Encoding.utf8; + } 
+ + if (val.getIfPropertyExists(ctx.ptr(), "withFileTypes")) |with_file_types_| { + with_file_types = with_file_types_.toBoolean(); + } + } + }, + } + } + + return Readdir{ + .path = path, + .encoding = encoding, + .with_file_types = with_file_types, + }; + } + }; + + pub const Close = struct { + fd: FileDescriptor, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Close { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "File descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "fd must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return Close{ + .fd = fd, + }; + } + }; + + pub const Open = struct { + path: PathLike, + flags: FileSystemFlags = FileSystemFlags.@"r", + mode: Mode = default_permission, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Open { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + var flags = FileSystemFlags.@"r"; + var mode: Mode = default_permission; + + if (arguments.next()) |val| { + arguments.eat(); + + if (val.isObject()) { + if (val.getIfPropertyExists(ctx.ptr(), "flags")) |flags_| { + flags = FileSystemFlags.fromJS(ctx, flags_, exception) orelse flags; + } + + if (val.getIfPropertyExists(ctx.ptr(), "mode")) |mode_| { + mode = JSC.Node.modeFromJS(ctx, mode_, exception) orelse mode; + } + } else if (!val.isEmpty()) { + flags = FileSystemFlags.fromJS(ctx, val, exception) orelse flags; + + if (arguments.nextEat()) |next| { + mode = JSC.Node.modeFromJS(ctx, next, exception) orelse mode; + } + } + } + + if (exception.* != null) return null; + + return Open{ + .path = path, + .flags = flags, + .mode = mode, + }; + } + }; + + /// Change the file system timestamps of the object referenced by `path`. + /// + /// The `atime` and `mtime` arguments follow these rules: + /// + /// * Values can be either numbers representing Unix epoch time in seconds,`Date`s, or a numeric string like `'123456789.0'`. + /// * If the value can not be converted to a number, or is `NaN`, `Infinity` or`-Infinity`, an `Error` will be thrown. 
+ /// @since v0.4.2 + pub const Utimes = Lutimes; + + pub const Futimes = struct { + fd: FileDescriptor, + atime: TimeLike, + mtime: TimeLike, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Futimes { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "File descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "fd must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + arguments.eat(); + if (exception.* != null) return null; + + const atime = JSC.Node.timeLikeFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "atime is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "atime must be a number, Date or string", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const mtime = JSC.Node.timeLikeFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mtime is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "mtime must be a number, Date or string", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return Futimes{ + .fd = fd, + .atime = atime, + .mtime = mtime, + }; + } + }; + + pub const FSync = struct { + fd: FileDescriptor, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?FSync { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "File descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "fd must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return FSync{ + .fd = fd, + }; + } + }; + + /// Write `buffer` to the file specified by `fd`. If `buffer` is a normal object, it + /// must have an own `toString` function property. + /// + /// `offset` determines the part of the buffer to be written, and `length` is + /// an integer specifying the number of bytes to write. + /// + /// `position` refers to the offset from the beginning of the file where this data + /// should be written. If `typeof position !== 'number'`, the data will be written + /// at the current position. See [`pwrite(2)`](http://man7.org/linux/man-pages/man2/pwrite.2.html). + /// + /// The callback will be given three arguments `(err, bytesWritten, buffer)` where`bytesWritten` specifies how many _bytes_ were written from `buffer`. + /// + /// If this method is invoked as its `util.promisify()` ed version, it returns + /// a promise for an `Object` with `bytesWritten` and `buffer` properties. + /// + /// It is unsafe to use `fs.write()` multiple times on the same file without waiting + /// for the callback. For this scenario, {@link createWriteStream} is + /// recommended. + /// + /// On Linux, positional writes don't work when the file is opened in append mode. + /// The kernel ignores the position argument and always appends the data to + /// the end of the file. 
+ /// @since v0.0.2 + /// + pub const Write = struct { + fd: FileDescriptor, + buffer: StringOrBuffer, + // buffer_val: JSC.JSValue = JSC.JSValue.zero, + offset: u64 = 0, + length: u64 = std.math.maxInt(u64), + position: ?ReadPosition = null, + encoding: Encoding = Encoding.buffer, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Write { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "File descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "fd must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + + if (exception.* != null) return null; + + const buffer = StringOrBuffer.fromJS(ctx.ptr(), arguments.arena.allocator(), arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "data is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "data must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + if (exception.* != null) return null; + + var args = Write{ + .fd = fd, + .buffer = buffer, + .encoding = switch (buffer) { + .string => Encoding.utf8, + .buffer => Encoding.buffer, + }, + }; + + arguments.eat(); + + // TODO: make this faster by passing argument count at comptime + if (arguments.next()) |current_| { + parse: { + var current = current_; + switch (buffer) { + // fs.write(fd, string[, position[, encoding]], callback) + .string => { + if (current.isNumber()) { + args.position = current.toU32(); + arguments.eat(); + current = arguments.next() orelse break :parse; + } + + if (current.isString()) { + args.encoding = Encoding.fromStringValue(current, ctx.ptr()) orelse Encoding.utf8; + arguments.eat(); + } + }, + // fs.write(fd, buffer[, offset[, length[, position]]], callback) + .buffer => { + if (!current.isNumber()) { + break :parse; + } + + if (!current.isNumber()) break :parse; + args.offset = current.toU32(); + arguments.eat(); + current = arguments.next() orelse break :parse; + + if (!current.isNumber()) break :parse; + args.length = current.toU32(); + arguments.eat(); + current = arguments.next() orelse break :parse; + + if (!current.isNumber()) break :parse; + args.position = current.toU32(); + arguments.eat(); + }, + } + } + } + + return args; + } + }; + + pub const Read = struct { + fd: FileDescriptor, + buffer: Buffer, + offset: u64 = 0, + length: u64 = std.math.maxInt(u64), + position: ?ReadPosition = null, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Read { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "File descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "fd must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + arguments.eat(); + + if (exception.* != null) return null; + + const buffer = Buffer.fromJS(ctx.ptr(), arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "buffer is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + 
"buffer must be a TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + arguments.eat(); + + var args = Read{ + .fd = fd, + .buffer = buffer, + }; + + if (arguments.next()) |current| { + arguments.eat(); + if (current.isNumber()) { + args.offset = current.toU32(); + + if (arguments.remaining.len < 2) { + JSC.throwInvalidArguments( + "length and position are required", + .{}, + ctx, + exception, + ); + + return null; + } + + args.length = arguments.remaining[0].toU32(); + + if (args.length == 0) { + JSC.throwInvalidArguments( + "length must be greater than 0", + .{}, + ctx, + exception, + ); + + return null; + } + + const position: i32 = if (arguments.remaining[1].isNumber()) + arguments.remaining[1].toInt32() + else + -1; + + args.position = if (position > -1) @intCast(ReadPosition, position) else null; + arguments.remaining = arguments.remaining[2..]; + } else if (current.isObject()) { + if (current.getIfPropertyExists(ctx.ptr(), "offset")) |num| { + args.offset = num.toU32(); + } + + if (current.getIfPropertyExists(ctx.ptr(), "length")) |num| { + args.length = num.toU32(); + } + + if (current.getIfPropertyExists(ctx.ptr(), "position")) |num| { + const position: i32 = if (num.isEmptyOrUndefinedOrNull()) -1 else num.toInt32(); + if (position > -1) { + args.position = @intCast(ReadPosition, position); + } + } + } + } + + return args; + } + }; + + /// Asynchronously reads the entire contents of a file. + /// @param path A path to a file. If a URL is provided, it must use the `file:` protocol. + /// If a file descriptor is provided, the underlying file will _not_ be closed automatically. + /// @param options Either the encoding for the result, or an object that contains the encoding and an optional flag. + /// If a flag is not provided, it defaults to `'r'`. 
+ pub const ReadFile = struct { + path: PathOrFileDescriptor, + encoding: Encoding = Encoding.utf8, + + flag: FileSystemFlags = FileSystemFlags.@"r", + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?ReadFile { + const path = PathOrFileDescriptor.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or a file descriptor", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + var encoding = Encoding.buffer; + var flag = FileSystemFlags.@"r"; + + if (arguments.next()) |arg| { + arguments.eat(); + if (arg.isString()) { + encoding = Encoding.fromStringValue(arg, ctx.ptr()) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } else if (arg.isObject()) { + if (arg.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding_| { + if (!encoding_.isUndefinedOrNull()) { + encoding = Encoding.fromStringValue(encoding_, ctx.ptr()) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + } + + if (arg.getIfPropertyExists(ctx.ptr(), "flag")) |flag_| { + flag = FileSystemFlags.fromJS(ctx, flag_, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid flag", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + } + } + + // Note: Signal is not implemented + return ReadFile{ + .path = path, + .encoding = encoding, + .flag = flag, + }; + } + }; + + pub const WriteFile = struct { + encoding: Encoding = Encoding.utf8, + flag: FileSystemFlags = FileSystemFlags.@"w", + mode: Mode = 0666, + file: PathOrFileDescriptor, + data: StringOrBuffer, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?WriteFile { + const file = PathOrFileDescriptor.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or a file descriptor", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const data = StringOrBuffer.fromJS(ctx.ptr(), arguments.arena.allocator(), arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "data is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "data must be a string or TypedArray", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + arguments.eat(); + + var encoding = Encoding.buffer; + var flag = FileSystemFlags.@"w"; + var mode: Mode = default_permission; + + if (arguments.next()) |arg| { + arguments.eat(); + if (arg.isString()) { + encoding = Encoding.fromStringValue(arg, ctx.ptr()) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } else if (arg.isObject()) { + if (arg.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding_| { + if (!encoding_.isUndefinedOrNull()) { + encoding = Encoding.fromStringValue(encoding_, ctx.ptr()) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + } + + if (arg.getIfPropertyExists(ctx.ptr(), "flag")) |flag_| { + flag = FileSystemFlags.fromJS(ctx, 
flag_, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid flag", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "mode")) |mode_| { + mode = JSC.Node.modeFromJS(ctx, mode_, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid flag", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + } + } + + // Note: Signal is not implemented + return WriteFile{ + .file = file, + .encoding = encoding, + .flag = flag, + .mode = mode, + .data = data, + }; + } + }; + + pub const AppendFile = WriteFile; + + pub const OpenDir = struct { + path: PathLike, + encoding: Encoding = Encoding.utf8, + + /// Number of directory entries that are buffered internally when reading from the directory. Higher values lead to better performance but higher memory usage. Default: 32 + buffer_size: c_int = 32, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?OpenDir { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or a file descriptor", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + var encoding = Encoding.buffer; + var buffer_size: c_int = 32; + + if (arguments.next()) |arg| { + arguments.eat(); + if (arg.isString()) { + encoding = Encoding.fromStringValue(arg, ctx.ptr()) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } else if (arg.isObject()) { + if (arg.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding_| { + if (!encoding_.isUndefinedOrNull()) { + encoding = Encoding.fromStringValue(encoding_, ctx.ptr()) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + } + + if (arg.getIfPropertyExists(ctx.ptr(), "bufferSize")) |buffer_size_| { + buffer_size = buffer_size_.toInt32(); + if (buffer_size < 0) { + if (exception.* == null) { + JSC.throwInvalidArguments( + "bufferSize must be > 0", + .{}, + ctx, + exception, + ); + } + return null; + } + } + } + } + + return OpenDir{ + .path = path, + .encoding = encoding, + .buffer_size = buffer_size, + }; + } + }; + pub const Exists = struct { + path: PathLike, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Exists { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or buffer", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return Exists{ + .path = path, + }; + } + }; + + pub const Access = struct { + path: PathLike, + mode: FileSystemFlags = FileSystemFlags.@"r", + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Access { + const path = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "path must be a string or buffer", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + var mode = FileSystemFlags.@"r"; + + if (arguments.next()) |arg| { + arguments.eat(); + if (arg.isString()) { + mode = FileSystemFlags.fromJS(ctx, arg, exception) orelse { + if (exception.* == null) { + 
JSC.throwInvalidArguments( + "Invalid mode", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + } + + return Access{ + .path = path, + .mode = mode, + }; + } + }; + + pub const CreateReadStream = struct { + file: PathOrFileDescriptor, + flags: FileSystemFlags = FileSystemFlags.@"r", + encoding: Encoding = Encoding.utf8, + mode: Mode = default_permission, + autoClose: bool = true, + emitClose: bool = true, + start: i32 = 0, + end: i32 = std.math.maxInt(i32), + highwater_mark: u32 = 64 * 1024, + global_object: *JSC.JSGlobalObject, + + pub fn copyToState(this: CreateReadStream, state: *JSC.Node.Readable.State) void { + state.encoding = this.encoding; + state.highwater_mark = this.highwater_mark; + state.start = this.start; + state.end = this.end; + } + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?CreateReadStream { + var path = PathLike.fromJS(ctx, arguments, exception); + if (exception.* != null) return null; + if (path == null) arguments.eat(); + + var stream = CreateReadStream{ + .file = undefined, + .global_object = ctx.ptr(), + }; + var fd: FileDescriptor = std.math.maxInt(FileDescriptor); + + if (arguments.next()) |arg| { + arguments.eat(); + if (arg.isString()) { + stream.encoding = Encoding.fromStringValue(arg, ctx.ptr()) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } else if (arg.isObject()) { + if (arg.getIfPropertyExists(ctx.ptr(), "mode")) |mode_| { + stream.mode = JSC.Node.modeFromJS(ctx, mode_, exception) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid mode", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding| { + stream.encoding = Encoding.fromStringValue(encoding, ctx.ptr()) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "flags")) |flags| { + stream.flags = FileSystemFlags.fromJS(ctx, flags, exception) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid flags", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "fd")) |flags| { + fd = JSC.Node.fileDescriptorFromJS(ctx, flags, exception) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid file descriptor", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "autoClose")) |autoClose| { + stream.autoClose = autoClose.toBoolean(); + } + + if (arg.getIfPropertyExists(ctx.ptr(), "emitClose")) |emitClose| { + stream.emitClose = emitClose.toBoolean(); + } + + if (arg.getIfPropertyExists(ctx.ptr(), "start")) |start| { + stream.start = start.toInt32(); + } + + if (arg.getIfPropertyExists(ctx.ptr(), "end")) |end| { + stream.end = end.toInt32(); + } + + if (arg.getIfPropertyExists(ctx.ptr(), "highWaterMark")) |highwaterMark| { + stream.highwater_mark = highwaterMark.toU32(); + } + } + } + + if (fd != std.math.maxInt(FileDescriptor)) { + stream.file = .{ .fd = fd }; + } else if (path) |path_| { + stream.file = .{ .path = path_ }; + } else { + JSC.throwInvalidArguments("Missing path or file descriptor", .{}, ctx, exception); + return null; + } + return stream; + } + }; + + pub const CreateWriteStream = struct { + file: PathOrFileDescriptor, + flags: FileSystemFlags = 
FileSystemFlags.@"w", + encoding: Encoding = Encoding.utf8, + mode: Mode = default_permission, + autoClose: bool = true, + emitClose: bool = true, + start: i32 = 0, + highwater_mark: u32 = 256 * 1024, + global_object: *JSC.JSGlobalObject, + + pub fn copyToState(this: CreateWriteStream, state: *JSC.Node.Writable.State) void { + state.encoding = this.encoding; + state.highwater_mark = this.highwater_mark; + state.start = this.start; + state.emit_close = this.emitClose; + } + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?CreateWriteStream { + var path = PathLike.fromJS(ctx, arguments, exception); + if (exception.* != null) return null; + if (path == null) arguments.eat(); + + var stream = CreateWriteStream{ + .file = undefined, + .global_object = ctx.ptr(), + }; + var fd: FileDescriptor = std.math.maxInt(FileDescriptor); + + if (arguments.next()) |arg| { + arguments.eat(); + if (arg.isString()) { + stream.encoding = Encoding.fromStringValue(arg, ctx.ptr()) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } else if (arg.isObject()) { + if (arg.getIfPropertyExists(ctx.ptr(), "mode")) |mode_| { + stream.mode = JSC.Node.modeFromJS(ctx, mode_, exception) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid mode", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "encoding")) |encoding| { + stream.encoding = Encoding.fromStringValue(encoding, ctx.ptr()) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid encoding", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "flags")) |flags| { + stream.flags = FileSystemFlags.fromJS(ctx, flags, exception) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid flags", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "fd")) |flags| { + fd = JSC.Node.fileDescriptorFromJS(ctx, flags, exception) orelse { + if (exception.* != null) { + JSC.throwInvalidArguments( + "Invalid file descriptor", + .{}, + ctx, + exception, + ); + } + return null; + }; + } + + if (arg.getIfPropertyExists(ctx.ptr(), "autoClose")) |autoClose| { + stream.autoClose = autoClose.toBoolean(); + } + + if (arg.getIfPropertyExists(ctx.ptr(), "emitClose")) |emitClose| { + stream.emitClose = emitClose.toBoolean(); + } + + if (arg.getIfPropertyExists(ctx.ptr(), "start")) |start| { + stream.start = start.toInt32(); + } + } + } + + if (fd != std.math.maxInt(FileDescriptor)) { + stream.file = .{ .fd = fd }; + } else if (path) |path_| { + stream.file = .{ .path = path_ }; + } else { + JSC.throwInvalidArguments("Missing path or file descriptor", .{}, ctx, exception); + return null; + } + return stream; + } + }; + + pub const FdataSync = struct { + fd: FileDescriptor, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?FdataSync { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "File descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "fd must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return FdataSync{ + .fd = 
fd, + }; + } + }; + + pub const CopyFile = struct { + src: PathLike, + dest: PathLike, + mode: Constants.Copyfile, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?CopyFile { + const src = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "src must be a string or buffer", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + const dest = PathLike.fromJS(ctx, arguments, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "dest must be a string or buffer", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + var mode: i32 = 0; + if (arguments.next()) |arg| { + arguments.eat(); + if (arg.isNumber()) { + mode = arg.toInt32(); + } + } + + return CopyFile{ + .src = src, + .dest = dest, + .mode = @intToEnum(Constants.Copyfile, mode), + }; + } + }; + + pub const WriteEv = struct { + fd: FileDescriptor, + buffers: []const ArrayBuffer, + position: ReadPosition, + }; + + pub const ReadEv = struct { + fd: FileDescriptor, + buffers: []ArrayBuffer, + position: ReadPosition, + }; + + pub const Copy = struct { + pub const FilterCallback = fn (source: string, destination: string) bool; + /// Dereference symlinks + /// @default false + dereference: bool = false, + + /// When `force` is `false`, and the destination + /// exists, throw an error. + /// @default false + errorOnExist: bool = false, + + /// Function to filter copied files/directories. Return + /// `true` to copy the item, `false` to ignore it. + filter: ?FilterCallback = null, + + /// Overwrite existing file or directory. _The copy + /// operation will ignore errors if you set this to false and the destination + /// exists. Use the `errorOnExist` option to change this behavior. + /// @default true + force: bool = true, + + /// When `true` timestamps from `src` will + /// be preserved. + /// @default false + preserve_timestamps: bool = false, + + /// Copy directories recursively. 
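// The `mode` integer handed to copyFile is reinterpreted as a bit set
// (Constants.Copyfile) and later queried through helpers such as isForceClone()
// and shouldntOverwrite(). A stand-alone sketch of that pattern; the flag
// values below are illustrative only, the real ones are derived in
// node_fs_constant.zig:
const CopyMode = enum(i32) {
    _,

    pub const excl: i32 = 1;
    pub const force_clone: i32 = 4;

    pub fn shouldntOverwrite(self: CopyMode) bool {
        return (@enumToInt(self) & excl) != 0;
    }

    pub fn isForceClone(self: CopyMode) bool {
        return (@enumToInt(self) & force_clone) != 0;
    }
};

test "copy mode bit flags" {
    const std = @import("std");
    const mode = @intToEnum(CopyMode, CopyMode.excl | CopyMode.force_clone);
    try std.testing.expect(mode.shouldntOverwrite() and mode.isForceClone());
    try std.testing.expect(!@intToEnum(CopyMode, 0).isForceClone());
}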
+ /// @default false + recursive: bool = false, + }; + + pub const UnwatchFile = void; + pub const Watch = void; + pub const WatchFile = void; + pub const Fsync = struct { + fd: FileDescriptor, + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?Fsync { + const fd = JSC.Node.fileDescriptorFromJS(ctx, arguments.next() orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "File descriptor is required", + .{}, + ctx, + exception, + ); + } + return null; + }, exception) orelse { + if (exception.* == null) { + JSC.throwInvalidArguments( + "fd must be a number", + .{}, + ctx, + exception, + ); + } + return null; + }; + + if (exception.* != null) return null; + + return Fsync{ + .fd = fd, + }; + } + }; +}; + +const Return = struct { + pub const Access = void; + pub const AppendFile = void; + pub const Close = void; + pub const CopyFile = void; + pub const Exists = bool; + pub const Fchmod = void; + pub const Chmod = void; + pub const Fchown = void; + pub const Fdatasync = void; + pub const Fstat = Stats; + pub const Rm = void; + pub const Fsync = void; + pub const Ftruncate = void; + pub const Futimes = void; + pub const Lchmod = void; + pub const Lchown = void; + pub const Link = void; + pub const Lstat = Stats; + pub const Mkdir = string; + pub const Mkdtemp = PathString; + pub const Open = FileDescriptor; + pub const WriteFile = void; + pub const Read = struct { + bytes_read: u52, + + pub fn toJS(this: Read, _: JSC.C.JSContextRef, _: JSC.C.ExceptionRef) JSC.C.JSValueRef { + return JSC.JSValue.jsNumberFromUint64(this.bytes_read).asObjectRef(); + } + }; + pub const ReadPromise = struct { + bytes_read: u52, + buffer_val: JSC.JSValue = JSC.JSValue.zero, + const fields = .{ + .@"bytesRead" = JSC.ZigString.init("bytesRead"), + .@"buffer" = JSC.ZigString.init("buffer"), + }; + pub fn toJS(this: Read, ctx: JSC.C.JSContextRef, _: JSC.C.ExceptionRef) JSC.C.JSValueRef { + defer if (!this.buffer_val.isEmptyOrUndefinedOrNull()) + JSC.C.JSValueUnprotect(ctx, this.buffer_val.asObjectRef()); + + return JSC.JSValue.createObject2( + ctx.ptr(), + &fields.bytesRead, + &fields.buffer, + JSC.JSValue.jsNumberFromUint64(@intCast(u52, @minimum(std.math.maxInt(u52), this.bytes_read))), + this.buffer_val, + ).asObjectRef(); + } + }; + + pub const WritePromise = struct { + bytes_written: u52, + buffer: StringOrBuffer, + buffer_val: JSC.JSValue = JSC.JSValue.zero, + const fields = .{ + .@"bytesWritten" = JSC.ZigString.init("bytesWritten"), + .@"buffer" = JSC.ZigString.init("buffer"), + }; + + // Excited for the issue that's like "cannot read file bigger than 2 GB" + pub fn toJS(this: Write, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) JSC.C.JSValueRef { + defer if (!this.buffer_val.isEmptyOrUndefinedOrNull() and this.buffer == .buffer) + JSC.C.JSValueUnprotect(ctx, this.buffer_val.asObjectRef()); + + return JSC.JSValue.createObject2( + ctx.ptr(), + &fields.bytesWritten, + &fields.buffer, + JSC.JSValue.jsNumberFromUint64(@intCast(u52, @minimum(std.math.maxInt(u52), this.bytes_written))), + if (this.buffer == .buffer) + this.buffer_val + else + JSC.JSValue.fromRef(this.buffer.toJS(ctx, exception)), + ).asObjectRef(); + } + }; + pub const Write = struct { + bytes_written: u52, + const fields = .{ + .@"bytesWritten" = JSC.ZigString.init("bytesWritten"), + }; + + // Excited for the issue that's like "cannot read file bigger than 2 GB" + pub fn toJS(this: Write, _: JSC.C.JSContextRef, _: JSC.C.ExceptionRef) JSC.C.JSValueRef { + return 
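// The read/write results above carry byte counts as u52 so they always fit
// exactly in a JavaScript number (integers are only exact up to 2^53 - 1).
// The clamp-then-cast step in isolation (stand-alone sketch):
const std = @import("std");

fn toJsByteCount(n: u64) u52 {
    const limit: u64 = std.math.maxInt(u52);
    return @intCast(u52, @minimum(limit, n));
}

test "byte counts are clamped into the u52 range" {
    try std.testing.expectEqual(@as(u52, std.math.maxInt(u52)), toJsByteCount(std.math.maxInt(u64)));
    try std.testing.expectEqual(@as(u52, 42), toJsByteCount(42));
}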
JSC.JSValue.jsNumberFromUint64(this.bytes_written).asObjectRef(); + } + }; + + pub const Readdir = union(Tag) { + with_file_types: []const DirEnt, + buffers: []const Buffer, + files: []const PathString, + + pub const Tag = enum { + with_file_types, + buffers, + files, + }; + + pub fn toJS(this: Readdir, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) JSC.C.JSValueRef { + return switch (this) { + .with_file_types => JSC.To.JS.withType([]const DirEnt, this.with_file_types, ctx, exception), + .buffers => JSC.To.JS.withType([]const Buffer, this.buffers, ctx, exception), + .files => JSC.To.JS.withTypeClone([]const PathString, this.files, ctx, exception, true), + }; + } + }; + pub const ReadFile = StringOrBuffer; + pub const Readlink = StringOrBuffer; + pub const Realpath = StringOrBuffer; + pub const RealpathNative = Realpath; + pub const Rename = void; + pub const Rmdir = void; + pub const Stat = Stats; + + pub const Symlink = void; + pub const Truncate = void; + pub const Unlink = void; + pub const UnwatchFile = void; + pub const Watch = void; + pub const WatchFile = void; + pub const Utimes = void; + + pub const CreateReadStream = *JSC.Node.Stream; + pub const CreateWriteStream = *JSC.Node.Stream; + pub const Chown = void; + pub const Lutimes = void; +}; + +/// Bun's implementation of the Node.js "fs" module +/// https://nodejs.org/api/fs.html +/// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/types/node/fs.d.ts +pub const NodeFS = struct { + async_io: *AsyncIO, + + /// Buffer to store a temporary file path that might appear in a returned error message. + /// + /// We want to avoid allocating a new path buffer for every error message so that JSC can clone + GC it. + /// That means a stack-allocated buffer won't suffice. Instead, we re-use + /// the heap allocated buffer on the NodefS struct + sync_error_buf: [bun.MAX_PATH_BYTES]u8 = undefined, + + pub const ReturnType = Return; + + pub fn access(this: *NodeFS, args: Arguments.Access, comptime _: Flavor) Maybe(Return.Access) { + var path = args.path.sliceZ(&this.sync_error_buf); + const rc = Syscall.system.access(path, @enumToInt(args.mode)); + return Maybe(Return.Access).errnoSysP(rc, .access, path) orelse Maybe(Return.Access).success; + } + + pub fn appendFile(this: *NodeFS, args: Arguments.AppendFile, comptime flavor: Flavor) Maybe(Return.AppendFile) { + var data = args.data.slice(); + + switch (args.file) { + .fd => |fd| { + switch (comptime flavor) { + .sync => { + while (data.len > 0) { + const written = switch (Syscall.write(fd, data)) { + .result => |result| result, + .err => |err| return .{ .err = err }, + }; + data = data[written..]; + } + + return Maybe(Return.AppendFile).success; + }, + else => { + _ = this; + @compileError("Not implemented yet"); + }, + } + }, + .path => |path_| { + const path = path_.sliceZ(&this.sync_error_buf); + switch (comptime flavor) { + .sync => { + const fd = switch (Syscall.open(path, @enumToInt(FileSystemFlags.@"a"), 000666)) { + .result => |result| result, + .err => |err| return .{ .err = err }, + }; + + defer { + _ = Syscall.close(fd); + } + + while (data.len > 0) { + const written = switch (Syscall.write(fd, data)) { + .result => |result| result, + .err => |err| return .{ .err = err }, + }; + data = data[written..]; + } + + return Maybe(Return.AppendFile).success; + }, + else => { + _ = this; + @compileError("Not implemented yet"); + }, + } + }, + } + + return Maybe(Return.AppendFile).todo; + } + + pub fn close(this: *NodeFS, args: Arguments.Close, comptime flavor: Flavor) 
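// appendFile() loops because a single write() call may accept only part of the
// buffer. The same loop in stand-alone form over a plain POSIX fd, with
// std.os.write playing the role of Syscall.write (sketch only):
const std = @import("std");

fn writeAll(fd: std.os.fd_t, data: []const u8) !void {
    var remaining = data;
    while (remaining.len > 0) {
        // write() reports how many bytes it actually consumed.
        const written = try std.os.write(fd, remaining);
        remaining = remaining[written..];
    }
}
// Example use (writes to standard error): try writeAll(std.os.STDERR_FILENO, "done\n");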
Maybe(Return.Close) { + switch (comptime flavor) { + .sync => { + return if (Syscall.close(args.fd)) |err| .{ .err = err } else Maybe(Return.Close).success; + }, + else => { + _ = this; + }, + } + + return .{ .err = Syscall.Error.todo }; + } + + /// https://github.com/libuv/libuv/pull/2233 + /// https://github.com/pnpm/pnpm/issues/2761 + /// https://github.com/libuv/libuv/pull/2578 + /// https://github.com/nodejs/node/issues/34624 + pub fn copyFile(this: *NodeFS, args: Arguments.CopyFile, comptime flavor: Flavor) Maybe(Return.CopyFile) { + const ret = Maybe(Return.CopyFile); + + switch (comptime flavor) { + .sync => { + var src_buf: [bun.MAX_PATH_BYTES]u8 = undefined; + var dest_buf: [bun.MAX_PATH_BYTES]u8 = undefined; + var src = args.src.sliceZ(&src_buf); + var dest = args.dest.sliceZ(&dest_buf); + + if (comptime Environment.isMac) { + if (args.mode.isForceClone()) { + // https://www.manpagez.com/man/2/clonefile/ + return ret.errnoSysP(C.clonefile(src, dest, 0), .clonefile, src) orelse ret.success; + } + + var mode: Mode = C.darwin.COPYFILE_ACL | C.darwin.COPYFILE_DATA; + if (args.mode.shouldntOverwrite()) { + mode |= C.darwin.COPYFILE_EXCL; + } + + return ret.errnoSysP(C.copyfile(src, dest, null, mode), .copyfile, src) orelse ret.success; + } + + if (comptime Environment.isLinux) { + const src_fd = switch (Syscall.open(src, std.os.O.RDONLY, 0644)) { + .result => |result| result, + .err => |err| return .{ .err = err }, + }; + defer { + _ = Syscall.close(src_fd); + } + + const stat_: linux.Stat = switch (Syscall.fstat(src_fd)) { + .result => |result| result, + .err => |err| return Maybe(Return.CopyFile){ .err = err }, + }; + + if (!os.S.ISREG(stat_.mode)) { + return Maybe(Return.CopyFile){ .err = .{ .errno = @enumToInt(C.SystemErrno.ENOTSUP) } }; + } + + var flags: Mode = std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC; + if (args.mode.shouldntOverwrite()) { + flags |= std.os.O.EXCL; + } + + const dest_fd = switch (Syscall.open(dest, flags, flags)) { + .result => |result| result, + .err => |err| return Maybe(Return.CopyFile){ .err = err }, + }; + defer { + _ = Syscall.close(dest_fd); + } + + var off_in_copy = @bitCast(i64, @as(u64, 0)); + var off_out_copy = @bitCast(i64, @as(u64, 0)); + + // https://manpages.debian.org/testing/manpages-dev/ioctl_ficlone.2.en.html + if (args.mode.isForceClone()) { + return Maybe(Return.CopyFile).todo; + } + + var size = @intCast(usize, @maximum(stat_.size, 0)); + + if (size == 0) { + // copy until EOF + size = std.mem.page_size; + while (true) { + // Linux Kernel 5.3 or later + const written = linux.copy_file_range(src_fd, &off_in_copy, dest_fd, &off_out_copy, size, 0); + if (ret.errnoSysP(written, .copy_file_range, dest)) |err| return err; + // wrote zero bytes means EOF + if (written == 0) break; + size -|= written; + } + } else { + while (size > 0) { + // Linux Kernel 5.3 or later + const written = linux.copy_file_range(src_fd, &off_in_copy, dest_fd, &off_out_copy, size, 0); + if (ret.errnoSysP(written, .copy_file_range, dest)) |err| return err; + // wrote zero bytes means EOF + if (written == 0) break; + size -|= written; + } + } + + return ret.success; + } + }, + else => { + _ = args; + _ = this; + _ = flavor; + }, + } + + return Maybe(Return.CopyFile).todo; + } + pub fn exists(this: *NodeFS, args: Arguments.Exists, comptime flavor: Flavor) Maybe(Return.Exists) { + const Ret = Maybe(Return.Exists); + const path = args.path.sliceZ(&this.sync_error_buf); + switch (comptime flavor) { + .sync => { + // access() may not work correctly on NFS file systems 
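// The Linux copy_file_range() loop above decrements the remaining size with
// `-|=`, Zig's saturating subtraction, so the counter stops at zero instead of
// wrapping below it. Minimal stand-alone illustration:
test "saturating subtraction never wraps" {
    const std = @import("std");
    var remaining: usize = 3;
    remaining -|= 5;
    try std.testing.expectEqual(@as(usize, 0), remaining);
}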
with UID + // mapping enabled, because UID mapping is done on the server and + // hidden from the client, which checks permissions. Similar + // problems can occur to FUSE mounts. + const rc = (system.access(path, std.os.F_OK)); + return Ret{ .result = rc == 0 }; + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Ret.todo; + } + + pub fn chown(this: *NodeFS, args: Arguments.Chown, comptime flavor: Flavor) Maybe(Return.Chown) { + const path = args.path.sliceZ(&this.sync_error_buf); + + switch (comptime flavor) { + .sync => return Syscall.chown(path, args.uid, args.gid), + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Chown).todo; + } + + /// This should almost never be async + pub fn chmod(this: *NodeFS, args: Arguments.Chmod, comptime flavor: Flavor) Maybe(Return.Chmod) { + const path = args.path.sliceZ(&this.sync_error_buf); + + switch (comptime flavor) { + .sync => { + return Maybe(Return.Chmod).errnoSysP(C.chmod(path, args.mode), .chmod, path) orelse + Maybe(Return.Chmod).success; + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Chmod).todo; + } + + /// This should almost never be async + pub fn fchmod(this: *NodeFS, args: Arguments.FChmod, comptime flavor: Flavor) Maybe(Return.Fchmod) { + switch (comptime flavor) { + .sync => { + return Syscall.fchmod(args.fd, args.mode); + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Fchmod).todo; + } + pub fn fchown(this: *NodeFS, args: Arguments.Fchown, comptime flavor: Flavor) Maybe(Return.Fchown) { + switch (comptime flavor) { + .sync => { + return Maybe(Return.Fchown).errnoSys(C.fchown(args.fd, args.uid, args.gid), .fchown) orelse + Maybe(Return.Fchown).success; + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Fchown).todo; + } + pub fn fdatasync(this: *NodeFS, args: Arguments.FdataSync, comptime flavor: Flavor) Maybe(Return.Fdatasync) { + switch (comptime flavor) { + .sync => return Maybe(Return.Fdatasync).errnoSys(system.fdatasync(args.fd), .fdatasync) orelse + Maybe(Return.Fdatasync).success, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Fdatasync).todo; + } + pub fn fstat(this: *NodeFS, args: Arguments.Fstat, comptime flavor: Flavor) Maybe(Return.Fstat) { + if (args.big_int) return Maybe(Return.Fstat).todo; + + switch (comptime flavor) { + .sync => { + return switch (Syscall.fstat(args.fd)) { + .result => |result| Maybe(Return.Fstat){ .result = Stats.init(result) }, + .err => |err| Maybe(Return.Fstat){ .err = err }, + }; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Fstat).todo; + } + + pub fn fsync(this: *NodeFS, args: Arguments.Fsync, comptime flavor: Flavor) Maybe(Return.Fsync) { + switch (comptime flavor) { + .sync => return Maybe(Return.Fsync).errnoSys(system.fsync(args.fd), .fsync) orelse + Maybe(Return.Fsync).success, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Fsync).todo; + } + + pub fn ftruncate(this: *NodeFS, args: Arguments.FTruncate, comptime flavor: Flavor) Maybe(Return.Ftruncate) { + switch (comptime flavor) { + .sync => return Maybe(Return.Ftruncate).errnoSys(system.ftruncate(args.fd, args.len orelse 0), .ftruncate) orelse + Maybe(Return.Ftruncate).success, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Ftruncate).todo; + } + pub fn futimes(this: *NodeFS, args: Arguments.Futimes, comptime flavor: Flavor) Maybe(Return.Futimes) { + var 
times = [2]std.os.timespec{ + .{ + .tv_sec = args.mtime, + .tv_nsec = 0, + }, + .{ + .tv_sec = args.atime, + .tv_nsec = 0, + }, + }; + + switch (comptime flavor) { + .sync => return if (Maybe(Return.Futimes).errnoSys(system.futimens(args.fd, ×), .futimens)) |err| + err + else + Maybe(Return.Futimes).success, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Futimes).todo; + } + + pub fn lchmod(this: *NodeFS, args: Arguments.LCHmod, comptime flavor: Flavor) Maybe(Return.Lchmod) { + const path = args.path.sliceZ(&this.sync_error_buf); + + switch (comptime flavor) { + .sync => { + return Maybe(Return.Lchmod).errnoSysP(C.lchmod(path, args.mode), .lchmod, path) orelse + Maybe(Return.Lchmod).success; + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Lchmod).todo; + } + + pub fn lchown(this: *NodeFS, args: Arguments.LChown, comptime flavor: Flavor) Maybe(Return.Lchown) { + const path = args.path.sliceZ(&this.sync_error_buf); + + switch (comptime flavor) { + .sync => { + return Maybe(Return.Lchown).errnoSysP(C.lchown(path, args.uid, args.gid), .lchown, path) orelse + Maybe(Return.Lchown).success; + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Lchown).todo; + } + pub fn link(this: *NodeFS, args: Arguments.Link, comptime flavor: Flavor) Maybe(Return.Link) { + var new_path_buf: [bun.MAX_PATH_BYTES]u8 = undefined; + const from = args.old_path.sliceZ(&this.sync_error_buf); + const to = args.new_path.sliceZ(&new_path_buf); + + switch (comptime flavor) { + .sync => { + return Maybe(Return.Link).errnoSysP(system.link(from, to, 0), .link, from) orelse + Maybe(Return.Link).success; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Link).todo; + } + pub fn lstat(this: *NodeFS, args: Arguments.Lstat, comptime flavor: Flavor) Maybe(Return.Lstat) { + if (args.big_int) return Maybe(Return.Lstat).todo; + + switch (comptime flavor) { + .sync => { + return switch (Syscall.lstat( + args.path.sliceZ( + &this.sync_error_buf, + ), + )) { + .result => |result| Maybe(Return.Lstat){ .result = Return.Lstat.init(result) }, + .err => |err| Maybe(Return.Lstat){ .err = err }, + }; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Lstat).todo; + } + + pub fn mkdir(this: *NodeFS, args: Arguments.Mkdir, comptime flavor: Flavor) Maybe(Return.Mkdir) { + return if (args.recursive) mkdirRecursive(this, args, flavor) else mkdirNonRecursive(this, args, flavor); + } + // Node doesn't absolute the path so we don't have to either + fn mkdirNonRecursive(this: *NodeFS, args: Arguments.Mkdir, comptime flavor: Flavor) Maybe(Return.Mkdir) { + switch (comptime flavor) { + .sync => { + const path = args.path.sliceZ(&this.sync_error_buf); + return switch (Syscall.mkdir(path, args.mode)) { + .result => Maybe(Return.Mkdir){ .result = "" }, + .err => |err| Maybe(Return.Mkdir){ .err = err }, + }; + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Mkdir).todo; + } + + // TODO: windows + // TODO: verify this works correctly with unicode codepoints + fn mkdirRecursive(this: *NodeFS, args: Arguments.Mkdir, comptime flavor: Flavor) Maybe(Return.Mkdir) { + const Option = Maybe(Return.Mkdir); + if (comptime Environment.isWindows) @compileError("This needs to be implemented on Windows."); + + switch (comptime flavor) { + // The sync version does no allocation except when returning the path + .sync => { + var buf: [bun.MAX_PATH_BYTES]u8 = undefined; + const path 
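// For reference, POSIX futimens()/utimensat() interpret the two-element
// timespec array as { access time, modification time }. A small helper that
// makes that ordering explicit (stand-alone sketch, not Bun code):
const std = @import("std");

fn posixTimes(atime_sec: isize, mtime_sec: isize) [2]std.os.timespec {
    return .{
        .{ .tv_sec = atime_sec, .tv_nsec = 0 }, // [0] = last access
        .{ .tv_sec = mtime_sec, .tv_nsec = 0 }, // [1] = last modification
    };
}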
= args.path.sliceZWithForceCopy(&buf, true); + const len = @truncate(u16, path.len); + + // First, attempt to create the desired directory + // If that fails, then walk back up the path until we have a match + switch (Syscall.mkdir(path, args.mode)) { + .err => |err| { + switch (err.getErrno()) { + else => { + @memcpy(&this.sync_error_buf, path.ptr, len); + return .{ .err = err.withPath(this.sync_error_buf[0..len]) }; + }, + + .EXIST => { + return Option{ .result = "" }; + }, + // continue + .NOENT => {}, + } + }, + .result => { + return Option{ .result = args.path.slice() }; + }, + } + + var working_mem = &this.sync_error_buf; + @memcpy(working_mem, path.ptr, len); + + var i: u16 = len - 1; + + // iterate backwards until creating the directory works successfully + while (i > 0) : (i -= 1) { + if (path[i] == std.fs.path.sep) { + working_mem[i] = 0; + var parent: [:0]u8 = working_mem[0..i :0]; + + switch (Syscall.mkdir(parent, args.mode)) { + .err => |err| { + working_mem[i] = std.fs.path.sep; + switch (err.getErrno()) { + .EXIST => { + // Handle race condition + break; + }, + .NOENT => { + continue; + }, + else => return .{ .err = err.withPath(parent) }, + } + }, + .result => { + // We found a parent that worked + working_mem[i] = std.fs.path.sep; + break; + }, + } + } + } + var first_match: u16 = i; + i += 1; + // after we find one that works, we go forward _after_ the first working directory + while (i < len) : (i += 1) { + if (path[i] == std.fs.path.sep) { + working_mem[i] = 0; + var parent: [:0]u8 = working_mem[0..i :0]; + + switch (Syscall.mkdir(parent, args.mode)) { + .err => |err| { + working_mem[i] = std.fs.path.sep; + switch (err.getErrno()) { + .EXIST => { + if (Environment.allow_assert) std.debug.assert(false); + continue; + }, + else => return .{ .err = err }, + } + }, + + .result => { + working_mem[i] = std.fs.path.sep; + }, + } + } + } + + working_mem[len] = 0; + + // Our final directory will not have a trailing separator + // so we have to create it once again + switch (Syscall.mkdir(working_mem[0..len :0], args.mode)) { + .err => |err| { + switch (err.getErrno()) { + // handle the race condition + .EXIST => { + var display_path: []const u8 = ""; + if (first_match != std.math.maxInt(u16)) { + // TODO: this leaks memory + display_path = bun.default_allocator.dupe(u8, display_path[0..first_match]) catch unreachable; + } + return Option{ .result = display_path }; + }, + + // NOENT shouldn't happen here + else => return .{ + .err = err.withPath(path), + }, + } + }, + .result => { + var display_path = args.path.slice(); + if (first_match != std.math.maxInt(u16)) { + // TODO: this leaks memory + display_path = bun.default_allocator.dupe(u8, display_path[0..first_match]) catch unreachable; + } + return Option{ .result = display_path }; + }, + } + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Mkdir).todo; + } + + pub fn mkdtemp(this: *NodeFS, args: Arguments.MkdirTemp, comptime flavor: Flavor) Maybe(Return.Mkdtemp) { + var prefix_buf = &this.sync_error_buf; + prefix_buf[0] = 0; + const len = args.prefix.len; + if (len > 0) { + @memcpy(prefix_buf, args.prefix.ptr, len); + prefix_buf[len] = 0; + } + + const rc = C.mkdtemp(prefix_buf); + switch (std.c.getErrno(@ptrToInt(rc))) { + .SUCCESS => {}, + else => |errno| return .{ .err = Syscall.Error{ .errno = @truncate(Syscall.Error.Int, @enumToInt(errno)), .syscall = .mkdtemp } }, + } + + _ = this; + _ = flavor; + return .{ + .result = PathString.init(bun.default_allocator.dupe(u8, std.mem.span(rc.?)) 
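// The recursive mkdir above first tries the full path, then walks backwards
// through the separators until some ancestor can be created (or already
// exists), and finally walks forwards again creating each missing component.
// A stand-alone sketch that only prints the prefixes in the order they would
// be attempted for a POSIX-style path (the real loop stops walking back as
// soon as a mkdir succeeds or reports EEXIST):
const std = @import("std");

pub fn main() void {
    const path = "/a/b/c";
    std.debug.print("first attempt: {s}\n", .{path});
    var i: usize = path.len - 1;
    while (i > 0) : (i -= 1) {
        if (path[i] == std.fs.path.sep)
            std.debug.print("walk back, try parent: {s}\n", .{path[0..i]});
    }
    i = 1;
    while (i < path.len) : (i += 1) {
        if (path[i] == std.fs.path.sep)
            std.debug.print("walk forward, create: {s}\n", .{path[0..i]});
    }
    std.debug.print("create final directory: {s}\n", .{path});
}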
catch unreachable), + }; + } + pub fn open(this: *NodeFS, args: Arguments.Open, comptime flavor: Flavor) Maybe(Return.Open) { + switch (comptime flavor) { + // The sync version does no allocation except when returning the path + .sync => { + const path = args.path.sliceZ(&this.sync_error_buf); + return switch (Syscall.open(path, @enumToInt(args.flags), args.mode)) { + .err => |err| .{ + .err = err.withPath(args.path.slice()), + }, + .result => |fd| .{ .result = fd }, + }; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Open).todo; + } + pub fn openDir(this: *NodeFS, args: Arguments.OpenDir, comptime flavor: Flavor) Maybe(Return.OpenDir) { + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.OpenDir).todo; + } + + fn _read(this: *NodeFS, args: Arguments.Read, comptime flavor: Flavor) Maybe(Return.Read) { + _ = args; + _ = this; + _ = flavor; + if (Environment.allow_assert) std.debug.assert(args.position == null); + + switch (comptime flavor) { + // The sync version does no allocation except when returning the path + .sync => { + var buf = args.buffer.slice(); + buf = buf[@minimum(args.offset, buf.len)..]; + buf = buf[0..@minimum(buf.len, args.length)]; + + return switch (Syscall.read(args.fd, buf)) { + .err => |err| .{ + .err = err, + }, + .result => |amt| .{ + .result = .{ + .bytes_read = @truncate(u52, amt), + }, + }, + }; + }, + else => {}, + } + + return Maybe(Return.Read).todo; + } + + fn _pread(this: *NodeFS, args: Arguments.Read, comptime flavor: Flavor) Maybe(Return.Read) { + _ = this; + + switch (comptime flavor) { + .sync => { + var buf = args.buffer.slice(); + buf = buf[@minimum(args.offset, buf.len)..]; + buf = buf[0..@minimum(buf.len, args.length)]; + + return switch (Syscall.pread(args.fd, buf, args.position.?)) { + .err => |err| .{ + .err = err, + }, + .result => |amt| .{ + .result = .{ + .bytes_read = @truncate(u52, amt), + }, + }, + }; + }, + else => {}, + } + + return Maybe(Return.Read).todo; + } + + pub fn read(this: *NodeFS, args: Arguments.Read, comptime flavor: Flavor) Maybe(Return.Read) { + return if (args.position != null) + this._pread( + args, + comptime flavor, + ) + else + this._read( + args, + comptime flavor, + ); + } + + pub fn write(this: *NodeFS, args: Arguments.Write, comptime flavor: Flavor) Maybe(Return.Write) { + return if (args.position != null) _pwrite(this, args, flavor) else _write(this, args, flavor); + } + fn _write(this: *NodeFS, args: Arguments.Write, comptime flavor: Flavor) Maybe(Return.Write) { + _ = args; + _ = this; + _ = flavor; + + switch (comptime flavor) { + .sync => { + var buf = args.buffer.slice(); + buf = buf[@minimum(args.offset, buf.len)..]; + buf = buf[0..@minimum(buf.len, args.length)]; + + return switch (Syscall.write(args.fd, buf)) { + .err => |err| .{ + .err = err, + }, + .result => |amt| .{ + .result = .{ + .bytes_written = @truncate(u52, amt), + }, + }, + }; + }, + else => {}, + } + + return Maybe(Return.Write).todo; + } + + fn _pwrite(this: *NodeFS, args: Arguments.Write, comptime flavor: Flavor) Maybe(Return.Write) { + _ = args; + _ = this; + _ = flavor; + + const position = args.position.?; + + switch (comptime flavor) { + .sync => { + var buf = args.buffer.slice(); + buf = buf[@minimum(args.offset, buf.len)..]; + buf = buf[0..@minimum(args.length, buf.len)]; + + return switch (Syscall.pwrite(args.fd, buf, position)) { + .err => |err| .{ + .err = err, + }, + .result => |amt| .{ .result = .{ + .bytes_written = @truncate(u52, amt), + } }, + }; + }, + else => {}, + } + + return 
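// _read/_pread/_write/_pwrite all narrow the caller's buffer with two clamps,
// so an oversized offset or length can never slice out of bounds. The same
// windowing step as a stand-alone helper (sketch):
const std = @import("std");

fn window(buf: []u8, offset: usize, length: usize) []u8 {
    const tail = buf[@minimum(offset, buf.len)..];
    return tail[0..@minimum(tail.len, length)];
}

test "window never exceeds the buffer" {
    var data = [_]u8{ 1, 2, 3, 4 };
    try std.testing.expectEqual(@as(usize, 0), window(&data, 10, 2).len);
    try std.testing.expectEqual(@as(usize, 2), window(&data, 2, 99).len);
}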
Maybe(Return.Write).todo; + } + + pub fn readdir(this: *NodeFS, args: Arguments.Readdir, comptime flavor: Flavor) Maybe(Return.Readdir) { + return switch (args.encoding) { + .buffer => _readdir( + this, + args, + Buffer, + flavor, + ), + else => { + if (!args.with_file_types) { + return _readdir( + this, + args, + PathString, + flavor, + ); + } + + return _readdir( + this, + args, + DirEnt, + flavor, + ); + }, + }; + } + + pub fn _readdir( + this: *NodeFS, + args: Arguments.Readdir, + comptime ExpectedType: type, + comptime flavor: Flavor, + ) Maybe(Return.Readdir) { + const file_type = comptime switch (ExpectedType) { + DirEnt => "with_file_types", + PathString => "files", + Buffer => "buffers", + else => unreachable, + }; + + switch (comptime flavor) { + .sync => { + var path = args.path.sliceZ(&this.sync_error_buf); + const flags = os.O.DIRECTORY | os.O.RDONLY; + const fd = switch (Syscall.open(path, flags, 0)) { + .err => |err| return .{ + .err = err.withPath(args.path.slice()), + }, + .result => |fd_| fd_, + }; + defer { + _ = Syscall.close(fd); + } + + var entries = std.ArrayList(ExpectedType).init(bun.default_allocator); + var dir = std.fs.Dir{ .fd = fd }; + var iterator = DirIterator.iterate(dir); + var entry = iterator.next(); + while (switch (entry) { + .err => |err| { + for (entries.items) |*item| { + switch (comptime ExpectedType) { + DirEnt => { + bun.default_allocator.free(item.name.slice()); + }, + Buffer => { + item.destroy(); + }, + PathString => { + bun.default_allocator.free(item.slice()); + }, + else => unreachable, + } + } + + entries.deinit(); + + return .{ + .err = err.withPath(args.path.slice()), + }; + }, + .result => |ent| ent, + }) |current| : (entry = iterator.next()) { + switch (comptime ExpectedType) { + DirEnt => { + entries.append(.{ + .name = PathString.init(bun.default_allocator.dupe(u8, current.name.slice()) catch unreachable), + .kind = current.kind, + }) catch unreachable; + }, + Buffer => { + const slice = current.name.slice(); + entries.append(Buffer.fromString(slice, bun.default_allocator) catch unreachable) catch unreachable; + }, + PathString => { + entries.append( + PathString.init(bun.default_allocator.dupe(u8, current.name.slice()) catch unreachable), + ) catch unreachable; + }, + else => unreachable, + } + } + + return .{ .result = @unionInit(Return.Readdir, file_type, entries.items) }; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Readdir).todo; + } + pub fn readFile(this: *NodeFS, args: Arguments.ReadFile, comptime flavor: Flavor) Maybe(Return.ReadFile) { + var path: [:0]const u8 = undefined; + switch (comptime flavor) { + .sync => { + const fd = switch (args.path) { + .path => brk: { + path = args.path.path.sliceZ(&this.sync_error_buf); + break :brk switch (Syscall.open( + path, + os.O.RDONLY | os.O.NOCTTY, + 0, + )) { + .err => |err| return .{ + .err = err.withPath(if (args.path == .path) args.path.path.slice() else ""), + }, + .result => |fd_| fd_, + }; + }, + .fd => |_fd| _fd, + }; + + defer { + if (args.path == .path) + _ = Syscall.close(fd); + } + + const stat_ = switch (Syscall.fstat(fd)) { + .err => |err| return .{ + .err = err, + }, + .result => |stat_| stat_, + }; + + const size = @intCast(u64, @maximum(stat_.size, 0)); + var buf = std.ArrayList(u8).init(bun.default_allocator); + buf.ensureTotalCapacityPrecise(size + 16) catch unreachable; + buf.expandToCapacity(); + var total: usize = 0; + while (total < size) { + switch (Syscall.read(fd, buf.items.ptr[total..buf.capacity])) { + .err => |err| 
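// If directory iteration fails part-way, _readdir above frees every entry it
// had already collected before returning the error. The same guarantee
// expressed with errdefer over an ArrayList (stand-alone sketch, illustrative
// types):
const std = @import("std");

fn collectNames(out: *std.ArrayList([]u8), names: []const []const u8) !void {
    errdefer {
        for (out.items) |item| out.allocator.free(item);
        out.clearAndFree();
    }
    for (names) |name| {
        const copy = try out.allocator.dupe(u8, name);
        errdefer out.allocator.free(copy);
        try out.append(copy);
    }
}

test "collectNames copies every entry" {
    var list = std.ArrayList([]u8).init(std.testing.allocator);
    defer {
        for (list.items) |item| std.testing.allocator.free(item);
        list.deinit();
    }
    try collectNames(&list, &[_][]const u8{ "a.txt", "b.txt" });
    try std.testing.expectEqual(@as(usize, 2), list.items.len);
}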
return .{ + .err = err, + }, + .result => |amt| { + total += amt; + // There are cases where stat()'s size is wrong or out of date + if (total > size and amt != 0) { + buf.ensureUnusedCapacity(8096) catch unreachable; + buf.expandToCapacity(); + continue; + } + + if (amt == 0) { + break; + } + }, + } + } + buf.items.len = total; + return switch (args.encoding) { + .buffer => .{ + .result = .{ + .buffer = Buffer.fromBytes(buf.items, bun.default_allocator, .Uint8Array), + }, + }, + else => .{ + .result = .{ + .string = buf.items, + }, + }, + }; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.ReadFile).todo; + } + + pub fn writeFile(this: *NodeFS, args: Arguments.WriteFile, comptime flavor: Flavor) Maybe(Return.WriteFile) { + var path: [:0]const u8 = undefined; + + switch (comptime flavor) { + .sync => { + const fd = switch (args.file) { + .path => brk: { + path = args.file.path.sliceZ(&this.sync_error_buf); + break :brk switch (Syscall.open( + path, + @enumToInt(args.flag) | os.O.NOCTTY, + args.mode, + )) { + .err => |err| return .{ + .err = err.withPath(path), + }, + .result => |fd_| fd_, + }; + }, + .fd => |_fd| _fd, + }; + + defer { + if (args.file == .path) + _ = Syscall.close(fd); + } + + var buf = args.data.slice(); + var written: usize = 0; + + while (buf.len > 0) { + switch (Syscall.write(fd, buf)) { + .err => |err| return .{ + .err = err, + }, + .result => |amt| { + buf = buf[amt..]; + written += amt; + if (amt == 0) { + break; + } + }, + } + } + + _ = this.ftruncate(.{ .fd = fd, .len = @truncate(JSC.WebCore.Blob.SizeType, written) }, .sync); + + return Maybe(Return.WriteFile).success; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.WriteFile).todo; + } + + pub fn readlink(this: *NodeFS, args: Arguments.Readlink, comptime flavor: Flavor) Maybe(Return.Readlink) { + var outbuf: [bun.MAX_PATH_BYTES]u8 = undefined; + var inbuf = &this.sync_error_buf; + switch (comptime flavor) { + .sync => { + const path = args.path.sliceZ(inbuf); + + const len = switch (Syscall.readlink(path, &outbuf)) { + .err => |err| return .{ + .err = err.withPath(args.path.slice()), + }, + .result => |buf_| buf_, + }; + + return .{ + .result = switch (args.encoding) { + .buffer => .{ + .buffer = Buffer.fromString(outbuf[0..len], bun.default_allocator) catch unreachable, + }, + else => .{ + .string = bun.default_allocator.dupe(u8, outbuf[0..len]) catch unreachable, + }, + }, + }; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Readlink).todo; + } + pub fn realpath(this: *NodeFS, args: Arguments.Realpath, comptime flavor: Flavor) Maybe(Return.Realpath) { + var outbuf: [bun.MAX_PATH_BYTES]u8 = undefined; + var inbuf = &this.sync_error_buf; + if (comptime Environment.allow_assert) std.debug.assert(FileSystem.instance_loaded); + + switch (comptime flavor) { + .sync => { + var path_slice = args.path.slice(); + + var parts = [_]string{ FileSystem.instance.top_level_dir, path_slice }; + var path_ = FileSystem.instance.absBuf(&parts, inbuf); + inbuf[path_.len] = 0; + var path: [:0]u8 = inbuf[0..path_.len :0]; + + const flags = if (comptime Environment.isLinux) + // O_PATH is faster + std.os.O.PATH + else + std.os.O.RDONLY; + + const fd = switch (Syscall.open(path, flags, 0)) { + .err => |err| return .{ + .err = err.withPath(path), + }, + .result => |fd_| fd_, + }; + + defer { + _ = Syscall.close(fd); + } + + const buf = switch (Syscall.getFdPath(fd, &outbuf)) { + .err => |err| return .{ + .err = 
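// readFile() above treats fstat()'s size only as a sizing hint and keeps
// calling read() until it returns 0, since that is the only reliable
// end-of-file signal. The same loop over std.fs.File in stand-alone form
// (sketch):
const std = @import("std");

fn readToEnd(file: std.fs.File, out: *std.ArrayList(u8)) !void {
    var chunk: [4096]u8 = undefined;
    while (true) {
        const n = try file.read(&chunk);
        if (n == 0) break; // EOF
        try out.appendSlice(chunk[0..n]);
    }
}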
err.withPath(path), + }, + .result => |buf_| buf_, + }; + + return .{ + .result = switch (args.encoding) { + .buffer => .{ + .buffer = Buffer.fromString(buf, bun.default_allocator) catch unreachable, + }, + else => .{ + .string = bun.default_allocator.dupe(u8, buf) catch unreachable, + }, + }, + }; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Realpath).todo; + } + pub const realpathNative = realpath; + // pub fn realpathNative(this: *NodeFS, args: Arguments.Realpath, comptime flavor: Flavor) Maybe(Return.Realpath) { + // _ = args; + // _ = this; + // _ = flavor; + // return error.NotImplementedYet; + // } + pub fn rename(this: *NodeFS, args: Arguments.Rename, comptime flavor: Flavor) Maybe(Return.Rename) { + var from_buf = &this.sync_error_buf; + var to_buf: [bun.MAX_PATH_BYTES]u8 = undefined; + + switch (comptime flavor) { + .sync => { + var from = args.old_path.sliceZ(from_buf); + var to = args.new_path.sliceZ(&to_buf); + return Syscall.rename(from, to); + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Rename).todo; + } + pub fn rmdir(this: *NodeFS, args: Arguments.RmDir, comptime flavor: Flavor) Maybe(Return.Rmdir) { + switch (comptime flavor) { + .sync => { + var dir = args.old_path.sliceZ(&this.sync_error_buf); + _ = dir; + }, + else => {}, + } + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Rmdir).todo; + } + pub fn rm(this: *NodeFS, args: Arguments.RmDir, comptime flavor: Flavor) Maybe(Return.Rm) { + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Rm).todo; + } + pub fn stat(this: *NodeFS, args: Arguments.Stat, comptime flavor: Flavor) Maybe(Return.Stat) { + if (args.big_int) return Maybe(Return.Stat).todo; + + switch (comptime flavor) { + .sync => { + return @as(Maybe(Return.Stat), switch (Syscall.stat( + args.path.sliceZ( + &this.sync_error_buf, + ), + )) { + .result => |result| Maybe(Return.Stat){ .result = Return.Stat.init(result) }, + .err => |err| Maybe(Return.Stat){ .err = err }, + }); + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Stat).todo; + } + + pub fn symlink(this: *NodeFS, args: Arguments.Symlink, comptime flavor: Flavor) Maybe(Return.Symlink) { + var to_buf: [bun.MAX_PATH_BYTES]u8 = undefined; + + switch (comptime flavor) { + .sync => { + return Syscall.symlink( + args.old_path.sliceZ(&this.sync_error_buf), + args.new_path.sliceZ(&to_buf), + ); + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Symlink).todo; + } + fn _truncate(this: *NodeFS, path: PathLike, len: JSC.WebCore.Blob.SizeType, comptime flavor: Flavor) Maybe(Return.Truncate) { + switch (comptime flavor) { + .sync => { + return Maybe(Return.Truncate).errno(C.truncate(path.sliceZ(&this.sync_error_buf), len)) orelse + Maybe(Return.Truncate).success; + }, + else => {}, + } + + _ = this; + _ = flavor; + return Maybe(Return.Truncate).todo; + } + pub fn truncate(this: *NodeFS, args: Arguments.Truncate, comptime flavor: Flavor) Maybe(Return.Truncate) { + return switch (args.path) { + .fd => |fd| this.ftruncate( + Arguments.FTruncate{ .fd = fd, .len = args.len }, + flavor, + ), + .path => this._truncate( + args.path.path, + args.len, + flavor, + ), + }; + } + pub fn unlink(this: *NodeFS, args: Arguments.Unlink, comptime flavor: Flavor) Maybe(Return.Unlink) { + switch (comptime flavor) { + .sync => { + return Maybe(Return.Unlink).errnoSysP(system.unlink(args.path.sliceZ(&this.sync_error_buf)), .unlink, args.path.slice()) orelse + 
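// realpath() above resolves a path by opening it (O_PATH on Linux, read-only
// elsewhere) and then asking the OS for the canonical path of the resulting
// descriptor. Roughly the same idea using only the standard library
// (stand-alone sketch):
const std = @import("std");

fn canonicalPath(dir: std.fs.Dir, sub_path: []const u8, out: *[std.fs.MAX_PATH_BYTES]u8) ![]u8 {
    const file = try dir.openFile(sub_path, .{});
    defer file.close();
    return std.os.getFdPath(file.handle, out);
}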
Maybe(Return.Unlink).success; + }, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Unlink).todo; + } + pub fn unwatchFile(this: *NodeFS, args: Arguments.UnwatchFile, comptime flavor: Flavor) Maybe(Return.UnwatchFile) { + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.UnwatchFile).todo; + } + pub fn utimes(this: *NodeFS, args: Arguments.Utimes, comptime flavor: Flavor) Maybe(Return.Utimes) { + var times = [2]std.c.timeval{ + .{ + .tv_sec = args.mtime, + // TODO: is this correct? + .tv_usec = 0, + }, + .{ + .tv_sec = args.atime, + // TODO: is this correct? + .tv_usec = 0, + }, + }; + + switch (comptime flavor) { + // futimes uses the syscall version + // we use libc because here, not for a good reason + // just missing from the linux syscall interface in zig and I don't want to modify that right now + .sync => return if (Maybe(Return.Utimes).errnoSysP(std.c.utimes(args.path.sliceZ(&this.sync_error_buf), ×), .utimes, args.path.slice())) |err| + err + else + Maybe(Return.Utimes).success, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Utimes).todo; + } + + pub fn lutimes(this: *NodeFS, args: Arguments.Lutimes, comptime flavor: Flavor) Maybe(Return.Lutimes) { + var times = [2]std.c.timeval{ + .{ + .tv_sec = args.mtime, + // TODO: is this correct? + .tv_usec = 0, + }, + .{ + .tv_sec = args.atime, + // TODO: is this correct? + .tv_usec = 0, + }, + }; + + switch (comptime flavor) { + // futimes uses the syscall version + // we use libc because here, not for a good reason + // just missing from the linux syscall interface in zig and I don't want to modify that right now + .sync => return if (Maybe(Return.Lutimes).errnoSysP(C.lutimes(args.path.sliceZ(&this.sync_error_buf), ×), .lutimes, args.path.slice())) |err| + err + else + Maybe(Return.Lutimes).success, + else => {}, + } + + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Lutimes).todo; + } + pub fn watch(this: *NodeFS, args: Arguments.Watch, comptime flavor: Flavor) Maybe(Return.Watch) { + _ = args; + _ = this; + _ = flavor; + return Maybe(Return.Watch).todo; + } + pub fn createReadStream(this: *NodeFS, args: Arguments.CreateReadStream, comptime flavor: Flavor) Maybe(Return.CreateReadStream) { + _ = args; + _ = this; + _ = flavor; + var stream = bun.default_allocator.create(JSC.Node.Stream) catch unreachable; + stream.* = JSC.Node.Stream{ + .sink = .{ + .readable = JSC.Node.Readable{ + .stream = stream, + .globalObject = args.global_object, + }, + }, + .sink_type = .readable, + .content = undefined, + .content_type = undefined, + .allocator = bun.default_allocator, + }; + + args.file.copyToStream(args.flags, args.autoClose, args.mode, bun.default_allocator, stream) catch unreachable; + args.copyToState(&stream.sink.readable.state); + return Maybe(Return.CreateReadStream){ .result = stream }; + } + pub fn createWriteStream(this: *NodeFS, args: Arguments.CreateWriteStream, comptime flavor: Flavor) Maybe(Return.CreateWriteStream) { + _ = args; + _ = this; + _ = flavor; + var stream = bun.default_allocator.create(JSC.Node.Stream) catch unreachable; + stream.* = JSC.Node.Stream{ + .sink = .{ + .writable = JSC.Node.Writable{ + .stream = stream, + .globalObject = args.global_object, + }, + }, + .sink_type = .writable, + .content = undefined, + .content_type = undefined, + .allocator = bun.default_allocator, + }; + args.file.copyToStream(args.flags, args.autoClose, args.mode, bun.default_allocator, stream) catch unreachable; + 
args.copyToState(&stream.sink.writable.state); + return Maybe(Return.CreateWriteStream){ .result = stream }; + } +}; diff --git a/src/bun.js/node/node_fs_binding.zig b/src/bun.js/node/node_fs_binding.zig new file mode 100644 index 000000000..298727fb9 --- /dev/null +++ b/src/bun.js/node/node_fs_binding.zig @@ -0,0 +1,423 @@ +const JSC = @import("../../jsc.zig"); +const std = @import("std"); +const Flavor = JSC.Node.Flavor; +const ArgumentsSlice = JSC.Node.ArgumentsSlice; +const system = std.os.system; +const Maybe = JSC.Maybe; +const Encoding = JSC.Node.Encoding; +const FeatureFlags = @import("../../global.zig").FeatureFlags; +const Args = JSC.Node.NodeFS.Arguments; +const d = JSC.d; + +const NodeFSFunction = fn ( + *JSC.Node.NodeFS, + JSC.C.JSContextRef, + JSC.C.JSObjectRef, + JSC.C.JSObjectRef, + []const JSC.C.JSValueRef, + JSC.C.ExceptionRef, +) JSC.C.JSValueRef; + +pub const toJSTrait = std.meta.trait.hasFn("toJS"); +pub const fromJSTrait = std.meta.trait.hasFn("fromJS"); +const NodeFSFunctionEnum = JSC.Node.DeclEnum(JSC.Node.NodeFS); + +fn callSync(comptime FunctionEnum: NodeFSFunctionEnum) NodeFSFunction { + const Function = @field(JSC.Node.NodeFS, @tagName(FunctionEnum)); + const FunctionType = @TypeOf(Function); + + const function: std.builtin.TypeInfo.Fn = comptime @typeInfo(FunctionType).Fn; + comptime if (function.args.len != 3) @compileError("Expected 3 arguments"); + const Arguments = comptime function.args[1].arg_type.?; + const FormattedName = comptime [1]u8{std.ascii.toUpper(@tagName(FunctionEnum)[0])} ++ @tagName(FunctionEnum)[1..]; + const Result = comptime JSC.Maybe(@field(JSC.Node.NodeFS.ReturnType, FormattedName)); + + const NodeBindingClosure = struct { + pub fn bind( + this: *JSC.Node.NodeFS, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + arguments: []const JSC.C.JSValueRef, + exception: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + var slice = ArgumentsSlice.init(ctx.bunVM(), @ptrCast([*]const JSC.JSValue, arguments.ptr)[0..arguments.len]); + defer slice.deinit(); + + const args = if (comptime Arguments != void) + (Arguments.fromJS(ctx, &slice, exception) orelse return null) + else + Arguments{}; + if (exception.* != null) return null; + + const result: Result = Function( + this, + args, + comptime Flavor.sync, + ); + return switch (result) { + .err => |err| brk: { + exception.* = err.toJS(ctx); + break :brk null; + }, + .result => |res| if (comptime Result.ReturnType != void) + JSC.To.JS.withType(Result.ReturnType, res, ctx, exception) + else + JSC.C.JSValueMakeUndefined(ctx), + }; + } + }; + + return NodeBindingClosure.bind; +} + +fn call(comptime Function: NodeFSFunctionEnum) NodeFSFunction { + // const FunctionType = @TypeOf(Function); + _ = Function; + + // const function: std.builtin.TypeInfo.Fn = comptime @typeInfo(FunctionType).Fn; + // comptime if (function.args.len != 3) @compileError("Expected 3 arguments"); + // const Arguments = comptime function.args[2].arg_type orelse @compileError(std.fmt.comptimePrint("Function {s} expected to have an arg type at [2]", .{@typeName(FunctionType)})); + // const Result = comptime function.return_type.?; + // comptime if (Arguments != void and !fromJSTrait(Arguments)) @compileError(std.fmt.comptimePrint("{s} is missing fromJS()", .{@typeName(Arguments)})); + // comptime if (Result != void and !toJSTrait(Result)) @compileError(std.fmt.comptimePrint("{s} is missing toJS()", .{@typeName(Result)})); + const NodeBindingClosure = struct { + pub fn bind( + this: *JSC.Node.NodeFS, + ctx: 
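// callSync() above generates one binding per NodeFS method purely at compile
// time: the method is looked up by enum tag with @field/@tagName, its argument
// type is read from @typeInfo, and a wrapper function is returned. The core
// dispatch trick in stand-alone form (illustrative container and methods):
const std = @import("std");

const Math = struct {
    pub fn double(x: i32) i32 {
        return x * 2;
    }
    pub fn square(x: i32) i32 {
        return x * x;
    }
};

const Op = enum { double, square };

fn dispatch(comptime op: Op, x: i32) i32 {
    return @field(Math, @tagName(op))(x);
}

test "comptime method dispatch" {
    try std.testing.expectEqual(@as(i32, 8), dispatch(.double, 4));
    try std.testing.expectEqual(@as(i32, 16), dispatch(.square, 4));
}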
JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + arguments: []const JSC.C.JSValueRef, + exception: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + _ = this; + _ = ctx; + _ = arguments; + var err = JSC.SystemError{}; + exception.* = err.toErrorInstance(ctx.ptr()).asObjectRef(); + return null; + // var slice = ArgumentsSlice.init(arguments); + + // defer { + // for (arguments.len) |arg| { + // JSC.C.JSValueUnprotect(ctx, arg); + // } + // slice.arena.deinit(); + // } + + // const args = if (comptime Arguments != void) + // Arguments.fromJS(ctx, &slice, exception) + // else + // Arguments{}; + // if (exception.* != null) return null; + + // const result: Maybe(Result) = Function(this, comptime Flavor.sync, args); + // switch (result) { + // .err => |err| { + // exception.* = err.toJS(ctx); + // return null; + // }, + // .result => |res| { + // return switch (comptime Result) { + // void => JSC.JSValue.jsUndefined().asRef(), + // else => res.toJS(ctx), + // }; + // }, + // } + // unreachable; + } + }; + return NodeBindingClosure.bind; +} + +pub const NodeFSBindings = JSC.NewClass( + JSC.Node.NodeFS, + .{ .name = "fs", .ts = .{ .module = .{ .path = "fs" } } }, + + .{ + .access = .{ + .name = "access", + .rfn = call(.access), + }, + .appendFile = .{ + .name = "appendFile", + .rfn = call(.appendFile), + }, + .close = .{ + .name = "close", + .rfn = call(.close), + }, + .copyFile = .{ + .name = "copyFile", + .rfn = call(.copyFile), + }, + .exists = .{ + .name = "exists", + .rfn = call(.exists), + }, + .chown = .{ + .name = "chown", + .rfn = call(.chown), + }, + .chmod = .{ + .name = "chmod", + .rfn = call(.chmod), + }, + .fchmod = .{ + .name = "fchmod", + .rfn = call(.fchmod), + }, + .fchown = .{ + .name = "fchown", + .rfn = call(.fchown), + }, + .fstat = .{ + .name = "fstat", + .rfn = call(.fstat), + }, + .fsync = .{ + .name = "fsync", + .rfn = call(.fsync), + }, + .ftruncate = .{ + .name = "ftruncate", + .rfn = call(.ftruncate), + }, + .futimes = .{ + .name = "futimes", + .rfn = call(.futimes), + }, + .lchmod = .{ + .name = "lchmod", + .rfn = call(.lchmod), + }, + .lchown = .{ + .name = "lchown", + .rfn = call(.lchown), + }, + .link = .{ + .name = "link", + .rfn = call(.link), + }, + .lstat = .{ + .name = "lstat", + .rfn = call(.lstat), + }, + .mkdir = .{ + .name = "mkdir", + .rfn = call(.mkdir), + }, + .mkdtemp = .{ + .name = "mkdtemp", + .rfn = call(.mkdtemp), + }, + .open = .{ + .name = "open", + .rfn = call(.open), + }, + .read = .{ + .name = "read", + .rfn = call(.read), + }, + .write = .{ + .name = "write", + .rfn = call(.write), + }, + .readdir = .{ + .name = "readdir", + .rfn = call(.readdir), + }, + .readFile = .{ + .name = "readFile", + .rfn = call(.readFile), + }, + .writeFile = .{ + .name = "writeFile", + .rfn = call(.writeFile), + }, + .readlink = .{ + .name = "readlink", + .rfn = call(.readlink), + }, + .realpath = .{ + .name = "realpath", + .rfn = call(.realpath), + }, + .rename = .{ + .name = "rename", + .rfn = call(.rename), + }, + .stat = .{ + .name = "stat", + .rfn = call(.stat), + }, + .symlink = .{ + .name = "symlink", + .rfn = call(.symlink), + }, + .truncate = .{ + .name = "truncate", + .rfn = call(.truncate), + }, + .unlink = .{ + .name = "unlink", + .rfn = call(.unlink), + }, + .utimes = .{ + .name = "utimes", + .rfn = call(.utimes), + }, + .lutimes = .{ + .name = "lutimes", + .rfn = call(.lutimes), + }, + + .createReadStream = .{ + .name = "createReadStream", + .rfn = if (FeatureFlags.node_streams) callSync(.createReadStream) else 
call(.createReadStream), + }, + + .createWriteStream = .{ + .name = "createWriteStream", + .rfn = if (FeatureFlags.node_streams) callSync(.createWriteStream) else call(.createWriteStream), + }, + + .accessSync = .{ + .name = "accessSync", + .rfn = callSync(.access), + }, + .appendFileSync = .{ + .name = "appendFileSync", + .rfn = callSync(.appendFile), + }, + .closeSync = .{ + .name = "closeSync", + .rfn = callSync(.close), + }, + .copyFileSync = .{ + .name = "copyFileSync", + .rfn = callSync(.copyFile), + }, + .existsSync = .{ + .name = "existsSync", + .rfn = callSync(.exists), + }, + .chownSync = .{ + .name = "chownSync", + .rfn = callSync(.chown), + }, + .chmodSync = .{ + .name = "chmodSync", + .rfn = callSync(.chmod), + }, + .fchmodSync = .{ + .name = "fchmodSync", + .rfn = callSync(.fchmod), + }, + .fchownSync = .{ + .name = "fchownSync", + .rfn = callSync(.fchown), + }, + .fstatSync = .{ + .name = "fstatSync", + .rfn = callSync(.fstat), + }, + .fsyncSync = .{ + .name = "fsyncSync", + .rfn = callSync(.fsync), + }, + .ftruncateSync = .{ + .name = "ftruncateSync", + .rfn = callSync(.ftruncate), + }, + .futimesSync = .{ + .name = "futimesSync", + .rfn = callSync(.futimes), + }, + .lchmodSync = .{ + .name = "lchmodSync", + .rfn = callSync(.lchmod), + }, + .lchownSync = .{ + .name = "lchownSync", + .rfn = callSync(.lchown), + }, + .linkSync = .{ + .name = "linkSync", + .rfn = callSync(.link), + }, + .lstatSync = .{ + .name = "lstatSync", + .rfn = callSync(.lstat), + }, + .mkdirSync = .{ + .name = "mkdirSync", + .rfn = callSync(.mkdir), + }, + .mkdtempSync = .{ + .name = "mkdtempSync", + .rfn = callSync(.mkdtemp), + }, + .openSync = .{ + .name = "openSync", + .rfn = callSync(.open), + }, + .readSync = .{ + .name = "readSync", + .rfn = callSync(.read), + }, + .writeSync = .{ + .name = "writeSync", + .rfn = callSync(.write), + }, + .readdirSync = .{ + .name = "readdirSync", + .rfn = callSync(.readdir), + }, + .readFileSync = .{ + .name = "readFileSync", + .rfn = callSync(.readFile), + }, + .writeFileSync = .{ + .name = "writeFileSync", + .rfn = callSync(.writeFile), + }, + .readlinkSync = .{ + .name = "readlinkSync", + .rfn = callSync(.readlink), + }, + .realpathSync = .{ + .name = "realpathSync", + .rfn = callSync(.realpath), + }, + .renameSync = .{ + .name = "renameSync", + .rfn = callSync(.rename), + }, + .statSync = .{ + .name = "statSync", + .rfn = callSync(.stat), + }, + .symlinkSync = .{ + .name = "symlinkSync", + .rfn = callSync(.symlink), + }, + .truncateSync = .{ + .name = "truncateSync", + .rfn = callSync(.truncate), + }, + .unlinkSync = .{ + .name = "unlinkSync", + .rfn = callSync(.unlink), + }, + .utimesSync = .{ + .name = "utimesSync", + .rfn = callSync(.utimes), + }, + .lutimesSync = .{ + .name = "lutimesSync", + .rfn = callSync(.lutimes), + }, + }, + .{}, +); diff --git a/src/bun.js/node/node_fs_constant.zig b/src/bun.js/node/node_fs_constant.zig new file mode 100644 index 000000000..72e752184 --- /dev/null +++ b/src/bun.js/node/node_fs_constant.zig @@ -0,0 +1,204 @@ +const bun = @import("../../global.zig"); +const Environment = bun.Environment; +const std = @import("std"); + +fn get(comptime name: []const u8) comptime_int { + return if (@hasDecl(std.os.O, name)) + return @field(std.os.O, name) + else + return 0; +} +pub const Constants = struct { + // File Access Constants + /// Constant for fs.access(). File is visible to the calling process. + pub const F_OK = std.os.F_OK; + /// Constant for fs.access(). File can be read by the calling process. 
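// The get("NAME") helper above probes std.os.O at compile time so flags that
// do not exist on the current platform simply become 0 instead of breaking the
// build. The same pattern against an illustrative namespace (stand-alone
// sketch):
const std = @import("std");

fn flagOrZero(comptime T: type, comptime name: []const u8) comptime_int {
    return if (@hasDecl(T, name)) @field(T, name) else 0;
}

test "missing platform flags default to zero" {
    const Flags = struct {
        pub const NOFOLLOW = 0x100;
    };
    try std.testing.expectEqual(0x100, flagOrZero(Flags, "NOFOLLOW"));
    try std.testing.expectEqual(0, flagOrZero(Flags, "NOATIME"));
}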
+ pub const R_OK = std.os.R_OK; + /// Constant for fs.access(). File can be written by the calling process. + pub const W_OK = std.os.W_OK; + /// Constant for fs.access(). File can be executed by the calling process. + pub const X_OK = std.os.X_OK; + // File Copy Constants + pub const Copyfile = enum(i32) { + _, + pub const exclusive = 1; + pub const clone = 2; + pub const force = 4; + + pub inline fn isForceClone(this: Copyfile) bool { + return (@enumToInt(this) & COPYFILE_FICLONE_FORCE) != 0; + } + + pub inline fn shouldntOverwrite(this: Copyfile) bool { + return (@enumToInt(this) & COPYFILE_EXCL) != 0; + } + + pub inline fn canUseClone(this: Copyfile) bool { + _ = this; + return Environment.isMac; + // return (@enumToInt(this) | COPYFILE_FICLONE) != 0; + } + }; + + /// Constant for fs.copyFile. Flag indicating the destination file should not be overwritten if it already exists. + pub const COPYFILE_EXCL: i32 = 1 << Copyfile.exclusive; + + /// + /// Constant for fs.copyFile. copy operation will attempt to create a copy-on-write reflink. + /// If the underlying platform does not support copy-on-write, then a fallback copy mechanism is used. + pub const COPYFILE_FICLONE: i32 = 1 << Copyfile.clone; + /// + /// Constant for fs.copyFile. Copy operation will attempt to create a copy-on-write reflink. + /// If the underlying platform does not support copy-on-write, then the operation will fail with an error. + pub const COPYFILE_FICLONE_FORCE: i32 = 1 << Copyfile.force; + // File Open Constants + /// Constant for fs.open(). Flag indicating to open a file for read-only access. + pub const O_RDONLY = std.os.O.RDONLY; + /// Constant for fs.open(). Flag indicating to open a file for write-only access. + pub const O_WRONLY = std.os.O.WRONLY; + /// Constant for fs.open(). Flag indicating to open a file for read-write access. + pub const O_RDWR = std.os.O.RDWR; + /// Constant for fs.open(). Flag indicating to create the file if it does not already exist. + pub const O_CREAT = std.os.O.CREAT; + /// Constant for fs.open(). Flag indicating that opening a file should fail if the O_CREAT flag is set and the file already exists. + pub const O_EXCL = std.os.O.EXCL; + + /// + /// Constant for fs.open(). Flag indicating that if path identifies a terminal device, + /// opening the path shall not cause that terminal to become the controlling terminal for the process + /// (if the process does not already have one). + pub const O_NOCTTY = std.os.O.NOCTTY; + /// Constant for fs.open(). Flag indicating that if the file exists and is a regular file, and the file is opened successfully for write access, its length shall be truncated to zero. + pub const O_TRUNC = std.os.O.TRUNC; + /// Constant for fs.open(). Flag indicating that data will be appended to the end of the file. + pub const O_APPEND = std.os.O.APPEND; + /// Constant for fs.open(). Flag indicating that the open should fail if the path is not a directory. + pub const O_DIRECTORY = std.os.O.DIRECTORY; + + /// + /// constant for fs.open(). + /// Flag indicating reading accesses to the file system will no longer result in + /// an update to the atime information associated with the file. + /// This flag is available on Linux operating systems only. + pub const O_NOATIME = get("NOATIME"); + /// Constant for fs.open(). Flag indicating that the open should fail if the path is a symbolic link. + pub const O_NOFOLLOW = std.os.O.NOFOLLOW; + /// Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O. 
+ pub const O_SYNC = std.os.O.SYNC; + /// Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O with write operations waiting for data integrity. + pub const O_DSYNC = std.os.O.DSYNC; + /// Constant for fs.open(). Flag indicating to open the symbolic link itself rather than the resource it is pointing to. + pub const O_SYMLINK = get("SYMLINK"); + /// Constant for fs.open(). When set, an attempt will be made to minimize caching effects of file I/O. + pub const O_DIRECT = get("DIRECT"); + /// Constant for fs.open(). Flag indicating to open the file in nonblocking mode when possible. + pub const O_NONBLOCK = std.os.O.NONBLOCK; + // File Type Constants + /// Constant for fs.Stats mode property for determining a file's type. Bit mask used to extract the file type code. + pub const S_IFMT = std.os.S.IFMT; + /// Constant for fs.Stats mode property for determining a file's type. File type constant for a regular file. + pub const S_IFREG = std.os.S.IFREG; + /// Constant for fs.Stats mode property for determining a file's type. File type constant for a directory. + pub const S_IFDIR = std.os.S.IFDIR; + /// Constant for fs.Stats mode property for determining a file's type. File type constant for a character-oriented device file. + pub const S_IFCHR = std.os.S.IFCHR; + /// Constant for fs.Stats mode property for determining a file's type. File type constant for a block-oriented device file. + pub const S_IFBLK = std.os.S.IFBLK; + /// Constant for fs.Stats mode property for determining a file's type. File type constant for a FIFO/pipe. + pub const S_IFIFO = std.os.S.IFIFO; + /// Constant for fs.Stats mode property for determining a file's type. File type constant for a symbolic link. + pub const S_IFLNK = std.os.S.IFLNK; + /// Constant for fs.Stats mode property for determining a file's type. File type constant for a socket. + pub const S_IFSOCK = std.os.S.IFSOCK; + // File Mode Constants + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by owner. + pub const S_IRWXU = std.os.S.IRWXU; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by owner. + pub const S_IRUSR = std.os.S.IRUSR; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by owner. + pub const S_IWUSR = std.os.S.IWUSR; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by owner. + pub const S_IXUSR = std.os.S.IXUSR; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by group. + pub const S_IRWXG = std.os.S.IRWXG; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by group. + pub const S_IRGRP = std.os.S.IRGRP; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by group. + pub const S_IWGRP = std.os.S.IWGRP; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by group. + pub const S_IXGRP = std.os.S.IXGRP; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by others. 
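+ /// (S_IRWXO is the octal mask 0o007; together with S_IRWXU (0o700) and S_IRWXG (0o070) it forms the familiar 0o777 permission mask.)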
+ pub const S_IRWXO = std.os.S.IRWXO; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by others. + pub const S_IROTH = std.os.S.IROTH; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by others. + pub const S_IWOTH = std.os.S.IWOTH; + /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by others. + pub const S_IXOTH = std.os.S.IXOTH; + + /// + /// When set, a memory file mapping is used to access the file. This flag + /// is available on Windows operating systems only. On other operating systems, + /// this flag is ignored. + pub const UV_FS_O_FILEMAP = 49152; +}; + +// Due to zig's format support max 32 arguments, we need to split +// here. +const constants_string_format1 = + \\export var constants = {{ + \\ F_OK: {d}, + \\ R_OK: {d}, + \\ W_OK: {d}, + \\ X_OK: {d}, + \\ COPYFILE_EXCL: {d}, + \\ COPYFILE_FICLONE: {d}, + \\ COPYFILE_FICLONE_FORCE: {d}, + \\ O_RDONLY: {d}, + \\ O_WRONLY: {d}, + \\ O_RDWR: {d}, + \\ O_CREAT: {d}, + \\ O_EXCL: {d}, + \\ O_NOCTTY: {d}, + \\ O_TRUNC: {d}, + \\ O_APPEND: {d}, + \\ O_DIRECTORY: {d}, + \\ O_NOATIME: {d}, + \\ O_NOFOLLOW: {d}, + \\ O_SYNC: {d}, + \\ O_DSYNC: {d}, +; +const constants_string_format2 = + \\ O_SYMLINK: {s}, + \\ O_DIRECT: {d}, + \\ O_NONBLOCK: {d}, + \\ S_IFMT: {d}, + \\ S_IFREG: {d}, + \\ S_IFDIR: {d}, + \\ S_IFCHR: {d}, + \\ S_IFBLK: {d}, + \\ S_IFIFO: {d}, + \\ S_IFLNK: {d}, + \\ S_IFSOCK: {d}, + \\ S_IRWXU: {d}, + \\ S_IRUSR: {d}, + \\ S_IWUSR: {d}, + \\ S_IXUSR: {d}, + \\ S_IRWXG: {d}, + \\ S_IRGRP: {d}, + \\ S_IWGRP: {d}, + \\ S_IXGRP: {d}, + \\ S_IRWXO: {d}, + \\ S_IROTH: {d}, + \\ S_IWOTH: {d}, + \\ S_IXOTH: {d}, + \\ UV_FS_O_FILEMAP: {d} + \\}} +; + +const constants_string1 = std.fmt.comptimePrint(constants_string_format1, .{ Constants.F_OK, Constants.R_OK, Constants.W_OK, Constants.X_OK, Constants.COPYFILE_EXCL, Constants.COPYFILE_FICLONE, Constants.COPYFILE_FICLONE_FORCE, Constants.O_RDONLY, Constants.O_WRONLY, Constants.O_RDWR, Constants.O_CREAT, Constants.O_EXCL, Constants.O_NOCTTY, Constants.O_TRUNC, Constants.O_APPEND, Constants.O_DIRECTORY, Constants.O_NOATIME, Constants.O_NOFOLLOW, Constants.O_SYNC, Constants.O_DSYNC }); + +const constants_string2 = + std.fmt.comptimePrint(constants_string_format2, .{ if (@TypeOf(Constants.O_SYMLINK) == void) "undefined" else std.fmt.comptimePrint("{}", .{Constants.O_SYMLINK}), Constants.O_DIRECT, Constants.O_NONBLOCK, Constants.S_IFMT, Constants.S_IFREG, Constants.S_IFDIR, Constants.S_IFCHR, Constants.S_IFBLK, Constants.S_IFIFO, Constants.S_IFLNK, Constants.S_IFSOCK, Constants.S_IRWXU, Constants.S_IRUSR, Constants.S_IWUSR, Constants.S_IXUSR, Constants.S_IRWXG, Constants.S_IRGRP, Constants.S_IWGRP, Constants.S_IXGRP, Constants.S_IRWXO, Constants.S_IROTH, Constants.S_IWOTH, Constants.S_IXOTH, Constants.UV_FS_O_FILEMAP }); + +pub const constants_string = constants_string1 ++ constants_string2; diff --git a/src/bun.js/node/nodejs_error_code.zig b/src/bun.js/node/nodejs_error_code.zig new file mode 100644 index 000000000..5c54791ee --- /dev/null +++ b/src/bun.js/node/nodejs_error_code.zig @@ -0,0 +1,1097 @@ +/// https://nodejs.org/api/errors.html#nodejs-error-codes +pub const Code = enum { + /// Used when an operation has been aborted (typically using an AbortController). + /// APIs not using AbortSignals typically do not raise an error with this code. 
+ /// This code does not use the regular ERR_* convention Node.js errors use in order to be compatible with the web platform's AbortError. + /// Added in: v15.0.0 + ABORT_ERR, + + /// A function argument is being used in a way that suggests that the function signature may be misunderstood. This is thrown by the assert module when the message parameter in assert.throws(block, message) matches the error message thrown by block because that usage suggests that the user believes message is the expected message rather than the message the AssertionError will display if block does not throw. + ERR_AMBIGUOUS_ARGUMENT, + + /// An iterable argument (i.e. a value that works with for...of loops) was required, but not provided to a Node.js API. + ERR_ARG_NOT_ITERABLE, + + /// A special type of error that can be triggered whenever Node.js detects an exceptional logic violation that should never occur. These are raised typically by the assert module. + ERR_ASSERTION, + + /// An attempt was made to register something that is not a function as an AsyncHooks callback. + ERR_ASYNC_CALLBACK, + + /// The type of an asynchronous resource was invalid. Users are also able to define their own types if using the public embedder API. + ERR_ASYNC_TYPE, + + /// Data passed to a Brotli stream was not successfully compressed. + ERR_BROTLI_COMPRESSION_FAILED, + + /// An invalid parameter key was passed during construction of a Brotli stream. + ERR_BROTLI_INVALID_PARAM, + + /// When encountering this error, a possible alternative to creating a Buffer instance is to create a normal Uint8Array, which only differs in the prototype of the resulting object. Uint8Arrays are generally accepted in all Node.js core APIs where Buffers are; they are available in all Contexts. + /// An attempt was made to create a Node.js Buffer instance from addon or embedder code, while in a JS engine Context that is not associated with a Node.js instance. The data passed to the Buffer method will have been released by the time the method returns. + ERR_BUFFER_CONTEXT_NOT_AVAILABLE, + + /// An operation outside the bounds of a Buffer was attempted. + ERR_BUFFER_OUT_OF_BOUNDS, + + /// An attempt has been made to create a Buffer larger than the maximum allowed size. + ERR_BUFFER_TOO_LARGE, + + /// Node.js was unable to watch for the SIGINT signal. + ERR_CANNOT_WATCH_SIGINT, + + /// A child process was closed before the parent received a reply. + ERR_CHILD_CLOSED_BEFORE_REPLY, + + /// Used when a child process is being forked without specifying an IPC channel. + ERR_CHILD_PROCESS_IPC_REQUIRED, + + /// Used when the main process is trying to read data from the child process's STDERR/STDOUT, and the data's length is longer than the maxBuffer option. + ERR_CHILD_PROCESS_STDIO_MAXBUFFER, + + /// There was an attempt to use a MessagePort instance in a closed state, usually after .close() has been called. + ERR_CLOSED_MESSAGE_PORT, + + /// Console was instantiated without stdout stream, or Console has a non-writable stdout or stderr stream. + ERR_CONSOLE_WRITABLE_STREAM, + + /// A class constructor was called that is not callable. + ERR_CONSTRUCT_CALL_INVALID, + + /// A constructor for a class was called without new. + ERR_CONSTRUCT_CALL_REQUIRED, + + /// The vm context passed into the API is not yet initialized. This could happen when an error occurs (and is caught) during the creation of the context, for example, when the allocation fails or the maximum call stack size is reached when the context is created. 
+ ERR_CONTEXT_NOT_INITIALIZED, + + /// A client certificate engine was requested that is not supported by the version of OpenSSL being used. + ERR_CRYPTO_CUSTOM_ENGINE_NOT_SUPPORTED, + + /// An invalid value for the format argument was passed to the crypto.ECDH() class getPublicKey() method. + ERR_CRYPTO_ECDH_INVALID_FORMAT, + + /// An invalid value for the key argument has been passed to the crypto.ECDH() class computeSecret() method. It means that the public key lies outside of the elliptic curve. + ERR_CRYPTO_ECDH_INVALID_PUBLIC_KEY, + + /// An invalid crypto engine identifier was passed to require('crypto').setEngine(). + ERR_CRYPTO_ENGINE_UNKNOWN, + + /// The --force-fips command-line argument was used but there was an attempt to enable or disable FIPS mode in the crypto module. + ERR_CRYPTO_FIPS_FORCED, + + /// An attempt was made to enable or disable FIPS mode, but FIPS mode was not available. + ERR_CRYPTO_FIPS_UNAVAILABLE, + + /// hash.digest() was called multiple times. The hash.digest() method must be called no more than one time per instance of a Hash object. + ERR_CRYPTO_HASH_FINALIZED, + + /// hash.update() failed for any reason. This should rarely, if ever, happen. + ERR_CRYPTO_HASH_UPDATE_FAILED, + + /// The given crypto keys are incompatible with the attempted operation. + ERR_CRYPTO_INCOMPATIBLE_KEY, + + /// The selected public or private key encoding is incompatible with other options. + ERR_CRYPTO_INCOMPATIBLE_KEY_OPTIONS, + + /// Added in: v15.0.0 + /// Initialization of the crypto subsystem failed. + ERR_CRYPTO_INITIALIZATION_FAILED, + + /// Added in: v15.0.0 + /// An invalid authentication tag was provided. + ERR_CRYPTO_INVALID_AUTH_TAG, + + /// Added in: v15.0.0 + /// An invalid counter was provided for a counter-mode cipher. + ERR_CRYPTO_INVALID_COUNTER, + + /// An invalid elliptic-curve was provided. + /// Added in: v15.0.0 + ERR_CRYPTO_INVALID_CURVE, + + /// An invalid crypto digest algorithm was specified. + ERR_CRYPTO_INVALID_DIGEST, + + /// Added in: v15.0.0 + /// An invalid initialization vector was provided. + ERR_CRYPTO_INVALID_IV, + + /// Added in: v15.0.0 + /// An invalid JSON Web Key was provided. + ERR_CRYPTO_INVALID_JWK, + + /// The given crypto key object's type is invalid for the attempted operation. + ERR_CRYPTO_INVALID_KEY_OBJECT_TYPE, + + /// Added in: v15.0.0 + /// An invalid key length was provided. + ERR_CRYPTO_INVALID_KEYLEN, + + /// An invalid key pair was provided. + /// Added in: v15.0.0 + ERR_CRYPTO_INVALID_KEYPAIR, + + /// An invalid key type was provided. + /// Added in: v15.0.0 + ERR_CRYPTO_INVALID_KEYTYPE, + + /// Added in: v15.0.0 + /// An invalid message length was provided. + ERR_CRYPTO_INVALID_MESSAGELEN, + + /// Invalid scrypt algorithm parameters were provided. + /// Added in: v15.0.0 + ERR_CRYPTO_INVALID_SCRYPT_PARAMS, + + /// A crypto method was used on an object that was in an invalid state. For instance, calling cipher.getAuthTag() before calling cipher.final(). + ERR_CRYPTO_INVALID_STATE, + + /// Added in: v15.0.0 + /// An invalid authentication tag length was provided. + ERR_CRYPTO_INVALID_TAG_LENGTH, + + /// Added in: v15.0.0 + //// Initialization of an asynchronous crypto operation failed. + ERR_CRYPTO_JOB_INIT_FAILED, + + /// Key's Elliptic Curve is not registered for use in the JSON Web Key Elliptic Curve Registry. + ERR_CRYPTO_JWK_UNSUPPORTED_CURVE, + + /// Key's Asymmetric Key Type is not registered for use in the JSON Web Key Types Registry. 
+ ERR_CRYPTO_JWK_UNSUPPORTED_KEY_TYPE, + + /// A crypto operation failed for an otherwise unspecified reason. + /// Added in: v15.0.0 + ERR_CRYPTO_OPERATION_FAILED, + + /// The PBKDF2 algorithm failed for unspecified reasons. OpenSSL does not provide more details and therefore neither does Node.js. + ERR_CRYPTO_PBKDF2_ERROR, + + /// One or more crypto.scrypt() or crypto.scryptSync() parameters are outside their legal range. + ERR_CRYPTO_SCRYPT_INVALID_PARAMETER, + + /// Node.js was compiled without scrypt support. Not possible with the official release binaries but can happen with custom builds, including distro builds. + ERR_CRYPTO_SCRYPT_NOT_SUPPORTED, + + /// A signing key was not provided to the sign.sign() method. + ERR_CRYPTO_SIGN_KEY_REQUIRED, + + /// crypto.timingSafeEqual() was called with Buffer, TypedArray, or DataView arguments of different lengths. + ERR_CRYPTO_TIMING_SAFE_EQUAL_LENGTH, + + /// An unknown cipher was specified. + ERR_CRYPTO_UNKNOWN_CIPHER, + + /// An unknown Diffie-Hellman group name was given. See crypto.getDiffieHellman() for a list of valid group names. + ERR_CRYPTO_UNKNOWN_DH_GROUP, + + /// Added in: v15.0.0, v14.18.0 + /// An attempt to invoke an unsupported crypto operation was made. + ERR_CRYPTO_UNSUPPORTED_OPERATION, + + /// An error occurred with the debugger. + /// Added in: v16.4.0, v14.17.4 + ERR_DEBUGGER_ERROR, + + /// Added in: v16.4.0, v14.17.4 + /// The debugger timed out waiting for the required host/port to be free. + ERR_DEBUGGER_STARTUP_ERROR, + + /// Added in: v16.10.0 + /// Loading native addons has been disabled using --no-addons. + ERR_DLOPEN_DISABLED, + + /// Added in: v15.0.0 + /// A call to process.dlopen() failed. + ERR_DLOPEN_FAILED, + + /// The fs.Dir was previously closed. + ERR_DIR_CLOSED, + + /// A synchronous read or close call was attempted on an fs.Dir which has ongoing asynchronous operations. + /// Added in: v14.3.0 + ERR_DIR_CONCURRENT_OPERATION, + + /// c-ares failed to set the DNS server. + ERR_DNS_SET_SERVERS_FAILED, + + /// The domain module was not usable since it could not establish the required error handling hooks, because process.setUncaughtExceptionCaptureCallback() had been called at an earlier point in time. + ERR_DOMAIN_CALLBACK_NOT_AVAILABLE, + + /// process.setUncaughtExceptionCaptureCallback() could not be called because the domain module has been loaded at an earlier point in time. + /// The stack trace is extended to include the point in time at which the domain module had been loaded. + ERR_DOMAIN_CANNOT_SET_UNCAUGHT_EXCEPTION_CAPTURE, + + /// Data provided to TextDecoder() API was invalid according to the encoding provided. + ERR_ENCODING_INVALID_ENCODED_DATA, + + /// Encoding provided to TextDecoder() API was not one of the WHATWG Supported Encodings. + ERR_ENCODING_NOT_SUPPORTED, + + /// --print cannot be used with ESM input. + ERR_EVAL_ESM_CANNOT_PRINT, + + /// Thrown when an attempt is made to recursively dispatch an event on EventTarget. + ERR_EVENT_RECURSION, + + /// The JS execution context is not associated with a Node.js environment. This may occur when Node.js is used as an embedded library and some hooks for the JS engine are not set up properly. + ERR_EXECUTION_ENVIRONMENT_NOT_AVAILABLE, + + /// A Promise that was callbackified via util.callbackify() was rejected with a falsy value. + ERR_FALSY_VALUE_REJECTION, + + /// Added in: v14.0.0 + /// Used when a feature that is not available to the current platform which is running Node.js is used. 
+ ERR_FEATURE_UNAVAILABLE_ON_PLATFORM, + + /// An attempt was made to copy a directory to a non-directory (file, symlink, etc.) using fs.cp(). + ERR_FS_CP_DIR_TO_NON_DIR, + + /// An attempt was made to copy over a file that already existed with fs.cp(), with the force and errorOnExist set to true. + ERR_FS_CP_EEXIST, + + /// When using fs.cp(), src or dest pointed to an invalid path. + ERR_FS_CP_EINVAL, + + /// An attempt was made to copy a named pipe with fs.cp(). + ERR_FS_CP_FIFO_PIPE, + + /// An attempt was made to copy a non-directory (file, symlink, etc.) to a directory using fs.cp(). + ERR_FS_CP_NON_DIR_TO_DIR, + + /// An attempt was made to copy to a socket with fs.cp(). + ERR_FS_CP_SOCKET, + + /// When using fs.cp(), a symlink in dest pointed to a subdirectory of src. + ERR_FS_CP_SYMLINK_TO_SUBDIRECTORY, + + /// An attempt was made to copy to an unknown file type with fs.cp(). + ERR_FS_CP_UNKNOWN, + + /// Path is a directory. + ERR_FS_EISDIR, + + /// An attempt has been made to read a file whose size is larger than the maximum allowed size for a Buffer. + ERR_FS_FILE_TOO_LARGE, + + /// An invalid symlink type was passed to the fs.symlink() or fs.symlinkSync() methods. + ERR_FS_INVALID_SYMLINK_TYPE, + + /// An attempt was made to add more headers after the headers had already been sent. + ERR_HTTP_HEADERS_SENT, + + /// An invalid HTTP header value was specified. + ERR_HTTP_INVALID_HEADER_VALUE, + + /// Status code was outside the regular status code range (100-999). + ERR_HTTP_INVALID_STATUS_CODE, + + /// The client has not sent the entire request within the allowed time. + ERR_HTTP_REQUEST_TIMEOUT, + + /// Changing the socket encoding is not allowed per RFC 7230 Section 3. + ERR_HTTP_SOCKET_ENCODING, + + /// The Trailer header was set even though the transfer encoding does not support that. + ERR_HTTP_TRAILER_INVALID, + + /// HTTP/2 ALTSVC frames require a valid origin. + ERR_HTTP2_ALTSVC_INVALID_ORIGIN, + + /// HTTP/2 ALTSVC frames are limited to a maximum of 16,382 payload bytes. + ERR_HTTP2_ALTSVC_LENGTH, + + /// For HTTP/2 requests using the CONNECT method, the :authority pseudo-header is required. + ERR_HTTP2_CONNECT_AUTHORITY, + + /// For HTTP/2 requests using the CONNECT method, the :path pseudo-header is forbidden. + ERR_HTTP2_CONNECT_PATH, + + /// For HTTP/2 requests using the CONNECT method, the :scheme pseudo-header is forbidden. + ERR_HTTP2_CONNECT_SCHEME, + + /// A non-specific HTTP/2 error has occurred. + ERR_HTTP2_ERROR, + + /// New HTTP/2 Streams may not be opened after the Http2Session has received a GOAWAY frame from the connected peer. + ERR_HTTP2_GOAWAY_SESSION, + + /// Multiple values were provided for an HTTP/2 header field that was required to have only a single value. + ERR_HTTP2_HEADER_SINGLE_VALUE, + + /// An additional headers was specified after an HTTP/2 response was initiated. + ERR_HTTP2_HEADERS_AFTER_RESPOND, + + /// An attempt was made to send multiple response headers. + ERR_HTTP2_HEADERS_SENT, + + /// Informational HTTP status codes (1xx) may not be set as the response status code on HTTP/2 responses. + ERR_HTTP2_INFO_STATUS_NOT_ALLOWED, + + /// HTTP/1 connection specific headers are forbidden to be used in HTTP/2 requests and responses. + ERR_HTTP2_INVALID_CONNECTION_HEADERS, + + /// An invalid HTTP/2 header value was specified. + ERR_HTTP2_INVALID_HEADER_VALUE, + + /// An invalid HTTP informational status code has been specified. Informational status codes must be an integer between 100 and 199 (inclusive). 
+ ERR_HTTP2_INVALID_INFO_STATUS, + + /// HTTP/2 ORIGIN frames require a valid origin. + ERR_HTTP2_INVALID_ORIGIN, + + /// Input Buffer and Uint8Array instances passed to the http2.getUnpackedSettings() API must have a length that is a multiple of six. + ERR_HTTP2_INVALID_PACKED_SETTINGS_LENGTH, + + /// Only valid HTTP/2 pseudoheaders (:status, :path, :authority, :scheme, and :method) may be used. + ERR_HTTP2_INVALID_PSEUDOHEADER, + + /// An action was performed on an Http2Session object that had already been destroyed. + ERR_HTTP2_INVALID_SESSION, + + /// An invalid value has been specified for an HTTP/2 setting. + ERR_HTTP2_INVALID_SETTING_VALUE, + + /// An operation was performed on a stream that had already been destroyed. + ERR_HTTP2_INVALID_STREAM, + + /// Whenever an HTTP/2 SETTINGS frame is sent to a connected peer, the peer is required to send an acknowledgment that it has received and applied the new SETTINGS. By default, a maximum number of unacknowledged SETTINGS frames may be sent at any given time. This error code is used when that limit has been reached. + ERR_HTTP2_MAX_PENDING_SETTINGS_ACK, + + /// An attempt was made to initiate a new push stream from within a push stream. Nested push streams are not permitted. + ERR_HTTP2_NESTED_PUSH, + + /// Out of memory when using the http2session.setLocalWindowSize(windowSize) API. + ERR_HTTP2_NO_MEM, + + /// An attempt was made to directly manipulate (read, write, pause, resume, etc.) a socket attached to an Http2Session. + ERR_HTTP2_NO_SOCKET_MANIPULATION, + + /// HTTP/2 ORIGIN frames are limited to a length of 16382 bytes. + ERR_HTTP2_ORIGIN_LENGTH, + + /// The number of streams created on a single HTTP/2 session reached the maximum limit. + ERR_HTTP2_OUT_OF_STREAMS, + + /// A message payload was specified for an HTTP response code for which a payload is forbidden. + ERR_HTTP2_PAYLOAD_FORBIDDEN, + + /// An HTTP/2 ping was canceled. + ERR_HTTP2_PING_CANCEL, + + /// HTTP/2 ping payloads must be exactly 8 bytes in length. + ERR_HTTP2_PING_LENGTH, + + /// An HTTP/2 pseudo-header has been used inappropriately. Pseudo-headers are header key names that begin with the : prefix. + ERR_HTTP2_PSEUDOHEADER_NOT_ALLOWED, + + /// An attempt was made to create a push stream, which had been disabled by the client. + ERR_HTTP2_PUSH_DISABLED, + + /// An attempt was made to use the Http2Stream.prototype.responseWithFile() API to send a directory. + ERR_HTTP2_SEND_FILE, + + /// An attempt was made to use the Http2Stream.prototype.responseWithFile() API to send something other than a regular file, but offset or length options were provided. + ERR_HTTP2_SEND_FILE_NOSEEK, + + /// The Http2Session closed with a non-zero error code. + ERR_HTTP2_SESSION_ERROR, + + /// The Http2Session settings canceled. + ERR_HTTP2_SETTINGS_CANCEL, + + /// An attempt was made to connect a Http2Session object to a net.Socket or tls.TLSSocket that had already been bound to another Http2Session object. + ERR_HTTP2_SOCKET_BOUND, + + /// An attempt was made to use the socket property of an Http2Session that has already been closed. + ERR_HTTP2_SOCKET_UNBOUND, + + /// Use of the 101 Informational status code is forbidden in HTTP/2. + ERR_HTTP2_STATUS_101, + + /// An invalid HTTP status code has been specified. Status codes must be an integer between 100 and 599 (inclusive). + ERR_HTTP2_STATUS_INVALID, + + /// An Http2Stream was destroyed before any data was transmitted to the connected peer. 
+ ERR_HTTP2_STREAM_CANCEL, + + /// A non-zero error code was been specified in an RST_STREAM frame. + ERR_HTTP2_STREAM_ERROR, + + /// When setting the priority for an HTTP/2 stream, the stream may be marked as a dependency for a parent stream. This error code is used when an attempt is made to mark a stream and dependent of itself. + ERR_HTTP2_STREAM_SELF_DEPENDENCY, + + /// The limit of acceptable invalid HTTP/2 protocol frames sent by the peer, as specified through the maxSessionInvalidFrames option, has been exceeded. + ERR_HTTP2_TOO_MANY_INVALID_FRAMES, + + /// Trailing headers have already been sent on the Http2Stream. + ERR_HTTP2_TRAILERS_ALREADY_SENT, + + /// The http2stream.sendTrailers() method cannot be called until after the 'wantTrailers' event is emitted on an Http2Stream object. The 'wantTrailers' event will only be emitted if the waitForTrailers option is set for the Http2Stream. + ERR_HTTP2_TRAILERS_NOT_READY, + + /// http2.connect() was passed a URL that uses any protocol other than http: or https:. + ERR_HTTP2_UNSUPPORTED_PROTOCOL, + + /// An attempt was made to construct an object using a non-public constructor. + ERR_ILLEGAL_CONSTRUCTOR, + + /// An import assertion has failed, preventing the specified module to be imported. + /// Added in: v17.1.0 + ERR_IMPORT_ASSERTION_TYPE_FAILED, + + /// An import assertion is missing, preventing the specified module to be imported. + /// Added in: v17.1.0 + ERR_IMPORT_ASSERTION_TYPE_MISSING, + + /// An import assertion is not supported by this version of Node.js. + /// Added in: v17.1.0 + ERR_IMPORT_ASSERTION_TYPE_UNSUPPORTED, + + /// An option pair is incompatible with each other and cannot be used at the same time. + ERR_INCOMPATIBLE_OPTION_PAIR, + + /// Stability: 1 - Experimental + ERR_INPUT_TYPE_NOT_ALLOWED, + + /// The --input-type flag was used to attempt to execute a file. This flag can only be used with input via --eval, --print or STDIN. + /// While using the inspector module, an attempt was made to activate the inspector when it already started to listen on a port. Use inspector.close() before activating it on a different address. + ERR_INSPECTOR_ALREADY_ACTIVATED, + + /// While using the inspector module, an attempt was made to connect when the inspector was already connected. + ERR_INSPECTOR_ALREADY_CONNECTED, + + /// While using the inspector module, an attempt was made to use the inspector after the session had already closed. + ERR_INSPECTOR_CLOSED, + + /// An error occurred while issuing a command via the inspector module. + ERR_INSPECTOR_COMMAND, + + /// The inspector is not active when inspector.waitForDebugger() is called. + ERR_INSPECTOR_NOT_ACTIVE, + + /// The inspector module is not available for use. + ERR_INSPECTOR_NOT_AVAILABLE, + + /// While using the inspector module, an attempt was made to use the inspector before it was connected. + ERR_INSPECTOR_NOT_CONNECTED, + + /// An API was called on the main thread that can only be used from the worker thread. + ERR_INSPECTOR_NOT_WORKER, + + /// There was a bug in Node.js or incorrect usage of Node.js internals. To fix the error, open an issue at https://github.com/nodejs/node/issues. + ERR_INTERNAL_ASSERTION, + + /// The provided address family is not understood by the Node.js API. + ERR_INVALID_ADDRESS_FAMILY, + + /// An argument of the wrong type was passed to a Node.js API. + ERR_INVALID_ARG_TYPE, + + /// An invalid or unsupported value was passed for a given argument. 
+ ERR_INVALID_ARG_VALUE, + + /// An invalid asyncId or triggerAsyncId was passed using AsyncHooks. An id less than -1 should never happen. + ERR_INVALID_ASYNC_ID, + + /// A swap was performed on a Buffer but its size was not compatible with the operation. + ERR_INVALID_BUFFER_SIZE, + + /// A callback function was required but was not been provided to a Node.js API. + ERR_INVALID_CALLBACK, + + /// Invalid characters were detected in headers. + ERR_INVALID_CHAR, + + /// A cursor on a given stream cannot be moved to a specified row without a specified column. + ERR_INVALID_CURSOR_POS, + + /// A file descriptor ('fd') was not valid (e.g. it was a negative value). + ERR_INVALID_FD, + + /// A file descriptor ('fd') type was not valid. + ERR_INVALID_FD_TYPE, + + /// A Node.js API that consumes file: URLs (such as certain functions in the fs module) encountered a file URL with an incompatible host. This situation can only occur on Unix-like systems where only localhost or an empty host is supported. + ERR_INVALID_FILE_URL_HOST, + + /// A Node.js API that consumes file: URLs (such as certain functions in the fs module) encountered a file URL with an incompatible path. The exact semantics for determining whether a path can be used is platform-dependent. + ERR_INVALID_FILE_URL_PATH, + + /// An attempt was made to send an unsupported "handle" over an IPC communication channel to a child process. See subprocess.send() and process.send() for more information. + ERR_INVALID_HANDLE_TYPE, + + /// An invalid HTTP token was supplied. + ERR_INVALID_HTTP_TOKEN, + + /// An IP address is not valid. + ERR_INVALID_IP_ADDRESS, + + /// Added in: v15.0.0, v14.18.0 + /// An attempt was made to load a module that does not exist or was otherwise not valid. + ERR_INVALID_MODULE, + + /// The imported module string is an invalid URL, package name, or package subpath specifier. + ERR_INVALID_MODULE_SPECIFIER, + + /// An invalid package.json file failed parsing. + ERR_INVALID_PACKAGE_CONFIG, + + /// The package.json "exports" field contains an invalid target mapping value for the attempted module resolution. + ERR_INVALID_PACKAGE_TARGET, + + /// While using the Performance Timing API (perf_hooks), a performance mark is invalid. + ERR_INVALID_PERFORMANCE_MARK, + + /// An invalid options.protocol was passed to http.request(). + ERR_INVALID_PROTOCOL, + + /// Both breakEvalOnSigint and eval options were set in the REPL config, which is not supported. + ERR_INVALID_REPL_EVAL_CONFIG, + + /// The input may not be used in the REPL. The conditions under which this error is used are described in the REPL documentation. + ERR_INVALID_REPL_INPUT, + + /// Thrown in case a function option does not provide a valid value for one of its returned object properties on execution. + ERR_INVALID_RETURN_PROPERTY, + + /// Thrown in case a function option does not provide an expected value type for one of its returned object properties on execution. + ERR_INVALID_RETURN_PROPERTY_VALUE, + + /// Thrown in case a function option does not return an expected value type on execution, such as when a function is expected to return a promise. + ERR_INVALID_RETURN_VALUE, + + /// Indicates that an operation cannot be completed due to an invalid state. For instance, an object may have already been destroyed, or may be performing another operation. + /// Added in: v15.0.0 + ERR_INVALID_STATE, + + /// A Buffer, TypedArray, DataView or string was provided as stdio input to an asynchronous fork. 
See the documentation for the child_process module for more information. + ERR_INVALID_SYNC_FORK_INPUT, + + /// A Node.js API function was called with an incompatible this value. + /// ```js + /// const urlSearchParams = new URLSearchParams('foo=bar&baz=new'); + /// const buf = Buffer.alloc(1); + /// urlSearchParams.has.call(buf, 'foo'); + /// ``` + /// Throws a TypeError with code 'ERR_INVALID_THIS' + ERR_INVALID_THIS, + + /// An invalid transfer object was passed to postMessage(). + ERR_INVALID_TRANSFER_OBJECT, + + /// An element in the iterable provided to the WHATWG URLSearchParams constructor did not represent a [name, value] tuple – that is, if an element is not iterable, or does not consist of exactly two elements. + ERR_INVALID_TUPLE, + + /// An invalid URI was passed. + ERR_INVALID_URI, + + /// An invalid URL was passed to the WHATWG URL constructor or the legacy url.parse() to be parsed. The thrown error object typically has an additional property 'input' that contains the URL that failed to parse. + ERR_INVALID_URL, + + /// An attempt was made to use a URL of an incompatible scheme (protocol) for a specific purpose. It is only used in the WHATWG URL API support in the fs module (which only accepts URLs with 'file' scheme), but may be used in other Node.js APIs as well in the future. + ERR_INVALID_URL_SCHEME, + + /// An attempt was made to use an IPC communication channel that was already closed. + ERR_IPC_CHANNEL_CLOSED, + + /// An attempt was made to disconnect an IPC communication channel that was already disconnected. See the documentation for the child_process module for more information. + ERR_IPC_DISCONNECTED, + + /// An attempt was made to create a child Node.js process using more than one IPC communication channel. See the documentation for the child_process module for more information. + ERR_IPC_ONE_PIPE, + + /// An attempt was made to open an IPC communication channel with a synchronously forked Node.js process. See the documentation for the child_process module for more information. + ERR_IPC_SYNC_FORK, + + /// An attempt was made to load a resource, but the resource did not match the integrity defined by the policy manifest. See the documentation for policy manifests for more information. + ERR_MANIFEST_ASSERT_INTEGRITY, + + /// An attempt was made to load a resource, but the resource was not listed as a dependency from the location that attempted to load it. See the documentation for policy manifests for more information. + ERR_MANIFEST_DEPENDENCY_MISSING, + + /// An attempt was made to load a policy manifest, but the manifest had multiple entries for a resource which did not match each other. Update the manifest entries to match in order to resolve this error. See the documentation for policy manifests for more information. + ERR_MANIFEST_INTEGRITY_MISMATCH, + + /// A policy manifest resource had an invalid value for one of its fields. Update the manifest entry to match in order to resolve this error. See the documentation for policy manifests for more information. + ERR_MANIFEST_INVALID_RESOURCE_FIELD, + + /// A policy manifest resource had an invalid value for one of its dependency mappings. Update the manifest entry to match to resolve this error. See the documentation for policy manifests for more information. + ERR_MANIFEST_INVALID_SPECIFIER, + + /// An attempt was made to load a policy manifest, but the manifest was unable to be parsed. See the documentation for policy manifests for more information. 
+ ERR_MANIFEST_PARSE_POLICY, + + /// An attempt was made to read from a policy manifest, but the manifest initialization has not yet taken place. This is likely a bug in Node.js. + ERR_MANIFEST_TDZ, + + /// A policy manifest was loaded, but had an unknown value for its "onerror" behavior. See the documentation for policy manifests for more information. + ERR_MANIFEST_UNKNOWN_ONERROR, + + /// An attempt was made to allocate memory (usually in the C++ layer) but it failed. + ERR_MEMORY_ALLOCATION_FAILED, + + /// Added in: v14.5.0, v12.19.0 + /// A message posted to a MessagePort could not be deserialized in the target vm Context. Not all Node.js objects can be successfully instantiated in any context at this time, and attempting to transfer them using postMessage() can fail on the receiving side in that case. + ERR_MESSAGE_TARGET_CONTEXT_UNAVAILABLE, + + /// A method is required but not implemented. + ERR_METHOD_NOT_IMPLEMENTED, + + /// A required argument of a Node.js API was not passed. This is only used for strict compliance with the API specification (which in some cases may accept func(undefined) but not func()). In most native Node.js APIs, func(undefined) and func() are treated identically, and the ERR_INVALID_ARG_TYPE error code may be used instead. + ERR_MISSING_ARGS, + + /// For APIs that accept options objects, some options might be mandatory. This code is thrown if a required option is missing. + ERR_MISSING_OPTION, + + /// An attempt was made to read an encrypted key without specifying a passphrase. + ERR_MISSING_PASSPHRASE, + + /// The V8 platform used by this instance of Node.js does not support creating Workers. This is caused by lack of embedder support for Workers. In particular, this error will not occur with standard builds of Node.js. + ERR_MISSING_PLATFORM_FOR_WORKER, + + /// Added in: v15.0.0 + /// An object that needs to be explicitly listed in the transferList argument is in the object passed to a postMessage() call, but is not provided in the transferList for that call. Usually, this is a MessagePort. + /// In Node.js versions prior to v15.0.0, the error code being used here was ERR_MISSING_MESSAGE_PORT_IN_TRANSFER_LIST. However, the set of transferable object types has been expanded to cover more types than MessagePort. + ERR_MISSING_TRANSFERABLE_IN_TRANSFER_LIST, + + /// Stability: 1 - Experimental + /// An ES Module could not be resolved. + ERR_MODULE_NOT_FOUND, + + /// A callback was called more than once. + /// A callback is almost always meant to only be called once as the query can either be fulfilled or rejected but not both at the same time. The latter would be possible by calling a callback more than once. + ERR_MULTIPLE_CALLBACK, + + /// While using Node-API, a constructor passed was not a function. + ERR_NAPI_CONS_FUNCTION, + + /// While calling napi_create_dataview(), a given offset was outside the bounds of the dataview or offset + length was larger than a length of given buffer. + ERR_NAPI_INVALID_DATAVIEW_ARGS, + + /// While calling napi_create_typedarray(), the provided offset was not a multiple of the element size. + ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT, + + /// While calling napi_create_typedarray(), (length * size_of_element) + byte_offset was larger than the length of given buffer. + ERR_NAPI_INVALID_TYPEDARRAY_LENGTH, + + /// An error occurred while invoking the JavaScript portion of the thread-safe function. + ERR_NAPI_TSFN_CALL_JS, + + /// An error occurred while attempting to retrieve the JavaScript undefined value. 
+ ERR_NAPI_TSFN_GET_UNDEFINED, + + /// On the main thread, values are removed from the queue associated with the thread-safe function in an idle loop. This error indicates that an error has occurred when attempting to start the loop. + ERR_NAPI_TSFN_START_IDLE_LOOP, + + /// Once no more items are left in the queue, the idle loop must be suspended. This error indicates that the idle loop has failed to stop. + ERR_NAPI_TSFN_STOP_IDLE_LOOP, + + /// An attempt was made to use crypto features while Node.js was not compiled with OpenSSL crypto support. + ERR_NO_CRYPTO, + + /// An attempt was made to use features that require ICU, but Node.js was not compiled with ICU support. + ERR_NO_ICU, + + /// A non-context-aware native addon was loaded in a process that disallows them. + ERR_NON_CONTEXT_AWARE_DISABLED, + + /// A given value is out of the accepted range. + ERR_OUT_OF_RANGE, + + /// The package.json "imports" field does not define the given internal package specifier mapping. + ERR_PACKAGE_IMPORT_NOT_DEFINED, + + /// The package.json "exports" field does not export the requested subpath. Because exports are encapsulated, private internal modules that are not exported cannot be imported through the package resolution, unless using an absolute URL. + ERR_PACKAGE_PATH_NOT_EXPORTED, + + /// An invalid timestamp value was provided for a performance mark or measure. + ERR_PERFORMANCE_INVALID_TIMESTAMP, + + /// Invalid options were provided for a performance measure. + ERR_PERFORMANCE_MEASURE_INVALID_OPTIONS, + + /// Accessing Object.prototype.__proto__ has been forbidden using --disable-proto=throw. Object.getPrototypeOf and Object.setPrototypeOf should be used to get and set the prototype of an object. + ERR_PROTO_ACCESS, + + /// Stability: 1 - Experimental + /// An attempt was made to require() an ES Module. + ERR_REQUIRE_ESM, + + /// Script execution was interrupted by SIGINT (For example, Ctrl+C was pressed.) + ERR_SCRIPT_EXECUTION_INTERRUPTED, + + /// Script execution timed out, possibly due to bugs in the script being executed. + ERR_SCRIPT_EXECUTION_TIMEOUT, + + /// The server.listen() method was called while a net.Server was already listening. This applies to all instances of net.Server, including HTTP, HTTPS, and HTTP/2 Server instances. + ERR_SERVER_ALREADY_LISTEN, + + /// The server.close() method was called when a net.Server was not running. This applies to all instances of net.Server, including HTTP, HTTPS, and HTTP/2 Server instances. + ERR_SERVER_NOT_RUNNING, + + /// An attempt was made to bind a socket that has already been bound. + ERR_SOCKET_ALREADY_BOUND, + + /// An invalid (negative) size was passed for either the recvBufferSize or sendBufferSize options in dgram.createSocket(). + ERR_SOCKET_BAD_BUFFER_SIZE, + + /// An API function expecting a port >= 0 and < 65536 received an invalid value. + ERR_SOCKET_BAD_PORT, + + /// An API function expecting a socket type (udp4 or udp6) received an invalid value. + ERR_SOCKET_BAD_TYPE, + + /// While using dgram.createSocket(), the size of the receive or send Buffer could not be determined. + ERR_SOCKET_BUFFER_SIZE, + + /// An attempt was made to operate on an already closed socket. + ERR_SOCKET_CLOSED, + + /// A dgram.connect() call was made on an already connected socket. + ERR_SOCKET_DGRAM_IS_CONNECTED, + + /// A dgram.disconnect() or dgram.remoteAddress() call was made on a disconnected socket. + ERR_SOCKET_DGRAM_NOT_CONNECTED, + + /// A call was made and the UDP subsystem was not running. 
+ ERR_SOCKET_DGRAM_NOT_RUNNING, + + /// A string was provided for a Subresource Integrity check, but was unable to be parsed. Check the format of integrity attributes by looking at the Subresource Integrity specification. + ERR_SRI_PARSE, + + /// A stream method was called that cannot complete because the stream was finished. + ERR_STREAM_ALREADY_FINISHED, + + /// An attempt was made to call stream.pipe() on a Writable stream. + ERR_STREAM_CANNOT_PIPE, + + /// A stream method was called that cannot complete because the stream was destroyed using stream.destroy(). + ERR_STREAM_DESTROYED, + + /// An attempt was made to call stream.write() with a null chunk. + ERR_STREAM_NULL_VALUES, + + /// An error returned by stream.finished() and stream.pipeline(), when a stream or a pipeline ends non gracefully with no explicit error. + ERR_STREAM_PREMATURE_CLOSE, + + /// An attempt was made to call stream.push() after a null(EOF) had been pushed to the stream. + ERR_STREAM_PUSH_AFTER_EOF, + + /// An attempt was made to call stream.unshift() after the 'end' event was emitted. + ERR_STREAM_UNSHIFT_AFTER_END_EVENT, + + /// Prevents an abort if a string decoder was set on the Socket or if the decoder is in objectMode. + /// const Socket = require('net').Socket; + /// const instance = new Socket(); + /// instance.setEncoding('utf8'); + ERR_STREAM_WRAP, + + /// An attempt was made to call stream.write() after stream.end() has been called. + ERR_STREAM_WRITE_AFTER_END, + + /// An attempt has been made to create a string longer than the maximum allowed length. + ERR_STRING_TOO_LONG, + + /// An artificial error object used to capture the call stack for diagnostic reports. + ERR_SYNTHETIC, + + /// An unspecified or non-specific system error has occurred within the Node.js process. The error object will have an err.info object property with additional details. + ERR_SYSTEM_ERROR, + + /// This error is thrown by checkServerIdentity if a user-supplied subjectaltname property violates encoding rules. Certificate objects produced by Node.js itself always comply with encoding rules and will never cause this error. + ERR_TLS_CERT_ALTNAME_FORMAT, + + /// While using TLS, the host name/IP of the peer did not match any of the subjectAltNames in its certificate. + ERR_TLS_CERT_ALTNAME_INVALID, + + /// While using TLS, the parameter offered for the Diffie-Hellman (DH) key-agreement protocol is too small. By default, the key length must be greater than or equal to 1024 bits to avoid vulnerabilities, even though it is strongly recommended to use 2048 bits or larger for stronger security. + ERR_TLS_DH_PARAM_SIZE, + + /// A TLS/SSL handshake timed out. In this case, the server must also abort the connection. + ERR_TLS_HANDSHAKE_TIMEOUT, + + /// The context must be a SecureContext. + /// Added in: v13.3.0 + ERR_TLS_INVALID_CONTEXT, + + /// The specified secureProtocol method is invalid. It is either unknown, or disabled because it is insecure. + ERR_TLS_INVALID_PROTOCOL_METHOD, + + /// Valid TLS protocol versions are 'TLSv1', 'TLSv1.1', or 'TLSv1.2'. + ERR_TLS_INVALID_PROTOCOL_VERSION, + + /// The TLS socket must be connected and securily established. Ensure the 'secure' event is emitted before continuing. + /// Added in: v13.10.0, v12.17.0 + ERR_TLS_INVALID_STATE, + + /// Attempting to set a TLS protocol minVersion or maxVersion conflicts with an attempt to set the secureProtocol explicitly. Use one mechanism or the other. + ERR_TLS_PROTOCOL_VERSION_CONFLICT, + + /// Failed to set PSK identity hint. Hint may be too long. 
+ ERR_TLS_PSK_SET_IDENTIY_HINT_FAILED, + + /// An attempt was made to renegotiate TLS on a socket instance with TLS disabled. + ERR_TLS_RENEGOTIATION_DISABLED, + + /// While using TLS, the server.addContext() method was called without providing a host name in the first parameter. + ERR_TLS_REQUIRED_SERVER_NAME, + + /// An excessive amount of TLS renegotiations is detected, which is a potential vector for denial-of-service attacks. + ERR_TLS_SESSION_ATTACK, + + /// An attempt was made to issue Server Name Indication from a TLS server-side socket, which is only valid from a client. + ERR_TLS_SNI_FROM_SERVER, + + /// The trace_events.createTracing() method requires at least one trace event category. + ERR_TRACE_EVENTS_CATEGORY_REQUIRED, + + /// The trace_events module could not be loaded because Node.js was compiled with the --without-v8-platform flag. + ERR_TRACE_EVENTS_UNAVAILABLE, + + /// A Transform stream finished while it was still transforming. + ERR_TRANSFORM_ALREADY_TRANSFORMING, + + /// A Transform stream finished with data still in the write buffer. + ERR_TRANSFORM_WITH_LENGTH_0, + + /// The initialization of a TTY failed due to a system error. + ERR_TTY_INIT_FAILED, + + /// Function was called within a process.on('exit') handler that shouldn't be called within process.on('exit') handler. + ERR_UNAVAILABLE_DURING_EXIT, + + /// process.setUncaughtExceptionCaptureCallback() was called twice, without first resetting the callback to null. + /// This error is designed to prevent accidentally overwriting a callback registered from another module. + ERR_UNCAUGHT_EXCEPTION_CAPTURE_ALREADY_SET, + + /// A string that contained unescaped characters was received. + ERR_UNESCAPED_CHARACTERS, + + /// An unhandled error occurred (for instance, when an 'error' event is emitted by an EventEmitter but an 'error' handler is not registered). + ERR_UNHANDLED_ERROR, + + /// Used to identify a specific kind of internal Node.js error that should not typically be triggered by user code. Instances of this error point to an internal bug within the Node.js binary itself. + ERR_UNKNOWN_BUILTIN_MODULE, + + /// A Unix group or user identifier that does not exist was passed. + ERR_UNKNOWN_CREDENTIAL, + + /// An invalid or unknown encoding option was passed to an API. + ERR_UNKNOWN_ENCODING, + + /// Stability: 1 - Experimental + /// An attempt was made to load a module with an unknown or unsupported file extension. + ERR_UNKNOWN_FILE_EXTENSION, + + /// An attempt was made to load a module with an unknown or unsupported format. + ERR_UNKNOWN_MODULE_FORMAT, + + /// An invalid or unknown process signal was passed to an API expecting a valid signal (such as subprocess.kill()). + ERR_UNKNOWN_SIGNAL, + + /// import a directory URL is unsupported. Instead, self-reference a package using its name and define a custom subpath in the "exports" field of the package.json file. + /// ```js + /// import './'; // unsupported + /// import './index.js'; // supported + /// import 'package-name'; // supported + /// ``` + ERR_UNSUPPORTED_DIR_IMPORT, + + /// import with URL schemes other than file and data is unsupported. + ERR_UNSUPPORTED_ESM_URL_SCHEME, + + /// While using the Performance Timing API (perf_hooks), no valid performance entry types are found. + ERR_VALID_PERFORMANCE_ENTRY_TYPE, + + /// A dynamic import callback was not specified. 
+ ERR_VM_DYNAMIC_IMPORT_CALLBACK_MISSING, + + /// The module attempted to be linked is not eligible for linking, because of one of the following reasons: + /// - It has already been linked (linkingStatus is 'linked') + /// - It is being linked (linkingStatus is 'linking') + /// - Linking has failed for this module (linkingStatus is 'errored') + ERR_VM_MODULE_ALREADY_LINKED, + + /// The cachedData option passed to a module constructor is invalid. + ERR_VM_MODULE_CACHED_DATA_REJECTED, + + /// Cached data cannot be created for modules which have already been evaluated. + ERR_VM_MODULE_CANNOT_CREATE_CACHED_DATA, + + /// The module being returned from the linker function is from a different context than the parent module. Linked modules must share the same context. + ERR_VM_MODULE_DIFFERENT_CONTEXT, + + /// The linker function returned a module for which linking has failed. + ERR_VM_MODULE_LINKING_ERRORED, + + /// The module was unable to be linked due to a failure. + ERR_VM_MODULE_LINK_FAILURE, + + /// The fulfilled value of a linking promise is not a vm.Module object. + ERR_VM_MODULE_NOT_MODULE, + + /// The current module's status does not allow for this operation. The specific meaning of the error depends on the specific function. + ERR_VM_MODULE_STATUS, + + /// The WASI instance has already started. + ERR_WASI_ALREADY_STARTED, + + /// The WASI instance has not been started. + ERR_WASI_NOT_STARTED, + + /// The Worker initialization failed. + ERR_WORKER_INIT_FAILED, + + /// The execArgv option passed to the Worker constructor contains invalid flags. + ERR_WORKER_INVALID_EXEC_ARGV, + + /// An operation failed because the Worker instance is not currently running. + ERR_WORKER_NOT_RUNNING, + + /// The Worker instance terminated because it reached its memory limit. + ERR_WORKER_OUT_OF_MEMORY, + + /// The path for the main script of a worker is neither an absolute path nor a relative path starting with ./ or ../. + ERR_WORKER_PATH, + + /// All attempts at serializing an uncaught exception from a worker thread failed. + ERR_WORKER_UNSERIALIZABLE_ERROR, + + /// The requested functionality is not supported in worker threads. + ERR_WORKER_UNSUPPORTED_OPERATION, + + /// Creation of a zlib object failed due to incorrect configuration. + ERR_ZLIB_INITIALIZATION_FAILED, + + /// Too much HTTP header data was received. In order to protect against malicious or malconfigured clients, if more than 8 KB of HTTP header data is received then HTTP parsing will abort without a request or response object being created, and an Error with this code will be emitted. + HPE_HEADER_OVERFLOW, + + /// Server is sending both a Content-Length header and Transfer-Encoding: chunked. + /// Transfer-Encoding: chunked allows the server to maintain an HTTP persistent connection for dynamically generated content. In this case, the Content-Length HTTP header cannot be used. + /// Use Content-Length or Transfer-Encoding: chunked. + HPE_UNEXPECTED_CONTENT_LENGTH, + + /// History + /// A module file could not be resolved while attempting a require() or import operation. + MODULE_NOT_FOUND, + + // -- Deprecated --- + + /// The value passed to postMessage() contained an object that is not supported for transferring. + ERR_CANNOT_TRANSFER_OBJECT, + + /// The UTF-16 encoding was used with hash.digest(). While the hash.digest() method does allow an encoding argument to be passed in, causing the method to return a string rather than a Buffer, the UTF-16 encoding (e.g. ucs or utf16le) is not supported. 
+ /// Added in: v9.0.0Removed in: v12.12.0 + ERR_CRYPTO_HASH_DIGEST_NO_UTF16, + + /// Added in: v9.0.0Removed in: v10.0.0 + ERR_HTTP2_FRAME_ERROR, + + /// Used when a failure occurs sending an individual frame on the HTTP/2 session. + /// Added in: v9.0.0Removed in: v10.0.0 + ERR_HTTP2_HEADERS_OBJECT, + + /// Used when an HTTP/2 Headers Object is expected. + /// Added in: v9.0.0Removed in: v10.0.0 + /// Used when a required header is missing in an HTTP/2 message. + ERR_HTTP2_HEADER_REQUIRED, + + /// HTTP/2 informational headers must only be sent prior to calling the Http2Stream.prototype.respond() method. + /// Added in: v9.0.0Removed in: v10.0.0 + ERR_HTTP2_INFO_HEADERS_AFTER_RESPOND, + + /// Used when an action has been performed on an HTTP/2 Stream that has already been closed. + /// Added in: v9.0.0Removed in: v10.0.0 + ERR_HTTP2_STREAM_CLOSED, + + /// Used when an invalid character is found in an HTTP response status message (reason phrase). + /// Added in: v10.0.0Removed in: v11.0.0 + ERR_HTTP_INVALID_CHAR, + + /// A given index was out of the accepted range (e.g. negative offsets). + /// Added in: v8.0.0Removed in: v15.0.0 + ERR_INDEX_OUT_OF_RANGE, + + /// An invalid or unexpected value was passed in an options object. + ERR_INVALID_OPT_VALUE, + + /// Added in: v9.0.0Removed in: v15.0.0 + /// An invalid or unknown file encoding was passed. + ERR_INVALID_OPT_VALUE_ENCODING, + + /// Removed in: v15.0.0 + /// This error code was replaced by ERR_MISSING_TRANSFERABLE_IN_TRANSFER_LIST in Node.js v15.0.0, because it is no longer accurate as other types of transferable objects also exist now. + ERR_MISSING_MESSAGE_PORT_IN_TRANSFER_LIST, + + /// Added in: v9.0.0Removed in: v10.0.0 + /// Used by the Node-API when Constructor.prototype is not an object. + ERR_NAPI_CONS_PROTOTYPE_OBJECT, + + /// A Node.js API was called in an unsupported manner, such as Buffer.write(string, encoding, offset[, length]). + ERR_NO_LONGER_SUPPORTED, + + /// An operation failed. This is typically used to signal the general failure of an asynchronous operation. + /// Added in: v15.0.0 + ERR_OPERATION_FAILED, + + /// Added in: v9.0.0Removed in: v10.0.0 + /// Used generically to identify that an operation caused an out of memory condition. + ERR_OUTOFMEMORY, + + /// Added in: v9.0.0Removed in: v10.0.0 + /// The repl module was unable to parse data from the REPL history file. + ERR_PARSE_HISTORY_DATA, + + /// Data could not be sent on a socket. + ERR_SOCKET_CANNOT_SEND, + + /// An attempt was made to close the process.stderr stream. By design, Node.js does not allow stdout or stderr streams to be closed by user code. + ERR_STDERR_CLOSE, + + /// An attempt was made to close the process.stdout stream. By design, Node.js does not allow stdout or stderr streams to be closed by user code. + ERR_STDOUT_CLOSE, + + /// Added in: v9.0.0Removed in: v10.0.0 + /// Used when an attempt is made to use a readable stream that has not implemented readable._read(). + ERR_STREAM_READ_NOT_IMPLEMENTED, + + /// Added in: v9.0.0Removed in: v10.0.0 + /// Used when a TLS renegotiation request has failed in a non-specific way. + /// Added in: v10.5.0Removed in: v14.0.0 + ERR_TLS_RENEGOTIATION_FAILED, + + /// A SharedArrayBuffer whose memory is not managed by the JavaScript engine or by Node.js was encountered during serialization. Such a SharedArrayBuffer cannot be serialized. + /// This can only happen when native addons create SharedArrayBuffers in "externalized" mode, or put existing SharedArrayBuffer into externalized mode. 
+ ERR_TRANSFERRING_EXTERNALIZED_SHAREDARRAYBUFFER, + + /// Added in: v8.0.0Removed in: v11.7.0 + /// An attempt was made to launch a Node.js process with an unknown stdin file type. This error is usually an indication of a bug within Node.js itself, although it is possible for user code to trigger it. + ERR_UNKNOWN_STDIN_TYPE, + + /// Added in: v8.0.0Removed in: v11.7.0 + /// An attempt was made to launch a Node.js process with an unknown stdout or stderr file type. This error is usually an indication of a bug within Node.js itself, although it is possible for user code to trigger it. + ERR_UNKNOWN_STREAM_TYPE, + + /// The V8 BreakIterator API was used but the full ICU data set is not installed. + ERR_V8BREAKITERATOR, + + /// Added in: v9.0.0Removed in: v10.0.0 + /// Used when a given value is out of the accepted range. + ERR_VALUE_OUT_OF_RANGE, + + /// The module must be successfully linked before instantiation. + ERR_VM_MODULE_NOT_LINKED, + + /// Added in: v11.0.0Removed in: v16.9.0 + /// The pathname used for the main script of a worker has an unknown file extension. + ERR_WORKER_UNSUPPORTED_EXTENSION, + + /// Added in: v9.0.0Removed in: v10.0.0 + ERR_ZLIB_BINDING_CLOSED, + + /// Used when an attempt is made to use a zlib object after it has already been closed. + /// CPU USAGE + ERR_CPU_USAGE, +}; diff --git a/src/bun.js/node/syscall.zig b/src/bun.js/node/syscall.zig new file mode 100644 index 000000000..b931e9c0e --- /dev/null +++ b/src/bun.js/node/syscall.zig @@ -0,0 +1,622 @@ +// This file is entirely based on Zig's std.os +// The differences are in error handling +const std = @import("std"); +const os = std.os; +const builtin = @import("builtin"); + +const Syscall = @This(); +const Environment = @import("../../global.zig").Environment; +const default_allocator = @import("../../global.zig").default_allocator; +const JSC = @import("../../jsc.zig"); +const SystemError = JSC.SystemError; +const bun = @import("../../global.zig"); +const MAX_PATH_BYTES = bun.MAX_PATH_BYTES; +const fd_t = bun.FileDescriptorType; +const C = @import("../../global.zig").C; +const linux = os.linux; +const Maybe = JSC.Maybe; + +pub const system = if (Environment.isLinux) linux else @import("io").darwin; +pub const S = struct { + pub usingnamespace if (Environment.isLinux) linux.S else std.os.S; +}; +const sys = std.os.system; + +const statSym = if (Environment.isMac) + C.stat +else if (Environment.isLinux) + linux.stat +else + @compileError("STAT"); + +const fstatSym = if (Environment.isMac) + C.fstat +else if (Environment.isLinux) + linux.fstat +else + @compileError("STAT"); + +const lstat64 = if (Environment.isMac) + C.lstat +else if (Environment.isLinux) + linux.lstat +else + @compileError("STAT"); + +pub const Tag = enum(u8) { + TODO, + + access, + chmod, + chown, + clonefile, + close, + copy_file_range, + copyfile, + fchmod, + fchown, + fcntl, + fdatasync, + fstat, + fsync, + ftruncate, + futimens, + getdents64, + getdirentries64, + lchmod, + lchown, + link, + lseek, + lstat, + lutimes, + mkdir, + mkdtemp, + mmap, + munmap, + open, + pread, + pwrite, + read, + readlink, + rename, + stat, + symlink, + unlink, + utimes, + write, + getcwd, + chdir, + fcopyfile, + recv, + send, + sendfile, + splice, + + kevent, + kqueue, + pub var strings = std.EnumMap(Tag, JSC.C.JSStringRef).initFull(null); +}; +const PathString = @import("../../global.zig").PathString; + +const mode_t = os.mode_t; + +const open_sym = system.open; + +const mem = std.mem; + +pub fn getcwd(buf: *[bun.MAX_PATH_BYTES]u8) Maybe([]const u8) { + 
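+ // getcwd(3) writes the current working directory into `buf`, and this wrapper
+ // returns it through Maybe([]const u8): either `.{ .result = slice }` on
+ // success, or `.{ .err = Syscall.Error{ ... } }` carrying the errno and the
+ // syscall tag, so callers branch on the union instead of a Zig error set.
+ // A purely illustrative call site (the names below are not from this file,
+ // and it assumes the caller also returns a Maybe):
+ //   var cwd_buf: [bun.MAX_PATH_BYTES]u8 = undefined;
+ //   switch (Syscall.getcwd(&cwd_buf)) {
+ //       .result => |cwd| handleCwd(cwd),
+ //       .err => |err| return .{ .err = err },
+ //   }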
const Result = Maybe([]const u8); + buf[0] = 0; + const rc = std.c.getcwd(buf, bun.MAX_PATH_BYTES); + return if (rc != null) + Result{ .result = std.mem.sliceTo(rc.?[0..bun.MAX_PATH_BYTES], 0) } + else + Result.errnoSys(0, .getcwd).?; +} + +pub fn fchmod(fd: JSC.Node.FileDescriptor, mode: JSC.Node.Mode) Maybe(void) { + return Maybe(void).errnoSys(C.fchmod(fd, mode), .fchmod) orelse + Maybe(void).success; +} + +pub fn chdir(destination: [:0]const u8) Maybe(void) { + const rc = sys.chdir(destination); + return Maybe(void).errnoSys(rc, .chdir) orelse Maybe(void).success; +} + +pub fn stat(path: [:0]const u8) Maybe(os.Stat) { + var stat_ = mem.zeroes(os.Stat); + if (Maybe(os.Stat).errnoSys(statSym(path, &stat_), .stat)) |err| return err; + return Maybe(os.Stat){ .result = stat_ }; +} + +pub fn lstat(path: [:0]const u8) Maybe(os.Stat) { + var stat_ = mem.zeroes(os.Stat); + if (Maybe(os.Stat).errnoSys(lstat64(path, &stat_), .lstat)) |err| return err; + return Maybe(os.Stat){ .result = stat_ }; +} + +pub fn fstat(fd: JSC.Node.FileDescriptor) Maybe(os.Stat) { + var stat_ = mem.zeroes(os.Stat); + if (Maybe(os.Stat).errnoSys(fstatSym(fd, &stat_), .fstat)) |err| return err; + return Maybe(os.Stat){ .result = stat_ }; +} + +pub fn mkdir(file_path: [:0]const u8, flags: JSC.Node.Mode) Maybe(void) { + if (comptime Environment.isMac) { + return Maybe(void).errnoSysP(system.mkdir(file_path, flags), .mkdir, file_path) orelse Maybe(void).success; + } + + if (comptime Environment.isLinux) { + return Maybe(void).errnoSysP(linux.mkdir(file_path, flags), .mkdir, file_path) orelse Maybe(void).success; + } +} + +pub fn getErrno(rc: anytype) std.os.E { + if (comptime Environment.isMac) return std.os.errno(rc); + const Type = @TypeOf(rc); + + return switch (Type) { + comptime_int, usize => std.os.linux.getErrno(@as(usize, rc)), + i32, c_int, isize => std.os.linux.getErrno(@bitCast(usize, @as(isize, rc))), + else => @compileError("Not implemented yet for type " ++ @typeName(Type)), + }; +} + +pub fn open(file_path: [:0]const u8, flags: JSC.Node.Mode, perm: JSC.Node.Mode) Maybe(JSC.Node.FileDescriptor) { + while (true) { + const rc = Syscall.system.open(file_path, flags, perm); + return switch (Syscall.getErrno(rc)) { + .SUCCESS => .{ .result = @intCast(JSC.Node.FileDescriptor, rc) }, + .INTR => continue, + else => |err| { + return Maybe(std.os.fd_t){ + .err = .{ + .errno = @truncate(Syscall.Error.Int, @enumToInt(err)), + .syscall = .open, + }, + }; + }, + }; + } + + unreachable; +} + +// The zig standard library marks BADF as unreachable +// That error is not unreachable for us +pub fn close(fd: std.os.fd_t) ?Syscall.Error { + if (comptime Environment.isMac) { + // This avoids the EINTR problem. 
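+ // Only BADF is reported back to the caller; any other errno is treated as a successful
+ // close, matching the Linux branch below.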
+ return switch (system.getErrno(system.@"close$NOCANCEL"(fd))) { + .BADF => Syscall.Error{ .errno = @enumToInt(os.E.BADF), .syscall = .close }, + else => null, + }; + } + + if (comptime Environment.isLinux) { + return switch (linux.getErrno(linux.close(fd))) { + .BADF => Syscall.Error{ .errno = @enumToInt(os.E.BADF), .syscall = .close }, + else => null, + }; + } + + @compileError("Not implemented yet"); +} + +const max_count = switch (builtin.os.tag) { + .linux => 0x7ffff000, + .macos, .ios, .watchos, .tvos => std.math.maxInt(i32), + else => std.math.maxInt(isize), +}; + +pub fn write(fd: os.fd_t, bytes: []const u8) Maybe(usize) { + const adjusted_len = @minimum(max_count, bytes.len); + + while (true) { + const rc = sys.write(fd, bytes.ptr, adjusted_len); + if (Maybe(usize).errnoSys(rc, .write)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } + unreachable; +} + +const pread_sym = if (builtin.os.tag == .linux and builtin.link_libc) + sys.pread64 +else if (builtin.os.tag.isDarwin()) + system.@"pread$NOCANCEL" +else + system.pread; + +pub fn pread(fd: os.fd_t, buf: []u8, offset: i64) Maybe(usize) { + const adjusted_len = @minimum(buf.len, max_count); + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + while (true) { + const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset); + if (Maybe(usize).errnoSys(rc, .pread)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } + unreachable; +} + +const pwrite_sym = if (builtin.os.tag == .linux and builtin.link_libc) + sys.pwrite64 +else + sys.pwrite; + +pub fn pwrite(fd: os.fd_t, bytes: []const u8, offset: i64) Maybe(usize) { + const adjusted_len = @minimum(bytes.len, max_count); + + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + while (true) { + const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset); + return if (Maybe(usize).errnoSys(rc, .pwrite)) |err| { + switch (err.getErrno()) { + .INTR => continue, + else => return err, + } + } else Maybe(usize){ .result = @intCast(usize, rc) }; + } + + unreachable; +} + +pub fn read(fd: os.fd_t, buf: []u8) Maybe(usize) { + const adjusted_len = @minimum(buf.len, max_count); + if (comptime Environment.isMac) { + const rc = system.@"read$NOCANCEL"(fd, buf.ptr, adjusted_len); + if (Maybe(usize).errnoSys(rc, .read)) |err| { + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } else { + while (true) { + const rc = sys.read(fd, buf.ptr, adjusted_len); + if (Maybe(usize).errnoSys(rc, .read)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } + } + unreachable; +} + +pub fn recv(fd: os.fd_t, buf: []u8, flag: u32) Maybe(usize) { + if (comptime Environment.isMac) { + const rc = system.@"recvfrom$NOCANCEL"(fd, buf.ptr, buf.len, flag, null, null); + if (Maybe(usize).errnoSys(rc, .recv)) |err| { + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } else { + while (true) { + const rc = linux.recvfrom(fd, buf.ptr, buf.len, flag | os.SOCK.CLOEXEC | linux.MSG.CMSG_CLOEXEC, null, null); + if (Maybe(usize).errnoSys(rc, .recv)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } + } + unreachable; +} + +pub fn send(fd: os.fd_t, buf: []const u8, flag: u32) Maybe(usize) { + if (comptime Environment.isMac) { + const rc = 
system.@"sendto$NOCANCEL"(fd, buf.ptr, buf.len, flag, null, 0); + if (Maybe(usize).errnoSys(rc, .send)) |err| { + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } else { + while (true) { + const rc = linux.sendto(fd, buf.ptr, buf.len, flag | os.SOCK.CLOEXEC | os.MSG.NOSIGNAL, null, 0); + + if (Maybe(usize).errnoSys(rc, .send)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + + return Maybe(usize){ .result = @intCast(usize, rc) }; + } + } + unreachable; +} + +pub fn readlink(in: [:0]const u8, buf: []u8) Maybe(usize) { + while (true) { + const rc = sys.readlink(in, buf.ptr, buf.len); + + if (Maybe(usize).errnoSys(rc, .readlink)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(usize){ .result = @intCast(usize, rc) }; + } + unreachable; +} + +pub fn rename(from: [:0]const u8, to: [:0]const u8) Maybe(void) { + while (true) { + if (Maybe(void).errnoSys(sys.rename(from, to), .rename)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(void).success; + } + unreachable; +} + +pub fn chown(path: [:0]const u8, uid: os.uid_t, gid: os.gid_t) Maybe(void) { + while (true) { + if (Maybe(void).errnoSys(C.chown(path, uid, gid), .chown)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(void).success; + } + unreachable; +} + +pub fn symlink(from: [:0]const u8, to: [:0]const u8) Maybe(void) { + while (true) { + if (Maybe(void).errnoSys(sys.symlink(from, to), .symlink)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(void).success; + } + unreachable; +} + +pub fn clonefile(from: [:0]const u8, to: [:0]const u8) Maybe(void) { + if (comptime !Environment.isMac) @compileError("macOS only"); + + while (true) { + if (Maybe(void).errnoSys(C.darwin.clonefile(from, to, 0), .clonefile)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(void).success; + } + unreachable; +} + +pub fn copyfile(from: [:0]const u8, to: [:0]const u8, flags: c_int) Maybe(void) { + if (comptime !Environment.isMac) @compileError("macOS only"); + + while (true) { + if (Maybe(void).errnoSys(C.darwin.copyfile(from, to, null, flags), .copyfile)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(void).success; + } + unreachable; +} + +pub fn fcopyfile(fd_in: std.os.fd_t, fd_out: std.os.fd_t, flags: u32) Maybe(void) { + if (comptime !Environment.isMac) @compileError("macOS only"); + + while (true) { + if (Maybe(void).errnoSys(system.fcopyfile(fd_in, fd_out, null, flags), .fcopyfile)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(void).success; + } + unreachable; +} + +pub fn unlink(from: [:0]const u8) Maybe(void) { + while (true) { + if (Maybe(void).errno(sys.unlink(from), .unlink)) |err| { + if (err.getErrno() == .INTR) continue; + return err; + } + return Maybe(void).success; + } + unreachable; +} + +pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) Maybe([]u8) { + switch (comptime builtin.os.tag) { + .windows => { + const windows = std.os.windows; + var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined; + const wide_slice = windows.GetFinalPathNameByHandle(fd, .{}, wide_buf[0..]) catch { + return Maybe([]u8){ .err = .{ .errno = .EBADF } }; + }; + + // Trust that Windows gives us valid UTF-16LE. 
+ const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable; + return .{ .result = out_buffer[0..end_index] }; + }, + .macos, .ios, .watchos, .tvos => { + // On macOS, we can use F.GETPATH fcntl command to query the OS for + // the path to the file descriptor. + @memset(out_buffer, 0, MAX_PATH_BYTES); + if (Maybe([]u8).errnoSys(system.fcntl(fd, os.F.GETPATH, out_buffer), .fcntl)) |err| { + return err; + } + const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES; + return .{ .result = out_buffer[0..len] }; + }, + .linux => { + // TODO: alpine linux may not have /proc/self + var procfs_buf: ["/proc/self/fd/-2147483648".len:0]u8 = undefined; + const proc_path = std.fmt.bufPrintZ(procfs_buf[0..], "/proc/self/fd/{d}\x00", .{fd}) catch unreachable; + + return switch (readlink(proc_path, out_buffer)) { + .err => |err| return .{ .err = err }, + .result => |len| return .{ .result = out_buffer[0..len] }, + }; + }, + // .solaris => { + // var procfs_buf: ["/proc/self/path/-2147483648".len:0]u8 = undefined; + // const proc_path = std.fmt.bufPrintZ(procfs_buf[0..], "/proc/self/path/{d}", .{fd}) catch unreachable; + + // const target = readlinkZ(proc_path, out_buffer) catch |err| switch (err) { + // error.UnsupportedReparsePointType => unreachable, + // error.NotLink => unreachable, + // else => |e| return e, + // }; + // return target; + // }, + else => @compileError("querying for canonical path of a handle is unsupported on this host"), + } +} + +/// Use of a mapped region can result in these signals: +/// * SIGSEGV - Attempted write into a region mapped as read-only. +/// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file +fn mmap( + ptr: ?[*]align(mem.page_size) u8, + length: usize, + prot: u32, + flags: u32, + fd: os.fd_t, + offset: u64, +) Maybe([]align(mem.page_size) u8) { + const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned + const rc = std.c.mmap(ptr, length, prot, flags, fd, ioffset); + const fail = std.c.MAP.FAILED; + if (rc == fail) { + return Maybe([]align(mem.page_size) u8){ + .err = .{ .errno = @truncate(Syscall.Error.Int, @enumToInt(std.c.getErrno(@bitCast(i64, @ptrToInt(fail))))), .syscall = .mmap }, + }; + } + + return Maybe([]align(mem.page_size) u8){ .result = @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length] }; +} + +pub fn mmapFile(path: [:0]const u8, flags: u32, wanted_size: ?usize, offset: usize) Maybe([]align(mem.page_size) u8) { + const fd = switch (open(path, os.O.RDWR, 0)) { + .result => |fd| fd, + .err => |err| return .{ .err = err }, + }; + + var size = std.math.sub(usize, @intCast(usize, switch (fstat(fd)) { + .result => |result| result.size, + .err => |err| { + _ = close(fd); + return .{ .err = err }; + }, + }), offset) catch 0; + + if (wanted_size) |size_| size = @minimum(size, size_); + + const map = switch (mmap(null, size, os.PROT.READ | os.PROT.WRITE, flags, fd, offset)) { + .result => |map| map, + + .err => |err| { + _ = close(fd); + return .{ .err = err }; + }, + }; + + if (close(fd)) |err| { + _ = munmap(map); + return .{ .err = err }; + } + + return .{ .result = map }; +} + +pub fn munmap(memory: []align(mem.page_size) const u8) Maybe(void) { + if (Maybe(void).errnoSys(system.munmap(memory.ptr, memory.len), .munmap)) |err| { + return err; + } else return Maybe(void).success; +} + +pub const Error = struct { + const max_errno_value = brk: { + const errno_values = std.enums.values(os.E); + var err = 
@enumToInt(os.E.SUCCESS); + for (errno_values) |errn| { + err = @maximum(err, @enumToInt(errn)); + } + break :brk err; + }; + pub const Int: type = std.math.IntFittingRange(0, max_errno_value + 5); + + errno: Int, + syscall: Syscall.Tag = @intToEnum(Syscall.Tag, 0), + path: []const u8 = "", + + pub fn fromCode(errno: os.E, syscall: Syscall.Tag) Error { + return .{ .errno = @truncate(Int, @enumToInt(errno)), .syscall = syscall }; + } + + pub const oom = fromCode(os.E.NOMEM, .read); + + pub const retry = Error{ + .errno = if (Environment.isLinux) + @intCast(Int, @enumToInt(os.E.AGAIN)) + else if (Environment.isMac) + @intCast(Int, @enumToInt(os.E.WOULDBLOCK)) + else + @intCast(Int, @enumToInt(os.E.INTR)), + .syscall = .retry, + }; + + pub inline fn getErrno(this: Error) os.E { + return @intToEnum(os.E, this.errno); + } + + pub inline fn withPath(this: Error, path: anytype) Error { + return Error{ + .errno = this.errno, + .syscall = this.syscall, + .path = bun.span(path), + }; + } + + pub inline fn withSyscall(this: Error, syscall: Syscall) Error { + return Error{ + .errno = this.errno, + .syscall = syscall, + .path = this.path, + }; + } + + pub const todo_errno = std.math.maxInt(Int) - 1; + pub const todo = Error{ .errno = todo_errno }; + + pub fn toSystemError(this: Error) SystemError { + var err = SystemError{ + .errno = @as(c_int, this.errno) * -1, + .syscall = JSC.ZigString.init(@tagName(this.syscall)), + }; + + // errno label + if (this.errno > 0 and this.errno < C.SystemErrno.max) { + const system_errno = @intToEnum(C.SystemErrno, this.errno); + err.code = JSC.ZigString.init(@tagName(system_errno)); + if (C.SystemErrno.labels.get(system_errno)) |label| { + err.message = JSC.ZigString.init(label); + } + } + + if (this.path.len > 0) { + err.path = JSC.ZigString.init(this.path); + } + + return err; + } + + pub fn toJS(this: Error, ctx: JSC.C.JSContextRef) JSC.C.JSObjectRef { + return this.toSystemError().toErrorInstance(ctx.ptr()).asObjectRef(); + } + + pub fn toJSC(this: Error, ptr: *JSC.JSGlobalObject) JSC.JSValue { + return this.toSystemError().toErrorInstance(ptr); + } +}; diff --git a/src/bun.js/node/types.zig b/src/bun.js/node/types.zig new file mode 100644 index 000000000..52a35699b --- /dev/null +++ b/src/bun.js/node/types.zig @@ -0,0 +1,2702 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const bun = @import("../../global.zig"); +const strings = bun.strings; +const string = bun.string; +const AsyncIO = @import("io"); +const JSC = @import("../../jsc.zig"); +const PathString = JSC.PathString; +const Environment = bun.Environment; +const C = bun.C; +const Syscall = @import("./syscall.zig"); +const os = std.os; +const Buffer = JSC.MarkedArrayBuffer; +const IdentityContext = @import("../../identity_context.zig").IdentityContext; +const logger = @import("../../logger.zig"); +const Fs = @import("../../fs.zig"); +const URL = @import("../../url.zig").URL; +const Shimmer = @import("../bindings/shimmer.zig").Shimmer; +const is_bindgen: bool = std.meta.globalOption("bindgen", bool) orelse false; +const meta = bun.meta; +/// Time in seconds. Not nanos! 
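+/// Converted from JS numbers or Date objects by timeLikeFromJS() below.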
+pub const TimeLike = c_int; +pub const Mode = if (Environment.isLinux) u32 else std.os.mode_t; +const heap_allocator = bun.default_allocator; +pub fn DeclEnum(comptime T: type) type { + const fieldInfos = std.meta.declarations(T); + var enumFields: [fieldInfos.len]std.builtin.TypeInfo.EnumField = undefined; + var decls = [_]std.builtin.TypeInfo.Declaration{}; + inline for (fieldInfos) |field, i| { + enumFields[i] = .{ + .name = field.name, + .value = i, + }; + } + return @Type(.{ + .Enum = .{ + .layout = .Auto, + .tag_type = std.math.IntFittingRange(0, fieldInfos.len - 1), + .fields = &enumFields, + .decls = &decls, + .is_exhaustive = true, + }, + }); +} + +pub const FileDescriptor = os.fd_t; +pub const Flavor = enum { + sync, + promise, + callback, + + pub fn Wrap(comptime this: Flavor, comptime Type: type) type { + return comptime brk: { + switch (this) { + .sync => break :brk Type, + // .callback => { + // const Callback = CallbackTask(Type); + // }, + else => @compileError("Not implemented yet"), + } + }; + } +}; + +/// Node.js expects the error to include contextual information +/// - "syscall" +/// - "path" +/// - "errno" +pub fn Maybe(comptime ResultType: type) type { + return union(Tag) { + pub const ReturnType = ResultType; + + err: Syscall.Error, + result: ReturnType, + + pub const retry: @This() = .{ + .err = Syscall.Error.retry, + }; + + pub const Tag = enum { err, result }; + + pub const success: @This() = @This(){ + .result = std.mem.zeroes(ReturnType), + }; + + pub const todo: @This() = @This(){ .err = Syscall.Error.todo }; + + pub fn toJS(this: @This(), globalThis: *JSC.JSGlobalObject) JSC.JSValue { + switch (this) { + .err => |e| { + return e.toJSC(globalThis); + }, + .result => |r| { + if (comptime ReturnType == void) { + return JSC.JSValue.jsUndefined(); + } + + if (comptime ReturnType == JSC.ArrayBuffer) { + return r.toJS(globalThis, null); + } + + if (comptime std.meta.trait.isNumber(ResultType) or std.meta.trait.isFloat(ResultType)) { + return JSC.JSValue.jsNumber(r); + } + + if (comptime std.meta.trait.isZigString(ResultType)) { + if (ResultType == []u8) { + return JSC.ArrayBuffer.fromBytes(r, .ArrayBuffer).toJS(globalThis, null); + } + return JSC.ZigString.init(std.mem.span(r)).withEncoding().toValueAuto(globalThis); + } + + if (comptime @typeInfo(ReturnType) == .Bool) { + return JSC.JSValue.jsBoolean(r); + } + + if (comptime std.meta.trait.isContainer(ReturnType)) { + return r.toJS(globalThis); + } + + @compileError("toJS Not implemented for type " ++ @typeName(ReturnType)); + }, + } + } + + pub fn toArrayBuffer(this: @This(), globalThis: *JSC.JSGlobalObject) JSC.JSValue { + switch (this) { + .err => |e| { + return e.toJSC(globalThis); + }, + .result => |r| { + return JSC.ArrayBuffer.fromBytes(r, .ArrayBuffer).toJS(globalThis, null); + }, + } + } + + pub inline fn getErrno(this: @This()) os.E { + return switch (this) { + .result => os.E.SUCCESS, + .err => |err| @intToEnum(os.E, err.errno), + }; + } + + pub inline fn errno(rc: anytype) ?@This() { + return switch (Syscall.getErrno(rc)) { + .SUCCESS => null, + else => |err| @This(){ + // always truncate + .err = .{ .errno = @truncate(Syscall.Error.Int, @enumToInt(err)) }, + }, + }; + } + + pub inline fn errnoSys(rc: anytype, syscall: Syscall.Tag) ?@This() { + return switch (Syscall.getErrno(rc)) { + .SUCCESS => null, + else => |err| @This(){ + // always truncate + .err = .{ .errno = @truncate(Syscall.Error.Int, @enumToInt(err)), .syscall = syscall }, + }, + }; + } + + pub inline fn errnoSysP(rc: anytype, syscall: 
Syscall.Tag, path: anytype) ?@This() { + return switch (Syscall.getErrno(rc)) { + .SUCCESS => null, + else => |err| @This(){ + // always truncate + .err = .{ .errno = @truncate(Syscall.Error.Int, @enumToInt(err)), .syscall = syscall, .path = std.mem.span(path) }, + }, + }; + } + }; +} + +pub const StringOrBuffer = union(Tag) { + string: string, + buffer: Buffer, + + pub const Tag = enum { string, buffer }; + + pub fn slice(this: StringOrBuffer) []const u8 { + return switch (this) { + .string => this.string, + .buffer => this.buffer.slice(), + }; + } + + pub export fn external_string_finalizer(_: ?*anyopaque, _: JSC.C.JSStringRef, buffer: *anyopaque, byteLength: usize) void { + bun.default_allocator.free(@ptrCast([*]const u8, buffer)[0..byteLength]); + } + + pub fn toJS(this: StringOrBuffer, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) JSC.C.JSValueRef { + return switch (this) { + .string => { + const input = this.string; + if (strings.toUTF16Alloc(bun.default_allocator, input, false) catch null) |utf16| { + bun.default_allocator.free(bun.constStrToU8(input)); + return JSC.ZigString.toExternalU16(utf16.ptr, utf16.len, ctx.ptr()).asObjectRef(); + } + + return JSC.ZigString.init(input).toExternalValue(ctx.ptr()).asObjectRef(); + }, + .buffer => this.buffer.toJSObjectRef(ctx, exception), + }; + } + + pub fn fromJS(global: *JSC.JSGlobalObject, allocator: std.mem.Allocator, value: JSC.JSValue, exception: JSC.C.ExceptionRef) ?StringOrBuffer { + return switch (value.jsType()) { + JSC.JSValue.JSType.String, JSC.JSValue.JSType.StringObject, JSC.JSValue.JSType.DerivedStringObject, JSC.JSValue.JSType.Object => { + var zig_str = value.toSlice(global, allocator); + return StringOrBuffer{ .string = zig_str.slice() }; + }, + JSC.JSValue.JSType.ArrayBuffer => StringOrBuffer{ + .buffer = Buffer.fromArrayBuffer(global.ref(), value, exception), + }, + JSC.JSValue.JSType.Uint8Array, JSC.JSValue.JSType.DataView => StringOrBuffer{ + .buffer = Buffer.fromArrayBuffer(global.ref(), value, exception), + }, + else => null, + }; + } +}; +pub const ErrorCode = @import("./nodejs_error_code.zig").Code; + +// We can't really use Zig's error handling for syscalls because Node.js expects the "real" errno to be returned +// and various issues with std.os that make it too unstable for arbitrary user input (e.g. 
how .BADF is marked as unreachable) + +/// https://github.com/nodejs/node/blob/master/lib/buffer.js#L587 +pub const Encoding = enum(u8) { + utf8, + ucs2, + utf16le, + latin1, + ascii, + base64, + base64url, + hex, + + /// Refer to the buffer's encoding + buffer, + + pub fn isBinaryToText(this: Encoding) bool { + return switch (this) { + .hex, .base64, .base64url => true, + else => false, + }; + } + + const Eight = strings.ExactSizeMatcher(8); + /// Caller must verify the value is a string + pub fn fromStringValue(value: JSC.JSValue, global: *JSC.JSGlobalObject) ?Encoding { + var sliced = value.toSlice(global, bun.default_allocator); + defer sliced.deinit(); + return from(sliced.slice()); + } + + /// Caller must verify the value is a string + pub fn from(slice: []const u8) ?Encoding { + return switch (slice.len) { + 0...2 => null, + else => switch (Eight.matchLower(slice)) { + Eight.case("utf-8"), Eight.case("utf8") => Encoding.utf8, + Eight.case("ucs-2"), Eight.case("ucs2") => Encoding.ucs2, + Eight.case("utf16-le"), Eight.case("utf16le") => Encoding.utf16le, + Eight.case("latin1") => Encoding.latin1, + Eight.case("ascii") => Encoding.ascii, + Eight.case("base64") => Encoding.base64, + Eight.case("hex") => Encoding.hex, + Eight.case("buffer") => Encoding.buffer, + else => null, + }, + "base64url".len => brk: { + if (strings.eqlCaseInsensitiveASCII(slice, "base64url", false)) { + break :brk Encoding.base64url; + } + break :brk null; + }, + }; + } + + pub fn encodeWithSize(encoding: Encoding, globalThis: *JSC.JSGlobalObject, comptime size: usize, input: *const [size]u8, exception: JSC.C.ExceptionRef) JSC.JSValue { + switch (encoding) { + .base64 => { + var base64: [std.base64.standard.Encoder.calcSize(size)]u8 = undefined; + const result = JSC.ZigString.init(std.base64.standard.Encoder.encode(&base64, input)).toValueGC(globalThis); + return result; + }, + .base64url => { + var buf: [std.base64.url_safe.Encoder.calcSize(size) + "data:;base64,".len]u8 = undefined; + var encoded = std.base64.url_safe.Encoder.encode(buf["data:;base64,".len..], input); + buf[0.."data:;base64,".len].* = "data:;base64,".*; + + const result = JSC.ZigString.init(buf[0 .. 
"data:;base64,".len + encoded.len]).toValueGC(globalThis); + return result; + }, + .hex => { + var buf: [size * 4]u8 = undefined; + var out = std.fmt.bufPrint(&buf, "{}", .{std.fmt.fmtSliceHexLower(input)}) catch unreachable; + const result = JSC.ZigString.init(out).toValueGC(globalThis); + return result; + }, + else => { + JSC.throwInvalidArguments("Unexpected encoding", .{}, globalThis.ref(), exception); + return JSC.JSValue.zero; + }, + } + } +}; + +const PathOrBuffer = union(Tag) { + path: PathString, + buffer: Buffer, + + pub const Tag = enum { path, buffer }; + + pub inline fn slice(this: PathOrBuffer) []const u8 { + return this.path.slice(); + } +}; + +pub fn CallbackTask(comptime Result: type) type { + return struct { + callback: JSC.C.JSObjectRef, + option: Option, + success: bool = false, + + pub const Option = union { + err: JSC.SystemError, + result: Result, + }; + }; +} + +pub const PathLike = union(Tag) { + string: PathString, + buffer: Buffer, + url: void, + + pub const Tag = enum { string, buffer, url }; + + pub inline fn slice(this: PathLike) string { + return switch (this) { + .string => this.string.slice(), + .buffer => this.buffer.slice(), + else => unreachable, // TODO: + }; + } + + pub fn sliceZWithForceCopy(this: PathLike, buf: *[bun.MAX_PATH_BYTES]u8, comptime force: bool) [:0]const u8 { + var sliced = this.slice(); + + if (sliced.len == 0) return ""; + + if (comptime !force) { + if (sliced[sliced.len - 1] == 0) { + var sliced_ptr = sliced.ptr; + return sliced_ptr[0 .. sliced.len - 1 :0]; + } + } + + @memcpy(buf, sliced.ptr, sliced.len); + buf[sliced.len] = 0; + return buf[0..sliced.len :0]; + } + + pub inline fn sliceZ(this: PathLike, buf: *[bun.MAX_PATH_BYTES]u8) [:0]const u8 { + return sliceZWithForceCopy(this, buf, false); + } + + pub inline fn sliceZAssume( + this: PathLike, + ) [:0]const u8 { + return std.meta.assumeSentinel(this.slice(), 0); + } + + pub fn toJS(this: PathLike, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) JSC.C.JSValueRef { + return switch (this) { + .string => this.string.toJS(ctx, exception), + .buffer => this.buffer.toJSObjectRef(ctx, exception), + else => unreachable, + }; + } + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?PathLike { + const arg = arguments.next() orelse return null; + switch (arg.jsType()) { + JSC.JSValue.JSType.Uint8Array, + JSC.JSValue.JSType.DataView, + => { + const buffer = Buffer.fromTypedArray(ctx, arg, exception); + if (exception.* != null) return null; + if (!Valid.pathBuffer(buffer, ctx, exception)) return null; + + arguments.protectEat(); + return PathLike{ .buffer = buffer }; + }, + + JSC.JSValue.JSType.ArrayBuffer => { + const buffer = Buffer.fromArrayBuffer(ctx, arg, exception); + if (exception.* != null) return null; + if (!Valid.pathBuffer(buffer, ctx, exception)) return null; + + arguments.protectEat(); + + return PathLike{ .buffer = buffer }; + }, + + JSC.JSValue.JSType.String, + JSC.JSValue.JSType.StringObject, + JSC.JSValue.JSType.DerivedStringObject, + => { + var zig_str = JSC.ZigString.init(""); + arg.toZigString(&zig_str, ctx.ptr()); + + if (!Valid.pathString(zig_str, ctx, exception)) return null; + + arguments.protectEat(); + + if (zig_str.is16Bit()) { + var printed = std.mem.span(std.fmt.allocPrintZ(arguments.arena.allocator(), "{}", .{zig_str}) catch unreachable); + return PathLike{ .string = PathString.init(printed.ptr[0 .. 
printed.len + 1]) }; + } + + return PathLike{ .string = PathString.init(zig_str.slice()) }; + }, + else => { + if (arg.as(JSC.DOMURL)) |domurl| { + var zig_str = domurl.pathname(); + if (!Valid.pathString(zig_str, ctx, exception)) return null; + + arguments.protectEat(); + + if (zig_str.is16Bit()) { + var printed = std.mem.span(std.fmt.allocPrintZ(arguments.arena.allocator(), "{}", .{zig_str}) catch unreachable); + return PathLike{ .string = PathString.init(printed.ptr[0 .. printed.len + 1]) }; + } + + return PathLike{ .string = PathString.init(zig_str.slice()) }; + } + + return null; + }, + } + } +}; + +pub const Valid = struct { + pub fn fileDescriptor(fd: FileDescriptor, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) bool { + if (fd < 0) { + JSC.throwInvalidArguments("Invalid file descriptor, must not be negative number", .{}, ctx, exception); + return false; + } + + return true; + } + + pub fn pathString(zig_str: JSC.ZigString, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) bool { + switch (zig_str.len) { + 0 => { + JSC.throwInvalidArguments("Invalid path string: can't be empty", .{}, ctx, exception); + return false; + }, + 1...bun.MAX_PATH_BYTES => return true, + else => { + // TODO: should this be an EINVAL? + JSC.throwInvalidArguments( + comptime std.fmt.comptimePrint("Invalid path string: path is too long (max: {d})", .{bun.MAX_PATH_BYTES}), + .{}, + ctx, + exception, + ); + return false; + }, + } + + unreachable; + } + + pub fn pathBuffer(buffer: Buffer, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) bool { + const slice = buffer.slice(); + switch (slice.len) { + 0 => { + JSC.throwInvalidArguments("Invalid path buffer: can't be empty", .{}, ctx, exception); + return false; + }, + + else => { + + // TODO: should this be an EINVAL? 
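+ // Rejects anything longer than bun.MAX_PATH_BYTES, mirroring pathString() above.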
+ JSC.throwInvalidArguments( + comptime std.fmt.comptimePrint("Invalid path buffer: path is too long (max: {d})", .{bun.MAX_PATH_BYTES}), + .{}, + ctx, + exception, + ); + return false; + }, + 1...bun.MAX_PATH_BYTES => return true, + } + + unreachable; + } +}; + +pub const ArgumentsSlice = struct { + remaining: []const JSC.JSValue, + vm: *JSC.VirtualMachine, + arena: std.heap.ArenaAllocator = std.heap.ArenaAllocator.init(bun.default_allocator), + all: []const JSC.JSValue, + threw: bool = false, + protected: std.bit_set.IntegerBitSet(32) = std.bit_set.IntegerBitSet(32).initEmpty(), + + pub fn unprotect(this: *ArgumentsSlice) void { + var iter = this.protected.iterator(.{}); + var ctx = this.vm.global.ref(); + while (iter.next()) |i| { + JSC.C.JSValueUnprotect(ctx, this.all[i].asObjectRef()); + } + this.protected = std.bit_set.IntegerBitSet(32).initEmpty(); + } + + pub fn deinit(this: *ArgumentsSlice) void { + this.unprotect(); + this.arena.deinit(); + } + + pub fn protectEat(this: *ArgumentsSlice) void { + if (this.remaining.len == 0) return; + const index = this.all.len - this.remaining.len; + this.protected.set(index); + JSC.C.JSValueProtect(this.vm.global.ref(), this.all[index].asObjectRef()); + this.eat(); + } + + pub fn protectEatNext(this: *ArgumentsSlice) ?JSC.JSValue { + if (this.remaining.len == 0) return null; + return this.nextEat(); + } + + pub fn from(vm: *JSC.VirtualMachine, arguments: []const JSC.JSValueRef) ArgumentsSlice { + return init(vm, @ptrCast([*]const JSC.JSValue, arguments.ptr)[0..arguments.len]); + } + pub fn init(vm: *JSC.VirtualMachine, arguments: []const JSC.JSValue) ArgumentsSlice { + return ArgumentsSlice{ + .remaining = arguments, + .vm = vm, + .all = arguments, + }; + } + + pub inline fn len(this: *const ArgumentsSlice) u16 { + return @truncate(u16, this.remaining.len); + } + pub fn eat(this: *ArgumentsSlice) void { + if (this.remaining.len == 0) { + return; + } + + this.remaining = this.remaining[1..]; + } + + pub fn next(this: *ArgumentsSlice) ?JSC.JSValue { + if (this.remaining.len == 0) { + return null; + } + + return this.remaining[0]; + } + + pub fn nextEat(this: *ArgumentsSlice) ?JSC.JSValue { + if (this.remaining.len == 0) { + return null; + } + defer this.eat(); + return this.remaining[0]; + } +}; + +pub fn fileDescriptorFromJS(ctx: JSC.C.JSContextRef, value: JSC.JSValue, exception: JSC.C.ExceptionRef) ?FileDescriptor { + if (!value.isNumber() or value.isBigInt()) return null; + const fd = value.toInt32(); + if (!Valid.fileDescriptor(fd, ctx, exception)) { + return null; + } + + return @truncate(FileDescriptor, fd); +} + +var _get_time_prop_string: ?JSC.C.JSStringRef = null; +pub fn timeLikeFromJS(ctx: JSC.C.JSContextRef, value_: JSC.JSValue, exception: JSC.C.ExceptionRef) ?TimeLike { + var value = value_; + if (JSC.C.JSValueIsDate(ctx, value.asObjectRef())) { + // TODO: make this faster + var get_time_prop = _get_time_prop_string orelse brk: { + var str = JSC.C.JSStringCreateStatic("getTime", "getTime".len); + _get_time_prop_string = str; + break :brk str; + }; + + var getTimeFunction = JSC.C.JSObjectGetProperty(ctx, value.asObjectRef(), get_time_prop, exception); + if (exception.* != null) return null; + value = JSC.JSValue.fromRef(JSC.C.JSObjectCallAsFunction(ctx, getTimeFunction, value.asObjectRef(), 0, null, exception) orelse return null); + if (exception.* != null) return null; + } + + const seconds = value.asNumber(); + if (!std.math.isFinite(seconds)) { + return null; + } + + return @floatToInt(TimeLike, @maximum(@floor(seconds), 
std.math.minInt(TimeLike))); +} + +pub fn modeFromJS(ctx: JSC.C.JSContextRef, value: JSC.JSValue, exception: JSC.C.ExceptionRef) ?Mode { + const mode_int = if (value.isNumber()) + @truncate(Mode, value.to(Mode)) + else brk: { + if (value.isUndefinedOrNull()) return null; + + // An easier method of constructing the mode is to use a sequence of + // three octal digits (e.g. 765). The left-most digit (7 in the example), + // specifies the permissions for the file owner. The middle digit (6 in + // the example), specifies permissions for the group. The right-most + // digit (5 in the example), specifies the permissions for others. + + var zig_str = JSC.ZigString.init(""); + value.toZigString(&zig_str, ctx.ptr()); + var slice = zig_str.slice(); + if (strings.hasPrefix(slice, "0o")) { + slice = slice[2..]; + } + + break :brk std.fmt.parseInt(Mode, slice, 8) catch { + JSC.throwInvalidArguments("Invalid mode string: must be an octal number", .{}, ctx, exception); + return null; + }; + }; + + if (mode_int < 0 or mode_int > 0o777) { + JSC.throwInvalidArguments("Invalid mode: must be an octal number", .{}, ctx, exception); + return null; + } + + return mode_int; +} + +pub const PathOrFileDescriptor = union(Tag) { + path: PathLike, + fd: FileDescriptor, + + pub const Tag = enum { fd, path }; + + pub fn hash(this: PathOrFileDescriptor) u64 { + return switch (this) { + .path => std.hash.Wyhash.hash(0, this.path.slice()), + .fd => std.hash.Wyhash.hash(0, std.mem.asBytes(&this.fd)), + }; + } + + pub fn copyToStream(this: PathOrFileDescriptor, flags: FileSystemFlags, auto_close: bool, mode: Mode, allocator: std.mem.Allocator, stream: *Stream) !void { + switch (this) { + .fd => |fd| { + stream.content = Stream.Content{ + .file = .{ + .fd = fd, + .flags = flags, + .mode = mode, + }, + }; + stream.content_type = .file; + }, + .path => |path| { + stream.content = Stream.Content{ + .file_path = .{ + .path = PathString.init(std.mem.span(try allocator.dupeZ(u8, path.slice()))), + .auto_close = auto_close, + .file = .{ + .fd = std.math.maxInt(FileDescriptor), + .flags = flags, + .mode = mode, + }, + .opened = false, + }, + }; + stream.content_type = .file_path; + }, + } + } + + pub fn fromJS(ctx: JSC.C.JSContextRef, arguments: *ArgumentsSlice, exception: JSC.C.ExceptionRef) ?PathOrFileDescriptor { + const first = arguments.next() orelse return null; + + if (fileDescriptorFromJS(ctx, first, exception)) |fd| { + arguments.eat(); + return PathOrFileDescriptor{ .fd = fd }; + } + + if (exception.* != null) return null; + + return PathOrFileDescriptor{ .path = PathLike.fromJS(ctx, arguments, exception) orelse return null }; + } + + pub fn toJS(this: PathOrFileDescriptor, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) JSC.C.JSValueRef { + return switch (this) { + .path => this.path.toJS(ctx, exception), + .fd => JSC.JSValue.jsNumberFromInt32(@intCast(i32, this.fd)).asRef(), + }; + } +}; + +pub const FileSystemFlags = enum(Mode) { + /// Open file for appending. The file is created if it does not exist. + @"a" = std.os.O.APPEND, + /// Like 'a' but fails if the path exists. + // @"ax" = std.os.O.APPEND | std.os.O.EXCL, + /// Open file for reading and appending. The file is created if it does not exist. + // @"a+" = std.os.O.APPEND | std.os.O.RDWR, + /// Like 'a+' but fails if the path exists. + // @"ax+" = std.os.O.APPEND | std.os.O.RDWR | std.os.O.EXCL, + /// Open file for appending in synchronous mode. The file is created if it does not exist. 
+ // @"as" = std.os.O.APPEND, + /// Open file for reading and appending in synchronous mode. The file is created if it does not exist. + // @"as+" = std.os.O.APPEND | std.os.O.RDWR, + /// Open file for reading. An exception occurs if the file does not exist. + @"r" = std.os.O.RDONLY, + /// Open file for reading and writing. An exception occurs if the file does not exist. + // @"r+" = std.os.O.RDWR, + /// Open file for reading and writing in synchronous mode. Instructs the operating system to bypass the local file system cache. + /// This is primarily useful for opening files on NFS mounts as it allows skipping the potentially stale local cache. It has a very real impact on I/O performance so using this flag is not recommended unless it is needed. + /// This doesn't turn fs.open() or fsPromises.open() into a synchronous blocking call. If synchronous operation is desired, something like fs.openSync() should be used. + // @"rs+" = std.os.O.RDWR, + /// Open file for writing. The file is created (if it does not exist) or truncated (if it exists). + @"w" = std.os.O.WRONLY | std.os.O.CREAT, + /// Like 'w' but fails if the path exists. + // @"wx" = std.os.O.WRONLY | std.os.O.TRUNC, + // /// Open file for reading and writing. The file is created (if it does not exist) or truncated (if it exists). + // @"w+" = std.os.O.RDWR | std.os.O.CREAT, + // /// Like 'w+' but fails if the path exists. + // @"wx+" = std.os.O.RDWR | std.os.O.EXCL, + + _, + + const O_RDONLY: Mode = std.os.O.RDONLY; + const O_RDWR: Mode = std.os.O.RDWR; + const O_APPEND: Mode = std.os.O.APPEND; + const O_CREAT: Mode = std.os.O.CREAT; + const O_WRONLY: Mode = std.os.O.WRONLY; + const O_EXCL: Mode = std.os.O.EXCL; + const O_SYNC: Mode = 0; + const O_TRUNC: Mode = std.os.O.TRUNC; + + pub fn fromJS(ctx: JSC.C.JSContextRef, val: JSC.JSValue, exception: JSC.C.ExceptionRef) ?FileSystemFlags { + if (val.isUndefinedOrNull()) { + return @intToEnum(FileSystemFlags, O_RDONLY); + } + + if (val.isNumber()) { + const number = val.toInt32(); + if (!(number > 0o000 and number < 0o777)) { + JSC.throwInvalidArguments( + "Invalid integer mode: must be a number between 0o000 and 0o777", + .{}, + ctx, + exception, + ); + return null; + } + return @intToEnum(FileSystemFlags, number); + } + + const jsType = val.jsType(); + if (jsType.isStringLike()) { + var zig_str = JSC.ZigString.init(""); + val.toZigString(&zig_str, ctx.ptr()); + + var buf: [4]u8 = .{ 0, 0, 0, 0 }; + @memcpy(&buf, zig_str.ptr, @minimum(buf.len, zig_str.len)); + const Matcher = strings.ExactSizeMatcher(4); + + // https://github.com/nodejs/node/blob/8c3637cd35cca352794e2c128f3bc5e6b6c41380/lib/internal/fs/utils.js#L565 + const flags = switch (Matcher.match(buf[0..4])) { + Matcher.case("r") => O_RDONLY, + Matcher.case("rs"), Matcher.case("sr") => O_RDONLY | O_SYNC, + Matcher.case("r+") => O_RDWR, + Matcher.case("rs+"), Matcher.case("sr+") => O_RDWR | O_SYNC, + + Matcher.case("w") => O_TRUNC | O_CREAT | O_WRONLY, + Matcher.case("wx"), Matcher.case("xw") => O_TRUNC | O_CREAT | O_WRONLY | O_EXCL, + + Matcher.case("w+") => O_TRUNC | O_CREAT | O_RDWR, + Matcher.case("wx+"), Matcher.case("xw+") => O_TRUNC | O_CREAT | O_RDWR | O_EXCL, + + Matcher.case("a") => O_APPEND | O_CREAT | O_WRONLY, + Matcher.case("ax"), Matcher.case("xa") => O_APPEND | O_CREAT | O_WRONLY | O_EXCL, + Matcher.case("as"), Matcher.case("sa") => O_APPEND | O_CREAT | O_WRONLY | O_SYNC, + + Matcher.case("a+") => O_APPEND | O_CREAT | O_RDWR, + Matcher.case("ax+"), Matcher.case("xa+") => O_APPEND | O_CREAT | O_RDWR | O_EXCL, + 
Matcher.case("as+"), Matcher.case("sa+") => O_APPEND | O_CREAT | O_RDWR | O_SYNC, + + Matcher.case("") => { + JSC.throwInvalidArguments( + "Invalid flag: string can't be empty", + .{}, + ctx, + exception, + ); + return null; + }, + else => { + JSC.throwInvalidArguments( + "Invalid flag. Learn more at https://nodejs.org/api/fs.html#fs_file_system_flags", + .{}, + ctx, + exception, + ); + return null; + }, + }; + + return @intToEnum(FileSystemFlags, flags); + } + + return null; + } +}; + +/// Milliseconds precision +pub const Date = enum(u64) { + _, + + pub fn toJS(this: Date, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) JSC.C.JSValueRef { + const seconds = @floatCast(f64, @intToFloat(f128, @enumToInt(this)) * 1000.0); + const unix_timestamp = JSC.C.JSValueMakeNumber(ctx, seconds); + const array: [1]JSC.C.JSValueRef = .{unix_timestamp}; + const obj = JSC.C.JSObjectMakeDate(ctx, 1, &array, exception); + return obj; + } +}; + +fn StatsLike(comptime name: [:0]const u8, comptime T: type) type { + return struct { + const This = @This(); + + pub const Class = JSC.NewClass( + This, + .{ .name = name }, + .{ + .isFile = .{ + .rfn = JSC.wrap(This, "isFile", false), + }, + .isDirectory = .{ + .rfn = JSC.wrap(This, "isDirectory", false), + }, + .finalize = finalize, + }, + .{ + .dev = .{ + .get = JSC.To.JS.Getter(This, .dev), + .name = "dev", + }, + .ino = .{ + .get = JSC.To.JS.Getter(This, .ino), + .name = "ino", + }, + .mode = .{ + .get = JSC.To.JS.Getter(This, .mode), + .name = "mode", + }, + .nlink = .{ + .get = JSC.To.JS.Getter(This, .nlink), + .name = "nlink", + }, + .uid = .{ + .get = JSC.To.JS.Getter(This, .uid), + .name = "uid", + }, + .gid = .{ + .get = JSC.To.JS.Getter(This, .gid), + .name = "gid", + }, + .rdev = .{ + .get = JSC.To.JS.Getter(This, .rdev), + .name = "rdev", + }, + .size = .{ + .get = JSC.To.JS.Getter(This, .size), + .name = "size", + }, + .blksize = .{ + .get = JSC.To.JS.Getter(This, .blksize), + .name = "blksize", + }, + .blocks = .{ + .get = JSC.To.JS.Getter(This, .blocks), + .name = "blocks", + }, + .atime = .{ + .get = JSC.To.JS.Getter(This, .atime), + .name = "atime", + }, + .mtime = .{ + .get = JSC.To.JS.Getter(This, .mtime), + .name = "mtime", + }, + .ctime = .{ + .get = JSC.To.JS.Getter(This, .ctime), + .name = "ctime", + }, + .birthtime = .{ + .get = JSC.To.JS.Getter(This, .birthtime), + .name = "birthtime", + }, + .atime_ms = .{ + .get = JSC.To.JS.Getter(This, .atime_ms), + .name = "atimeMs", + }, + .mtime_ms = .{ + .get = JSC.To.JS.Getter(This, .mtime_ms), + .name = "mtimeMs", + }, + .ctime_ms = .{ + .get = JSC.To.JS.Getter(This, .ctime_ms), + .name = "ctimeMs", + }, + .birthtime_ms = .{ + .get = JSC.To.JS.Getter(This, .birthtime_ms), + .name = "birthtimeMs", + }, + }, + ); + + dev: T, + ino: T, + mode: T, + nlink: T, + uid: T, + gid: T, + rdev: T, + size: T, + blksize: T, + blocks: T, + atime_ms: T, + mtime_ms: T, + ctime_ms: T, + birthtime_ms: T, + atime: Date, + mtime: Date, + ctime: Date, + birthtime: Date, + + pub fn init(stat_: os.Stat) @This() { + const atime = stat_.atime(); + const mtime = stat_.mtime(); + const ctime = stat_.ctime(); + return @This(){ + .dev = @truncate(T, @intCast(i64, stat_.dev)), + .ino = @truncate(T, @intCast(i64, stat_.ino)), + .mode = @truncate(T, @intCast(i64, stat_.mode)), + .nlink = @truncate(T, @intCast(i64, stat_.nlink)), + .uid = @truncate(T, @intCast(i64, stat_.uid)), + .gid = @truncate(T, @intCast(i64, stat_.gid)), + .rdev = @truncate(T, @intCast(i64, stat_.rdev)), + .size = @truncate(T, @intCast(i64, 
stat_.size)), + .blksize = @truncate(T, @intCast(i64, stat_.blksize)), + .blocks = @truncate(T, @intCast(i64, stat_.blocks)), + .atime_ms = @truncate(T, @intCast(i64, if (atime.tv_nsec > 0) (@intCast(usize, atime.tv_nsec) / std.time.ns_per_ms) else 0)), + .mtime_ms = @truncate(T, @intCast(i64, if (mtime.tv_nsec > 0) (@intCast(usize, mtime.tv_nsec) / std.time.ns_per_ms) else 0)), + .ctime_ms = @truncate(T, @intCast(i64, if (ctime.tv_nsec > 0) (@intCast(usize, ctime.tv_nsec) / std.time.ns_per_ms) else 0)), + .atime = @intToEnum(Date, @intCast(u64, @maximum(atime.tv_sec, 0))), + .mtime = @intToEnum(Date, @intCast(u64, @maximum(mtime.tv_sec, 0))), + .ctime = @intToEnum(Date, @intCast(u64, @maximum(ctime.tv_sec, 0))), + + // Linux doesn't include this info in stat + // maybe it does in statx, but do you really need birthtime? If you do please file an issue. + .birthtime_ms = if (Environment.isLinux) + 0 + else + @truncate(T, @intCast(i64, if (stat_.birthtime().tv_nsec > 0) (@intCast(usize, stat_.birthtime().tv_nsec) / std.time.ns_per_ms) else 0)), + + .birthtime = if (Environment.isLinux) + @intToEnum(Date, 0) + else + @intToEnum(Date, @intCast(u64, @maximum(stat_.birthtime().tv_sec, 0))), + }; + } + + pub fn isFile(this: *Stats) JSC.JSValue { + return JSC.JSValue.jsBoolean(os.S.ISREG(@intCast(Mode, this.mode))); + } + pub fn isDirectory(this: *Stats) JSC.JSValue { + return JSC.JSValue.jsBoolean(os.S.ISDIR(@intCast(Mode, this.mode))); + } + + pub fn toJS(this: Stats, ctx: JSC.C.JSContextRef, _: JSC.C.ExceptionRef) JSC.C.JSValueRef { + var _this = bun.default_allocator.create(Stats) catch unreachable; + _this.* = this; + return Class.make(ctx, _this); + } + + pub fn finalize(this: *Stats) void { + bun.default_allocator.destroy(this); + } + }; +} + +pub const Stats = StatsLike("Stats", i32); +pub const BigIntStats = StatsLike("BigIntStats", i64); + +/// A class representing a directory stream. +/// +/// Created by {@link opendir}, {@link opendirSync}, or `fsPromises.opendir()`. +/// +/// ```js +/// import { opendir } from 'fs/promises'; +/// +/// try { +/// const dir = await opendir('./'); +/// for await (const dirent of dir) +/// console.log(dirent.name); +/// } catch (err) { +/// console.error(err); +/// } +/// ``` +/// +/// When using the async iterator, the `fs.Dir` object will be automatically +/// closed after the iterator exits. 
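+/// Each entry yielded by the stream is surfaced as a DirEnt (name + kind), defined below.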
+/// @since v12.12.0 +pub const DirEnt = struct { + name: PathString, + // not publicly exposed + kind: Kind, + + pub const Kind = std.fs.File.Kind; + + pub fn isBlockDevice( + this: *DirEnt, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + _: []const JSC.C.JSValueRef, + _: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + return JSC.C.JSValueMakeBoolean(ctx, this.kind == std.fs.File.Kind.BlockDevice); + } + pub fn isCharacterDevice( + this: *DirEnt, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + _: []const JSC.C.JSValueRef, + _: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + return JSC.C.JSValueMakeBoolean(ctx, this.kind == std.fs.File.Kind.CharacterDevice); + } + pub fn isDirectory( + this: *DirEnt, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + _: []const JSC.C.JSValueRef, + _: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + return JSC.C.JSValueMakeBoolean(ctx, this.kind == std.fs.File.Kind.Directory); + } + pub fn isFIFO( + this: *DirEnt, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + _: []const JSC.C.JSValueRef, + _: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + return JSC.C.JSValueMakeBoolean(ctx, this.kind == std.fs.File.Kind.NamedPipe or this.kind == std.fs.File.Kind.EventPort); + } + pub fn isFile( + this: *DirEnt, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + _: []const JSC.C.JSValueRef, + _: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + return JSC.C.JSValueMakeBoolean(ctx, this.kind == std.fs.File.Kind.File); + } + pub fn isSocket( + this: *DirEnt, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + _: []const JSC.C.JSValueRef, + _: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + return JSC.C.JSValueMakeBoolean(ctx, this.kind == std.fs.File.Kind.UnixDomainSocket); + } + pub fn isSymbolicLink( + this: *DirEnt, + ctx: JSC.C.JSContextRef, + _: JSC.C.JSObjectRef, + _: JSC.C.JSObjectRef, + _: []const JSC.C.JSValueRef, + _: JSC.C.ExceptionRef, + ) JSC.C.JSValueRef { + return JSC.C.JSValueMakeBoolean(ctx, this.kind == std.fs.File.Kind.SymLink); + } + + pub const Class = JSC.NewClass(DirEnt, .{ .name = "DirEnt" }, .{ + .isBlockDevice = .{ + .name = "isBlockDevice", + .rfn = isBlockDevice, + }, + .isCharacterDevice = .{ + .name = "isCharacterDevice", + .rfn = isCharacterDevice, + }, + .isDirectory = .{ + .name = "isDirectory", + .rfn = isDirectory, + }, + .isFIFO = .{ + .name = "isFIFO", + .rfn = isFIFO, + }, + .isFile = .{ + .name = "isFile", + .rfn = isFile, + }, + .isSocket = .{ + .name = "isSocket", + .rfn = isSocket, + }, + .isSymbolicLink = .{ + .name = "isSymbolicLink", + .rfn = isSymbolicLink, + }, + }, .{ + .name = .{ + .get = JSC.To.JS.Getter(DirEnt, .name), + .name = "name", + }, + }); + + pub fn finalize(this: *DirEnt) void { + bun.default_allocator.free(this.name.slice()); + bun.default_allocator.destroy(this); + } +}; + +pub const Emitter = struct { + pub const Listener = struct { + once: bool = false, + callback: JSC.JSValue, + + pub const List = struct { + pub const ArrayList = std.MultiArrayList(Listener); + list: ArrayList = ArrayList{}, + once_count: u32 = 0, + + pub fn append(this: *List, allocator: std.mem.Allocator, ctx: JSC.C.JSContextRef, listener: Listener) !void { + JSC.C.JSValueProtect(ctx, listener.callback.asObjectRef()); + try this.list.append(allocator, listener); + this.once_count +|= @as(u32, @boolToInt(listener.once)); + } + + pub fn prepend(this: *List, allocator: std.mem.Allocator, ctx: 
JSC.C.JSContextRef, listener: Listener) !void { + JSC.C.JSValueProtect(ctx, listener.callback.asObjectRef()); + try this.list.ensureUnusedCapacity(allocator, 1); + this.list.insertAssumeCapacity(0, listener); + this.once_count +|= @as(u32, @boolToInt(listener.once)); + } + + // removeListener() will remove, at most, one instance of a listener from the + // listener array. If any single listener has been added multiple times to the + // listener array for the specified eventName, then removeListener() must be + // called multiple times to remove each instance. + pub fn remove(this: *List, ctx: JSC.C.JSContextRef, callback: JSC.JSValue) bool { + const callbacks = this.list.items(.callback); + + for (callbacks) |item, i| { + if (callback.eqlValue(item)) { + JSC.C.JSValueUnprotect(ctx, callback.asObjectRef()); + this.once_count -|= @as(u32, @boolToInt(this.list.items(.once)[i])); + this.list.orderedRemove(i); + return true; + } + } + + return false; + } + + pub fn emit(this: *List, globalThis: *JSC.JSGlobalObject, value: JSC.JSValue) void { + var i: usize = 0; + outer: while (true) { + var slice = this.list.slice(); + var callbacks = slice.items(.callback); + var once = slice.items(.once); + while (i < callbacks.len) : (i += 1) { + const callback = callbacks[i]; + + globalThis.enqueueMicrotask1( + callback, + value, + ); + + if (once[i]) { + this.once_count -= 1; + JSC.C.JSValueUnprotect(globalThis.ref(), callback.asObjectRef()); + this.list.orderedRemove(i); + slice = this.list.slice(); + callbacks = slice.items(.callback); + once = slice.items(.once); + continue :outer; + } + } + + return; + } + } + }; + }; + + pub fn New(comptime EventType: type) type { + return struct { + const EventEmitter = @This(); + pub const Map = std.enums.EnumArray(EventType, Listener.List); + listeners: Map = Map.initFill(Listener.List{}), + + pub fn addListener(this: *EventEmitter, ctx: JSC.C.JSContextRef, event: EventType, listener: Emitter.Listener) !void { + try this.listeners.getPtr(event).append(bun.default_allocator, ctx, listener); + } + + pub fn prependListener(this: *EventEmitter, ctx: JSC.C.JSContextRef, event: EventType, listener: Emitter.Listener) !void { + try this.listeners.getPtr(event).prepend(bun.default_allocator, ctx, listener); + } + + pub fn emit(this: *EventEmitter, event: EventType, globalThis: *JSC.JSGlobalObject, value: JSC.JSValue) void { + this.listeners.getPtr(event).emit(globalThis, value); + } + + pub fn removeListener(this: *EventEmitter, ctx: JSC.C.JSContextRef, event: EventType, callback: JSC.JSValue) bool { + return this.listeners.getPtr(event).remove(ctx, callback); + } + }; + } +}; + +// pub fn Untag(comptime Union: type) type { +// const info: std.builtin.TypeInfo.Union = @typeInfo(Union); +// const tag = info.tag_type orelse @compileError("Must be tagged"); +// return struct { +// pub const Tag = tag; +// pub const Union = +// }; +// } + +pub const Stream = struct { + sink_type: Sink.Type, + sink: Sink, + content: Content, + content_type: Content.Type, + allocator: std.mem.Allocator, + + pub fn open(this: *Stream) ?JSC.Node.Syscall.Error { + switch (Syscall.open(this.content.file_path.path.sliceAssumeZ(), @enumToInt(this.content.file_path.file.flags))) { + .err => |err| { + return err.withPath(this.content.file_path.path.slice()); + }, + .result => |fd| { + this.content.file_path.file.fd = fd; + this.content.file_path.opened = true; + this.emit(.open); + return null; + }, + } + } + + pub fn getFd(this: *Stream) FileDescriptor { + return switch (this.content_type) { + .file => 
this.content.file.fd, + .file_path => if (comptime Environment.allow_assert) brk: { + std.debug.assert(this.content.file_path.opened); + break :brk this.content.file_path.file.fd; + } else this.content.file_path.file.fd, + else => unreachable, + }; + } + + pub fn close(this: *Stream) ?JSC.Node.Syscall.Error { + const fd = this.getFd(); + + // Don't ever close stdin, stdout, or stderr + // we are assuming that these are always 0 1 2, which is not strictly true in some cases + if (fd <= 2) { + return null; + } + + if (Syscall.close(fd)) |err| { + return err; + } + + switch (this.content_type) { + .file_path => { + this.content.file_path.opened = false; + this.content.file_path.file.fd = std.math.maxInt(FileDescriptor); + }, + .file => { + this.content.file.fd = std.math.maxInt(FileDescriptor); + }, + else => {}, + } + + this.emit(.Close); + } + + const CommonEvent = enum { Error, Open, Close }; + pub fn emit(this: *Stream, comptime event: CommonEvent) void { + switch (this.sink_type) { + .readable => { + switch (comptime event) { + .Open => this.sink.readable.emit(.Open), + .Close => this.sink.readable.emit(.Close), + else => unreachable, + } + }, + .writable => { + switch (comptime event) { + .Open => this.sink.writable.emit(.Open), + .Close => this.sink.writable.emit(.Close), + else => unreachable, + } + }, + } + } + + // This allocates a new stream object + pub fn toJS(this: *Stream, ctx: JSC.C.JSContextRef, _: JSC.C.ExceptionRef) JSC.C.JSValueRef { + switch (this.sink_type) { + .readable => { + var readable = &this.sink.readable.state; + return readable.create( + ctx.ptr(), + ).asObjectRef(); + }, + .writable => { + var writable = &this.sink.writable.state; + return writable.create( + ctx.ptr(), + ).asObjectRef(); + }, + } + } + + pub fn deinit(this: *Stream) void { + this.allocator.destroy(this); + } + + pub const Sink = union { + readable: Readable, + writable: Writable, + + pub const Type = enum(u8) { + readable, + writable, + }; + }; + + pub const Consumed = u52; + + const Response = struct { + bytes: [8]u8 = std.mem.zeroes([8]u8), + }; + + const Error = union(Type) { + Syscall: Syscall.Error, + JavaScript: JSC.JSValue, + Internal: anyerror, + + pub const Type = enum { + Syscall, + JavaScript, + Internal, + }; + }; + + pub const Content = union { + file: File, + file_path: FilePath, + socket: Socket, + buffer: *Buffer, + stream: *Stream, + javascript: JSC.JSValue, + + pub fn getFile(this: *Content, content_type: Content.Type) *File { + return switch (content_type) { + .file => &this.file, + .file_path => &this.file_path.file, + else => unreachable, + }; + } + + pub const File = struct { + fd: FileDescriptor, + flags: FileSystemFlags, + mode: Mode, + size: Consumed = std.math.maxInt(Consumed), + + // pub fn read(this: *File, comptime chunk_type: Content.Type, chunk: Source.Type.of(chunk_type)) Response {} + + pub inline fn setPermissions(this: File) meta.ReturnOf(Syscall.fchmod) { + return Syscall.fchmod(this.fd, this.mode); + } + }; + + pub const FilePath = struct { + path: PathString, + auto_close: bool = false, + file: File = File{ .fd = std.math.maxInt(FileDescriptor), .mode = 0o666, .flags = FileSystemFlags.@"r" }, + opened: bool = false, + + // pub fn read(this: *File, comptime chunk_type: Content.Type, chunk: Source.Type.of(chunk_type)) Response {} + }; + + pub const Socket = struct { + fd: FileDescriptor, + flags: FileSystemFlags, + + // pub fn write(this: *File, comptime chunk_type: Source.Type, chunk: Source.Type.of(chunk_type)) Response {} + // pub fn read(this: *File, 
comptime chunk_type: Source.Type, chunk: Source.Type.of(chunk_type)) Response {} + }; + + pub const Type = enum(u8) { + file, + file_path, + socket, + buffer, + stream, + javascript, + }; + }; +}; + +pub const Writable = struct { + state: State = State{}, + emitter: EventEmitter = EventEmitter{}, + + connection: ?*Stream = null, + globalObject: ?*JSC.JSGlobalObject = null, + + // workaround https://github.com/ziglang/zig/issues/6611 + stream: *Stream = undefined, + pipeline: Pipeline = Pipeline{}, + started: bool = false, + + pub const Chunk = struct { + data: StringOrBuffer, + encoding: Encoding = Encoding.utf8, + + pub fn init(allocator: std.mem.Allocator, size: u32) !Chunk { + var bytes = try allocator.alloc(u8, size); + return Chunk{ + .data = JSC.ArrayBuffer.fromBytes(bytes, JSC.JSValue.JSType.Uint8Array), + }; + } + }; + + pub const Pipe = struct { + source: *Stream, + destination: *Stream, + chunk: ?*Chunk = null, + // Might be the end of the stream + // or it might just be another stream + next: ?*Pipe = null, + + pub fn start(this: *Pipe, pipeline: *Pipeline, chunk: ?*Chunk) void { + this.run(pipeline, chunk, null); + } + + var disable_clonefile = false; + + fn runCloneFileWithFallback(pipeline: *Pipeline, source: *Stream.Content, destination: *Stream.Content) void { + switch (Syscall.clonefile(source.path.sliceAssumeZ(), destination.path.sliceAssumeZ())) { + .result => return, + .err => |err| { + switch (err.getErrno()) { + // these are retryable + .ENOTSUP, .EXDEV, .EXIST, .EIO, .ENOTDIR => |call| { + if (call == .ENOTSUP) { + disable_clonefile = true; + } + + return runCopyfile( + false, + pipeline, + source, + .file_path, + destination, + .file_path, + ); + }, + else => { + pipeline.err = err; + return; + }, + } + }, + } + } + + fn runCopyfile( + must_open_files: bool, + pipeline: *Pipeline, + source: *Stream.Content, + source_type: Stream.Content.Type, + destination: *Stream.Content, + destination_type: Stream.Content.Type, + is_end: bool, + ) void { + do_the_work: { + // fallback-only + if (destination_type == .file_path and source_type == .file_path and !destination.file_path.opened and !must_open_files) { + switch (Syscall.copyfile(source.path.sliceAssumeZ(), destination.path.sliceAssumeZ(), 0)) { + .err => |err| { + pipeline.err = err; + + return; + }, + .result => break :do_the_work, + } + } + + defer { + if (source_type == .file_path and source.file_path.auto_close and source.file_path.opened) { + if (source.stream.close()) |err| { + if (pipeline.err == null) { + pipeline.err = err; + } + } + } + + if (is_end and destination_type == .file_path and destination.file_path.auto_close and destination.file_path.opened) { + if (destination.stream.close()) |err| { + if (pipeline.err == null) { + pipeline.err = err; + } + } + } + } + + if (source_type == .file_path and !source.file_path.opened) { + if (source.stream.open()) |err| { + pipeline.err = err; + return; + } + } + + const source_fd = if (source_type == .file_path) + source.file_path.file.fd + else + source.file.fd; + + if (destination == .file_path and !destination.file_path.opened) { + if (destination.stream.open()) |err| { + pipeline.err = err; + return; + } + } + + const dest_fd = if (destination_type == .file_path) + destination.file_path.file.fd + else + destination.file.fd; + + switch (Syscall.fcopyfile(source_fd, dest_fd, 0)) { + .err => |err| { + pipeline.err = err; + return; + }, + .result => break :do_the_work, + } + } + + switch (destination.getFile(destination_type).setPermissions()) { + .err => |err| { + 
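+ // A failed fchmod is reported on the destination stream and recorded as the pipeline error.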
destination.stream.emitError(err); + pipeline.err = err; + return; + }, + .result => return, + } + } + + // fn runGeneric(this: *Pipe, pipeline: *Pipeline) !void { + // var source = this.source; + // var destination = this.destination; + // const source_content_type = source.content_type; + // const destination_content_type = destination.content_type; + + // if (this.chunk == null) { + // this.chunk = try this.source.allocator.create(Chunk); + // this.chunk.?.* = try Chunk.init(this.source.allocator, this.source.sink.readable.state.highwater_mark); + // } + + // source.readInto + // } + + pub fn run(this: *Pipe, pipeline: *Pipeline) void { + var source = this.source; + var destination = this.destination; + const source_content_type = source.content_type; + const destination_content_type = destination.content_type; + + if (pipeline.err != null) return; + + switch (FastPath.get( + source_content_type, + destination_content_type, + pipeline.head == this, + pipeline.tail == this, + )) { + .clonefile => { + if (comptime !Environment.isMac) unreachable; + if (destination.content.file_path.opened) { + runCopyfile( + // Can we skip sending a .open event? + (!source.content.file_path.auto_close and !source.content.file_path.opened) or (!destination.content.file_path.auto_close and !destination.content.file_path.opened), + pipeline, + &source.content, + .file_path, + &destination.content, + .file_path, + this.next == null, + ); + } else { + runCloneFileWithFallback(pipeline, source.content.file_path, destination.content.file_path); + } + }, + .copyfile => { + if (comptime !Environment.isMac) unreachable; + runCopyfile( + // Can we skip sending a .open event? + (!source.content.file_path.auto_close and !source.content.file_path.opened) or (!destination.content.file_path.auto_close and !destination.content.file_path.opened), + pipeline, + &source.content, + source_content_type, + &destination.content, + destination_content_type, + this.next == null, + ); + }, + else => {}, + } + } + + pub const FastPath = enum { + none, + clonefile, + sendfile, + copyfile, + copy_file_range, + + pub fn get(source: Stream.Content.Type, destination: Stream.Content.Type, is_head: bool, is_tail: bool) FastPath { + _ = is_tail; + if (comptime Environment.isMac) { + if (is_head) { + if (source == .file_path and destination == .file_path and !disable_clonefile) + return .clonefile; + + if ((source == .file or source == .file_path) and (destination == .file or destination == .file_path)) { + return .copyfile; + } + } + } + + return FastPath.none; + } + }; + }; + + pub const Pipeline = struct { + head: ?*Pipe = null, + tail: ?*Pipe = null, + + // Preallocate a single pipe so that + preallocated_tail_pipe: Pipe = undefined, + + /// Does the data exit at any point to JavaScript? 
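+ /// If it does, chunks have to be surfaced as JS values instead of staying entirely in native code.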
+ closed_loop: bool = true, + + // If there is a pending error, this is the error + err: ?Syscall.Error = null, + + pub const StartTask = struct { + writable: *Writable, + pub fn run(this: *StartTask) void { + var writable = this.writable; + var head = writable.pipeline.head orelse return; + if (writable.started) { + return; + } + writable.started = true; + + head.start(&writable.pipeline, null); + } + }; + }; + + pub fn appendReadable(this: *Writable, readable: *Stream) void { + if (comptime Environment.allow_assert) { + std.debug.assert(readable.sink_type == .readable); + } + + if (this.pipeline.tail == null) { + this.pipeline.head = &this.pipeline.preallocated_tail_pipe; + this.pipeline.head.?.* = Pipe{ + .destination = this.stream, + .source = readable, + }; + this.pipeline.tail = this.pipeline.head; + return; + } + + var pipe = readable.allocator.create(Pipe) catch unreachable; + pipe.* = Pipe{ + .source = readable, + .destination = this.stream, + }; + this.pipeline.tail.?.next = pipe; + this.pipeline.tail = pipe; + } + + pub const EventEmitter = Emitter.New(Events); + + pub fn emit(this: *Writable, event: Events, value: JSC.JSValue) void { + if (!this.shouldEmitEvent(event)) return; + + this.emitter.emit(event, this.globalObject.?, value); + } + + pub inline fn shouldEmitEvent(this: *const Writable, event: Events) bool { + return switch (event) { + .Close => this.state.emit_close and this.emitter.listeners.get(.Close).list.len > 0, + .Drain => this.emitter.listeners.get(.Drain).list.len > 0, + .Error => this.emitter.listeners.get(.Error).list.len > 0, + .Finish => this.emitter.listeners.get(.Finish).list.len > 0, + .Pipe => this.emitter.listeners.get(.Pipe).list.len > 0, + .Unpipe => this.emitter.listeners.get(.Unpipe).list.len > 0, + .Open => this.emitter.listeners.get(.Open).list.len > 0, + }; + } + + pub const State = extern struct { + highwater_mark: u32 = 256_000, + encoding: Encoding = Encoding.utf8, + start: i32 = 0, + destroyed: bool = false, + ended: bool = false, + corked: bool = false, + finished: bool = false, + emit_close: bool = true, + + pub fn deinit(state: *State) callconv(.C) void { + if (comptime is_bindgen) return; + + var stream = state.getStream(); + stream.deinit(); + } + + pub fn create(state: *State, globalObject: *JSC.JSGlobalObject) callconv(.C) JSC.JSValue { + return shim.cppFn("create", .{ state, globalObject }); + } + + // i know. 
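+ // `state` is embedded directly in `Writable`, so `@fieldParentPtr` can recover the parent without storing a back-pointer.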
+ pub inline fn getStream(state: *State) *Stream { + return getWritable(state).stream; + } + + pub inline fn getWritable(state: *State) *Writable { + return @fieldParentPtr(Writable, "state", state); + } + + pub fn addEventListener(state: *State, global: *JSC.JSGlobalObject, event: Events, callback: JSC.JSValue, is_once: bool) callconv(.C) void { + if (comptime is_bindgen) return; + var writable = state.getWritable(); + writable.emitter.addListener(global.ref(), event, .{ + .once = is_once, + .callback = callback, + }) catch unreachable; + } + + pub fn removeEventListener(state: *State, global: *JSC.JSGlobalObject, event: Events, callback: JSC.JSValue) callconv(.C) bool { + if (comptime is_bindgen) return true; + var writable = state.getWritable(); + return writable.emitter.removeListener(global.ref(), event, callback); + } + + pub fn prependEventListener(state: *State, global: *JSC.JSGlobalObject, event: Events, callback: JSC.JSValue, is_once: bool) callconv(.C) void { + if (comptime is_bindgen) return; + var writable = state.getWritable(); + writable.emitter.prependListener(global.ref(), event, .{ + .once = is_once, + .callback = callback, + }) catch unreachable; + } + + pub fn write(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + pub fn end(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + pub fn close(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + pub fn destroy(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + pub fn cork(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + pub fn uncork(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + + pub const Flowing = enum(u8) { + pending, + yes, + paused, + }; + + pub const shim = Shimmer("Bun", "Writable", @This()); + pub const name = "Bun__Writable"; + pub const include = "BunStream.h"; + pub const namespace = shim.namespace; + + pub const Export = shim.exportFunctions(.{ + .@"deinit" = deinit, + .@"addEventListener" = addEventListener, + .@"removeEventListener" = removeEventListener, + .@"prependEventListener" = prependEventListener, + .@"write" = write, + .@"end" = end, + .@"close" = close, + .@"destroy" = destroy, + .@"cork" = cork, + .@"uncork" = uncork, + }); + + pub const Extern = [_][]const u8{"create"}; + + comptime { + if 
(!is_bindgen) { + @export(deinit, .{ .name = Export[0].symbol_name }); + @export(addEventListener, .{ .name = Export[1].symbol_name }); + @export(removeEventListener, .{ .name = Export[2].symbol_name }); + @export(prependEventListener, .{ .name = Export[3].symbol_name }); + @export(write, .{ .name = Export[4].symbol_name }); + @export(end, .{ .name = Export[5].symbol_name }); + @export(close, .{ .name = Export[6].symbol_name }); + @export(destroy, .{ .name = Export[7].symbol_name }); + @export(cork, .{ .name = Export[8].symbol_name }); + @export(uncork, .{ .name = Export[9].symbol_name }); + } + } + }; + + pub const Events = enum(u8) { + Close, + Drain, + Error, + Finish, + Pipe, + Unpipe, + Open, + + pub const name = "WritableEvent"; + }; +}; + +pub const Readable = struct { + state: State = State{}, + emitter: EventEmitter = EventEmitter{}, + stream: *Stream = undefined, + destination: ?*Writable = null, + globalObject: ?*JSC.JSGlobalObject = null, + + pub const EventEmitter = Emitter.New(Events); + + pub fn emit(this: *Readable, event: Events, comptime ValueType: type, value: JSC.JSValue) void { + _ = ValueType; + if (!this.shouldEmitEvent(event)) return; + + this.emitter.emit(event, this.globalObject.?, value); + } + + pub fn shouldEmitEvent(this: *Readable, event: Events) bool { + return switch (event) { + .Close => this.state.emit_close and this.emitter.listeners.get(.Close).list.len > 0, + .Data => this.emitter.listeners.get(.Data).list.len > 0, + .End => this.state.emit_end and this.emitter.listeners.get(.End).list.len > 0, + .Error => this.emitter.listeners.get(.Error).list.len > 0, + .Pause => this.emitter.listeners.get(.Pause).list.len > 0, + .Readable => this.emitter.listeners.get(.Readable).list.len > 0, + .Resume => this.emitter.listeners.get(.Resume).list.len > 0, + .Open => this.emitter.listeners.get(.Open).list.len > 0, + }; + } + + pub const Events = enum(u8) { + Close, + Data, + End, + Error, + Pause, + Readable, + Resume, + Open, + + pub const name = "ReadableEvent"; + }; + + // This struct is exposed to JavaScript + pub const State = extern struct { + highwater_mark: u32 = 256_000, + encoding: Encoding = Encoding.utf8, + + start: i32 = 0, + end: i32 = std.math.maxInt(i32), + + readable: bool = false, + aborted: bool = false, + did_read: bool = false, + ended: bool = false, + flowing: Flowing = Flowing.pending, + + emit_close: bool = true, + emit_end: bool = true, + + // i know. 
+ pub inline fn getStream(state: *State) *Stream { + return getReadable(state).stream; + } + + pub inline fn getReadable(state: *State) *Readable { + return @fieldParentPtr(Readable, "state", state); + } + + pub const Flowing = enum(u8) { + pending, + yes, + paused, + }; + + pub const shim = Shimmer("Bun", "Readable", @This()); + pub const name = "Bun__Readable"; + pub const include = "BunStream.h"; + pub const namespace = shim.namespace; + + pub fn create( + state: *State, + globalObject: *JSC.JSGlobalObject, + ) callconv(.C) JSC.JSValue { + return shim.cppFn("create", .{ state, globalObject }); + } + + pub fn deinit(state: *State) callconv(.C) void { + if (comptime is_bindgen) return; + var stream = state.getStream(); + stream.deinit(); + } + + pub fn addEventListener(state: *State, global: *JSC.JSGlobalObject, event: Events, callback: JSC.JSValue, is_once: bool) callconv(.C) void { + if (comptime is_bindgen) return; + var readable = state.getReadable(); + + readable.emitter.addListener(global.ref(), event, .{ + .once = is_once, + .callback = callback, + }) catch unreachable; + } + + pub fn removeEventListener(state: *State, global: *JSC.JSGlobalObject, event: Events, callback: JSC.JSValue) callconv(.C) bool { + if (comptime is_bindgen) return true; + var readable = state.getReadable(); + return readable.emitter.removeListener(global.ref(), event, callback); + } + + pub fn prependEventListener(state: *State, global: *JSC.JSGlobalObject, event: Events, callback: JSC.JSValue, is_once: bool) callconv(.C) void { + if (comptime is_bindgen) return; + var readable = state.getReadable(); + readable.emitter.prependListener(global.ref(), event, .{ + .once = is_once, + .callback = callback, + }) catch unreachable; + } + + pub fn pipe(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + if (len < 1) { + return JSC.toInvalidArguments("Writable is required", .{}, global.ref()); + } + const args: []const JSC.JSValue = args_ptr[0..len]; + var writable_state: *Writable.State = args[0].getWritableStreamState(global.vm()) orelse { + return JSC.toInvalidArguments("Expected Writable but didn't receive it", .{}, global.ref()); + }; + writable_state.getWritable().appendReadable(state.getStream()); + return JSC.JSValue.jsUndefined(); + } + + pub fn unpipe(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + + pub fn unshift(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + + pub fn read(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + + pub fn pause(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return 
JSC.JSValue.jsUndefined(); + } + + pub fn @"resume"(state: *State, global: *JSC.JSGlobalObject, args_ptr: [*]const JSC.JSValue, len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + _ = state; + _ = global; + _ = args_ptr; + _ = len; + + return JSC.JSValue.jsUndefined(); + } + + pub const Export = shim.exportFunctions(.{ + .@"deinit" = deinit, + .@"addEventListener" = addEventListener, + .@"removeEventListener" = removeEventListener, + .@"prependEventListener" = prependEventListener, + .@"pipe" = pipe, + .@"unpipe" = unpipe, + .@"unshift" = unshift, + .@"read" = read, + .@"pause" = pause, + .@"resume" = State.@"resume", + }); + + pub const Extern = [_][]const u8{"create"}; + + comptime { + if (!is_bindgen) { + @export(deinit, .{ + .name = Export[0].symbol_name, + }); + @export(addEventListener, .{ + .name = Export[1].symbol_name, + }); + @export(removeEventListener, .{ + .name = Export[2].symbol_name, + }); + @export(prependEventListener, .{ + .name = Export[3].symbol_name, + }); + @export( + pipe, + .{ .name = Export[4].symbol_name }, + ); + @export( + unpipe, + .{ .name = Export[5].symbol_name }, + ); + @export( + unshift, + .{ .name = Export[6].symbol_name }, + ); + @export( + read, + .{ .name = Export[7].symbol_name }, + ); + @export( + pause, + .{ .name = Export[8].symbol_name }, + ); + @export( + State.@"resume", + .{ .name = Export[9].symbol_name }, + ); + } + } + }; +}; + +pub const Path = struct { + pub const shim = Shimmer("Bun", "Path", @This()); + pub const name = "Bun__Path"; + pub const include = "Path.h"; + pub const namespace = shim.namespace; + const PathHandler = @import("../../resolver/resolve_path.zig"); + const StringBuilder = @import("../../string_builder.zig"); + pub const code = @embedFile("../path.exports.js"); + + pub fn create(globalObject: *JSC.JSGlobalObject, isWindows: bool) callconv(.C) JSC.JSValue { + return shim.cppFn("create", .{ globalObject, isWindows }); + } + + pub fn basename(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + if (args_len == 0) { + return JSC.toInvalidArguments("path is required", .{}, globalThis.ref()); + } + var stack_fallback = std.heap.stackFallback(4096, JSC.getAllocator(globalThis.ref())); + var allocator = stack_fallback.get(); + + var arguments: []JSC.JSValue = args_ptr[0..args_len]; + var path = arguments[0].toSlice(globalThis, allocator); + + defer path.deinit(); + var extname_ = if (args_len > 1) arguments[1].toSlice(globalThis, allocator) else JSC.ZigString.Slice.empty; + defer extname_.deinit(); + + var base_slice = path.slice(); + var out: []const u8 = base_slice; + + if (!isWindows) { + out = std.fs.path.basenamePosix(base_slice); + } else { + out = std.fs.path.basenameWindows(base_slice); + } + const ext = extname_.slice(); + + if ((ext.len != out.len or out.len == base_slice.len) and strings.endsWith(out, ext)) { + out = out[0 .. 
out.len - ext.len]; + } + + return JSC.ZigString.init(out).withEncoding().toValueGC(globalThis); + } + pub fn dirname(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + if (args_len == 0) { + return JSC.toInvalidArguments("path is required", .{}, globalThis.ref()); + } + var stack_fallback = std.heap.stackFallback(4096, JSC.getAllocator(globalThis.ref())); + var allocator = stack_fallback.get(); + + var arguments: []JSC.JSValue = args_ptr[0..args_len]; + var path = arguments[0].toSlice(globalThis, allocator); + defer path.deinit(); + + const base_slice = path.slice(); + + const out = if (!isWindows) + std.fs.path.dirnamePosix(base_slice) orelse "/" + else + std.fs.path.dirnameWindows(base_slice) orelse "C:\\"; + + return JSC.ZigString.init(out).toValueGC(globalThis); + } + pub fn extname(globalThis: *JSC.JSGlobalObject, _: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + if (args_len == 0) { + return JSC.toInvalidArguments("path is required", .{}, globalThis.ref()); + } + var stack_fallback = std.heap.stackFallback(4096, JSC.getAllocator(globalThis.ref())); + var allocator = stack_fallback.get(); + var arguments: []JSC.JSValue = args_ptr[0..args_len]; + + var path = arguments[0].toSlice(globalThis, allocator); + defer path.deinit(); + + const base_slice = path.slice(); + + return JSC.ZigString.init(std.fs.path.extension(base_slice)).toValueGC(globalThis); + } + pub fn format(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + if (args_len == 0) { + return JSC.toInvalidArguments("pathObject is required", .{}, globalThis.ref()); + } + var path_object: JSC.JSValue = args_ptr[0]; + const js_type = path_object.jsType(); + if (!js_type.isObject()) { + return JSC.toInvalidArguments("pathObject is required", .{}, globalThis.ref()); + } + + var stack_fallback = std.heap.stackFallback(4096, JSC.getAllocator(globalThis.ref())); + var allocator = stack_fallback.get(); + var dir = JSC.ZigString.Empty; + var name_ = JSC.ZigString.Empty; + var ext = JSC.ZigString.Empty; + var name_with_ext = JSC.ZigString.Empty; + + var insert_separator = true; + if (path_object.get(globalThis, "dir")) |prop| { + prop.toZigString(&dir, globalThis); + insert_separator = !dir.isEmpty(); + } else if (path_object.get(globalThis, "root")) |prop| { + prop.toZigString(&dir, globalThis); + } + + if (path_object.get(globalThis, "base")) |prop| { + prop.toZigString(&name_with_ext, globalThis); + } else { + var had_ext = false; + if (path_object.get(globalThis, "ext")) |prop| { + prop.toZigString(&ext, globalThis); + had_ext = !ext.isEmpty(); + } + + if (path_object.get(globalThis, "name")) |prop| { + if (had_ext) { + prop.toZigString(&name_, globalThis); + } else { + prop.toZigString(&name_with_ext, globalThis); + } + } + } + + if (dir.isEmpty()) { + if (!name_with_ext.isEmpty()) { + return name_with_ext.toValueAuto(globalThis); + } + + if (name_.isEmpty()) { + return JSC.ZigString.Empty.toValue(globalThis); + } + const out = std.fmt.allocPrint(allocator, "{s}{s}", .{ name_, ext }) catch unreachable; + defer allocator.free(out); + + return JSC.ZigString.init(out).withEncoding().toValueGC(globalThis); + } + + if (insert_separator) { + const separator = if (!isWindows) "/" else "\\"; + if
(name_with_ext.isEmpty()) { + const out = std.fmt.allocPrint(allocator, "{}{s}{}{}", .{ dir, separator, name_, ext }) catch unreachable; + defer allocator.free(out); + return JSC.ZigString.init(out).withEncoding().toValueGC(globalThis); + } + + { + const out = std.fmt.allocPrint(allocator, "{}{s}{}", .{ + dir, + separator, + name_with_ext, + }) catch unreachable; + defer allocator.free(out); + return JSC.ZigString.init(out).withEncoding().toValueGC(globalThis); + } + } + + if (name_with_ext.isEmpty()) { + const out = std.fmt.allocPrint(allocator, "{}{}{}", .{ dir, name_, ext }) catch unreachable; + defer allocator.free(out); + return JSC.ZigString.init(out).withEncoding().toValueGC(globalThis); + } + + { + const out = std.fmt.allocPrint(allocator, "{}{}", .{ + dir, + name_with_ext, + }) catch unreachable; + defer allocator.free(out); + return JSC.ZigString.init(out).withEncoding().toValueGC(globalThis); + } + } + fn isAbsoluteString(path: JSC.ZigString, windows: bool) bool { + if (!windows) return path.len > 0 and path.slice()[0] == '/'; + + return isZigStringAbsoluteWindows(path); + } + pub fn isAbsolute(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + if (args_len == 0) return JSC.JSValue.jsBoolean(false); + var zig_str: JSC.ZigString = args_ptr[0].getZigString(globalThis); + if (zig_str.isEmpty()) return JSC.JSValue.jsBoolean(false); + return JSC.JSValue.jsBoolean(isAbsoluteString(zig_str, isWindows)); + } + fn isZigStringAbsoluteWindows(zig_str: JSC.ZigString) bool { + if (zig_str.is16Bit()) { + var buf = [4]u16{ 0, 0, 0, 0 }; + var u16_slice = zig_str.utf16Slice(); + + buf[0] = u16_slice[0]; + if (u16_slice.len > 1) + buf[1] = u16_slice[1]; + + if (u16_slice.len > 2) + buf[2] = u16_slice[2]; + + if (u16_slice.len > 3) + buf[3] = u16_slice[3]; + + return std.fs.path.isAbsoluteWindowsWTF16(buf[0..@minimum(u16_slice.len, buf.len)]); + } + + return std.fs.path.isAbsoluteWindows(zig_str.slice()); + } + pub fn join( + globalThis: *JSC.JSGlobalObject, + isWindows: bool, + args_ptr: [*]JSC.JSValue, + args_len: u16, + ) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + if (args_len == 0) return JSC.ZigString.init("").toValue(globalThis); + + var stack_fallback_allocator = std.heap.stackFallback( + (32 * @sizeOf(string)), + heap_allocator, + ); + var allocator = stack_fallback_allocator.get(); + var arena = std.heap.ArenaAllocator.init(heap_allocator); + var arena_allocator = arena.allocator(); + defer arena.deinit(); + var buf: [bun.MAX_PATH_BYTES]u8 = undefined; + var to_join = allocator.alloc(string, args_len) catch unreachable; + var possibly_utf16 = false; + for (args_ptr[0..args_len]) |arg, i| { + const zig_str: JSC.ZigString = arg.getZigString(globalThis); + if (zig_str.is16Bit()) { + // TODO: remove this string conversion + to_join[i] = zig_str.toSlice(arena_allocator).slice(); + possibly_utf16 = true; + } else { + to_join[i] = zig_str.slice(); + } + } + + const out = if (!isWindows) + PathHandler.joinStringBuf(&buf, to_join, .posix) + else + PathHandler.joinStringBuf(&buf, to_join, .windows); + + var out_str = JSC.ZigString.init(out); + if (possibly_utf16) { + out_str.setOutputEncoding(); + } + + return out_str.toValueGC(globalThis); + } + pub fn normalize(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return 
JSC.JSValue.jsUndefined(); + if (args_len == 0) return JSC.ZigString.init("").toValue(globalThis); + + var zig_str: JSC.ZigString = args_ptr[0].getZigString(globalThis); + if (zig_str.len == 0) return JSC.ZigString.init("").toValue(globalThis); + + var buf: [bun.MAX_PATH_BYTES]u8 = undefined; + var str_slice = zig_str.toSlice(heap_allocator); + defer str_slice.deinit(); + var str = str_slice.slice(); + + const out = if (!isWindows) + PathHandler.normalizeStringNode(str, &buf, .posix) + else + PathHandler.normalizeStringNode(str, &buf, .windows); + + var out_str = JSC.ZigString.init(out); + if (str_slice.allocated) out_str.setOutputEncoding(); + return out_str.toValueGC(globalThis); + } + pub fn parse(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + if (args_len == 0 or !args_ptr[0].jsType().isStringLike()) { + return JSC.toInvalidArguments("path string is required", .{}, globalThis.ref()); + } + var path_slice: JSC.ZigString.Slice = args_ptr[0].toSlice(globalThis, heap_allocator); + defer path_slice.deinit(); + var path = path_slice.slice(); + var path_name = Fs.PathName.init(path); + var root = JSC.ZigString.init(path_name.dir); + const is_absolute = (isWindows and isZigStringAbsoluteWindows(root)) or (!isWindows and path_name.dir.len > 0 and path_name.dir[0] == '/'); + + var dir = JSC.ZigString.init(path_name.dir); + if (is_absolute) { + root = JSC.ZigString.Empty; + if (path_name.dir.len == 0) + dir = JSC.ZigString.init(if (isWindows) std.fs.path.sep_str_windows else std.fs.path.sep_str_posix); + } + + var base = JSC.ZigString.init(path_name.base); + var name_ = JSC.ZigString.init(path_name.filename); + var ext = JSC.ZigString.init(path_name.ext); + dir.setOutputEncoding(); + root.setOutputEncoding(); + base.setOutputEncoding(); + name_.setOutputEncoding(); + ext.setOutputEncoding(); + var entries = [10]JSC.ZigString{ + JSC.ZigString.init("dir"), + JSC.ZigString.init("root"), + JSC.ZigString.init("base"), + JSC.ZigString.init("name"), + JSC.ZigString.init("ext"), + dir, + root, + base, + name_, + ext, + }; + + var keys: []JSC.ZigString = entries[0..5]; + var values: []JSC.ZigString = entries[5..10]; + return JSC.JSValue.fromEntries(globalThis, keys.ptr, values.ptr, 5, true); + } + pub fn relative(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + var arguments = args_ptr[0..args_len]; + + if (args_len > 1 and JSC.JSValue.eqlValue(args_ptr[0], args_ptr[1])) + return JSC.ZigString.init("").toValue(globalThis); + + var from_slice: JSC.ZigString.Slice = if (args_len > 0) arguments[0].toSlice(globalThis, heap_allocator) else JSC.ZigString.Slice.empty; + defer from_slice.deinit(); + var to_slice: JSC.ZigString.Slice = if (args_len > 1) arguments[1].toSlice(globalThis, heap_allocator) else JSC.ZigString.Slice.empty; + defer to_slice.deinit(); + + var from = from_slice.slice(); + var to = to_slice.slice(); + + var out = if (!isWindows) + PathHandler.relativePlatform(from, to, .posix, true) + else + PathHandler.relativePlatform(from, to, .windows, true); + + var out_str = JSC.ZigString.init(out); + if (from_slice.allocated or to_slice.allocated) out_str.setOutputEncoding(); + return out_str.toValueGC(globalThis); + } + + pub fn resolve(globalThis: *JSC.JSGlobalObject, isWindows: bool, args_ptr: [*]JSC.JSValue, args_len: u16) callconv(.C) 
JSC.JSValue { + if (comptime is_bindgen) return JSC.JSValue.jsUndefined(); + + var stack_fallback_allocator = std.heap.stackFallback( + (32 * @sizeOf(string)), + heap_allocator, + ); + var allocator = stack_fallback_allocator.get(); + var out_buf: [bun.MAX_PATH_BYTES * 2]u8 = undefined; + + var parts = allocator.alloc(string, args_len) catch unreachable; + defer allocator.free(parts); + + var arena = std.heap.ArenaAllocator.init(heap_allocator); + var arena_allocator = arena.allocator(); + defer arena.deinit(); + + var i: u16 = 0; + while (i < args_len) : (i += 1) { + parts[i] = args_ptr[i].toSlice(globalThis, arena_allocator).slice(); + } + + var out: JSC.ZigString = if (!isWindows) + JSC.ZigString.init(PathHandler.joinAbsStringBuf(Fs.FileSystem.instance.top_level_dir, &out_buf, parts, .posix)) + else + JSC.ZigString.init(PathHandler.joinAbsStringBuf(Fs.FileSystem.instance.top_level_dir, &out_buf, parts, .windows)); + + out.len = strings.withoutTrailingSlash(out.slice()).len; + + if (arena.state.buffer_list.first != null) + out.setOutputEncoding(); + + return out.toValueGC(globalThis); + } + + pub const Export = shim.exportFunctions(.{ + .@"basename" = basename, + .@"dirname" = dirname, + .@"extname" = extname, + .@"format" = format, + .@"isAbsolute" = isAbsolute, + .@"join" = join, + .@"normalize" = normalize, + .@"parse" = parse, + .@"relative" = relative, + .@"resolve" = resolve, + }); + + pub const Extern = [_][]const u8{"create"}; + + comptime { + if (!is_bindgen) { + @export(Path.basename, .{ + .name = Export[0].symbol_name, + }); + @export(Path.dirname, .{ + .name = Export[1].symbol_name, + }); + @export(Path.extname, .{ + .name = Export[2].symbol_name, + }); + @export(Path.format, .{ + .name = Export[3].symbol_name, + }); + @export(Path.isAbsolute, .{ + .name = Export[4].symbol_name, + }); + @export(Path.join, .{ + .name = Export[5].symbol_name, + }); + @export(Path.normalize, .{ + .name = Export[6].symbol_name, + }); + @export(Path.parse, .{ + .name = Export[7].symbol_name, + }); + @export(Path.relative, .{ + .name = Export[8].symbol_name, + }); + @export(Path.resolve, .{ + .name = Export[9].symbol_name, + }); + } + } +}; + +pub const Process = struct { + pub fn getArgv(globalObject: *JSC.JSGlobalObject) callconv(.C) JSC.JSValue { + var vm = globalObject.bunVM(); + if (vm.argv.len == 0) + return JSC.JSValue.createStringArray(globalObject, null, 0, false); + + // Allocate up to 32 strings in stack + var stack_fallback_allocator = std.heap.stackFallback( + 32 * @sizeOf(JSC.ZigString), + heap_allocator, + ); + var allocator = stack_fallback_allocator.get(); + + // If it was launched with bun run or bun test, skip it + const skip: usize = @as(usize, @boolToInt( + vm.argv.len > 1 and (strings.eqlComptime(vm.argv[0], "run") or strings.eqlComptime(vm.argv[0], "wiptest")), + )); + + var args = allocator.alloc( + JSC.ZigString, + vm.argv.len + 1, + ) catch unreachable; + var args_list = std.ArrayListUnmanaged(JSC.ZigString){ .items = args, .capacity = args.len }; + args_list.items.len = 0; + defer allocator.free(args); + { + var args_iterator = std.process.args(); + + if (args_iterator.next()) |arg0| { + var argv0 = JSC.ZigString.init(std.mem.span(arg0)); + argv0.setOutputEncoding(); + // https://github.com/yargs/yargs/blob/adb0d11e02c613af3d9427b3028cc192703a3869/lib/utils/process-argv.ts#L1 + args_list.appendAssumeCapacity(argv0); + } + } + + if (vm.argv.len > skip) { + for (vm.argv[skip..]) |arg| { + var str = JSC.ZigString.init(arg); + str.setOutputEncoding(); + 
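+ // capacity was reserved above (vm.argv.len + 1), so this append cannot fail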
args_list.appendAssumeCapacity(str); + } + } + + return JSC.JSValue.createStringArray(globalObject, args_list.items.ptr, args_list.items.len, true); + } + + pub fn getCwd(globalObject: *JSC.JSGlobalObject) callconv(.C) JSC.JSValue { + var buffer: [bun.MAX_PATH_BYTES]u8 = undefined; + switch (Syscall.getcwd(&buffer)) { + .err => |err| { + return err.toJSC(globalObject); + }, + .result => |result| { + var zig_str = JSC.ZigString.init(result); + zig_str.setOutputEncoding(); + + const value = zig_str.toValueGC(globalObject); + + return value; + }, + } + } + pub fn setCwd(globalObject: *JSC.JSGlobalObject, to: *JSC.ZigString) callconv(.C) JSC.JSValue { + if (to.len == 0) { + return JSC.toInvalidArguments("path is required", .{}, globalObject.ref()); + } + + var buf: [bun.MAX_PATH_BYTES]u8 = undefined; + const slice = to.sliceZBuf(&buf) catch { + return JSC.toInvalidArguments("Invalid path", .{}, globalObject.ref()); + }; + + const result = Syscall.chdir(slice); + + switch (result) { + .err => |err| { + return err.toJSC(globalObject); + }, + .result => { + // When we update the cwd from JS, we have to update the bundler's version as well + // However, this might be called many times in a row, so we use a pre-allocated buffer + // that way we don't have to worry about garbage collector + JSC.VirtualMachine.vm.bundler.fs.top_level_dir = std.os.getcwd(&JSC.VirtualMachine.vm.bundler.fs.top_level_dir_buf) catch { + _ = Syscall.chdir(std.meta.assumeSentinel(JSC.VirtualMachine.vm.bundler.fs.top_level_dir, 0)); + return JSC.toInvalidArguments("Invalid path", .{}, globalObject.ref()); + }; + + JSC.VirtualMachine.vm.bundler.fs.top_level_dir_buf[JSC.VirtualMachine.vm.bundler.fs.top_level_dir.len] = std.fs.path.sep; + JSC.VirtualMachine.vm.bundler.fs.top_level_dir_buf[JSC.VirtualMachine.vm.bundler.fs.top_level_dir.len + 1] = 0; + JSC.VirtualMachine.vm.bundler.fs.top_level_dir = JSC.VirtualMachine.vm.bundler.fs.top_level_dir_buf[0 .. JSC.VirtualMachine.vm.bundler.fs.top_level_dir.len + 1]; + + return JSC.JSValue.jsUndefined(); + }, + } + } + + pub fn exit(_: *JSC.JSGlobalObject, code: i32) callconv(.C) void { + std.os.exit(@truncate(u8, @intCast(u32, @maximum(code, 0)))); + } + + pub export const Bun__version: [:0]const u8 = "v" ++ bun.Global.package_json_version; + pub export const Bun__versions_mimalloc: [:0]const u8 = bun.Global.versions.mimalloc; + pub export const Bun__versions_webkit: [:0]const u8 = bun.Global.versions.webkit; + pub export const Bun__versions_libarchive: [:0]const u8 = bun.Global.versions.libarchive; + pub export const Bun__versions_picohttpparser: [:0]const u8 = bun.Global.versions.picohttpparser; + pub export const Bun__versions_boringssl: [:0]const u8 = bun.Global.versions.boringssl; + pub export const Bun__versions_zlib: [:0]const u8 = bun.Global.versions.zlib; + pub export const Bun__versions_zig: [:0]const u8 = bun.Global.versions.zig; +}; + +comptime { + std.testing.refAllDecls(Process); + std.testing.refAllDecls(Stream); + std.testing.refAllDecls(Readable); + std.testing.refAllDecls(Path); + std.testing.refAllDecls(Writable); + std.testing.refAllDecls(Writable.State); + std.testing.refAllDecls(Readable.State); +} |