| author | 2021-10-14 18:55:41 -0700 |
|---|---|
| committer | 2021-10-14 18:55:41 -0700 |
| commit | bbc1bcbed125e4aeacac0c374f717f65adb838ea (patch) |
| tree | a3ae72a500afc507231d3f97c7d0762c76614a51 /src |
| parent | 3ed824fe0fc14d21a5c035d84891b8ecf28e3c44 (diff) |
Support local templates
Diffstat (limited to 'src')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/cli/create_command.zig | 193 |
| -rw-r--r-- | src/copy_file.zig | 50 |
| -rw-r--r-- | src/string_immutable.zig | 8 |
| -rw-r--r-- | src/walker_skippable.zig | 147 |

4 files changed, 381 insertions, 17 deletions
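With this patch, a non-absolute template argument to `bun create` is resolved against three local locations, in order, before it is treated as a remote example. A minimal sketch of that lookup order — `findLocalTemplate` is a hypothetical helper for illustration only; the real logic lives in the `create_command.zig` hunks below:

```zig
const std = @import("std");

// Illustrative sketch, not code from this patch: check the three local
// locations in order and return the first that exists, or null if the
// name should be treated as a remote example instead.
fn findLocalTemplate(allocator: *std.mem.Allocator, name: []const u8) !?[]u8 {
    const bases = [_]?[]const u8{
        std.os.getenv("BUN_CREATE_DIR"), // 1. $BUN_CREATE_DIR/<name>
        ".bun-create",                   // 2. <project dir>/.bun-create/<name>
        std.os.getenv("HOME"),           // 3. $HOME/.bun-create/<name>
    };
    for (bases) |maybe_base, i| {
        const base = maybe_base orelse continue;
        const full = if (i == 2)
            try std.fs.path.join(allocator, &[_][]const u8{ base, ".bun-create", name })
        else
            try std.fs.path.join(allocator, &[_][]const u8{ base, name });
        // Keep the first candidate that exists; free the rest.
        std.fs.cwd().access(full, .{}) catch {
            allocator.free(full);
            continue;
        };
        return full;
    }
    return null;
}
```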
diff --git a/src/cli/create_command.zig b/src/cli/create_command.zig
index d501e0473..d2a780430 100644
--- a/src/cli/create_command.zig
+++ b/src/cli/create_command.zig
@@ -31,7 +31,16 @@ const NPMClient = @import("../which_npm_client.zig").NPMClient;
 const which = @import("../which.zig").which;
 const clap = @import("clap");
+const CopyFile = @import("../copy_file.zig");
 var bun_path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+
+const skip_dirs = &[_]string{ "node_modules", ".git" };
+const skip_files = &[_]string{
+    "package-lock.json",
+    "yarn.lock",
+    "pnpm-lock.yaml",
+};
+
 var bun_path: ?[:0]const u8 = null;
 
 fn execTask(allocator: *std.mem.Allocator, task_: string, cwd: string, PATH: string, npm_client: NPMClient) void {
     const task = std.mem.trim(u8, task_, " \n\r\t");
@@ -156,6 +165,8 @@ const CreateOptions = struct {
     }
 };
 
+const BUN_CREATE_DIR = ".bun-create";
+var home_dir_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
 pub const CreateCommand = struct {
     var client: HTTPClient = undefined;
     var extracting_name_buf: [1024]u8 = undefined;
@@ -172,7 +183,44 @@ pub const CreateCommand = struct {
 
         env_loader.loadProcess();
 
-        const template = positionals[0];
+        const template = brk: {
+            var positional = positionals[0];
+
+            if (!std.fs.path.isAbsolute(positional)) {
+                outer: {
+                    if (env_loader.map.get("BUN_CREATE_DIR")) |home_dir| {
+                        var parts = [_]string{ home_dir, positional };
+                        var outdir_path = filesystem.absBuf(&parts, &home_dir_buf);
+                        home_dir_buf[outdir_path.len] = 0;
+                        var outdir_path_ = home_dir_buf[0..outdir_path.len :0];
+                        std.fs.accessAbsoluteZ(outdir_path_, .{}) catch break :outer;
+                        break :brk outdir_path;
+                    }
+                }
+
+                outer: {
+                    var parts = [_]string{ filesystem.top_level_dir, BUN_CREATE_DIR, positional };
+                    var outdir_path = filesystem.absBuf(&parts, &home_dir_buf);
+                    home_dir_buf[outdir_path.len] = 0;
+                    var outdir_path_ = home_dir_buf[0..outdir_path.len :0];
+                    std.fs.accessAbsoluteZ(outdir_path_, .{}) catch break :outer;
+                    break :brk outdir_path;
+                }
+
+                outer: {
+                    if (env_loader.map.get("HOME")) |home_dir| {
+                        var parts = [_]string{ home_dir, BUN_CREATE_DIR, positional };
+                        var outdir_path = filesystem.absBuf(&parts, &home_dir_buf);
+                        home_dir_buf[outdir_path.len] = 0;
+                        var outdir_path_ = home_dir_buf[0..outdir_path.len :0];
+                        std.fs.accessAbsoluteZ(outdir_path_, .{}) catch break :outer;
+                        break :brk outdir_path;
+                    }
+                }
+            }
+
+            break :brk positional;
+        };
         const dirname = positionals[1];
         var filename_writer = filesystem.dirname_store;
         const destination = try filesystem.dirname_store.append([]const u8, resolve_path.joinAbs(filesystem.top_level_dir, .auto, dirname));
@@ -196,7 +244,9 @@ pub const CreateCommand = struct {
         var package_json_contents: MutableString = undefined;
         var package_json_file: std.fs.File = undefined;
 
-        if (!std.fs.path.isAbsolute(template)) {
+        const is_remote_template = !std.fs.path.isAbsolute(template);
+
+        if (is_remote_template) {
             var tarball_bytes: MutableString = try Example.fetch(ctx, template, &progress, &node);
 
             node.end();
@@ -293,7 +343,8 @@ pub const CreateCommand = struct {
             package_json_contents = plucker.contents;
             package_json_file = std.fs.File{ .handle = plucker.fd };
         } else {
-            const template_dir = std.fs.openDirAbsolute(template, .{ .iterate = true }) catch |err| {
+            var template_parts = [_]string{template};
+            const template_dir = std.fs.openDirAbsolute(filesystem.abs(&template_parts), .{ .iterate = true }) catch |err| {
                 node.end();
                 progress.root.end();
                 progress.refresh();
@@ -314,19 +365,33 @@ pub const CreateCommand = struct {
                 std.os.exit(1);
             };
 
-            var walker = try template_dir.walk(ctx.allocator);
+            const Walker = @import("../walker_skippable.zig");
+            var walker = try Walker.walk(template_dir, ctx.allocator, skip_files, skip_dirs);
             defer walker.deinit();
+
             while (try walker.next()) |entry| {
                 // TODO: make this not walk these folders entirely
                 // rather than checking each file path.....
-                if (entry.kind != .File or
-                    std.mem.indexOf(u8, entry.path, "node_modules") != null or
-                    std.mem.indexOf(u8, entry.path, ".git") != null) continue;
-
-                entry.dir.copyFile(entry.basename, destination_dir, entry.path, .{}) catch {
+                if (entry.kind != .File) continue;
+                var outfile = destination_dir.createFile(entry.path, .{}) catch brk: {
                     if (std.fs.path.dirname(entry.path)) |entry_dirname| {
                         destination_dir.makePath(entry_dirname) catch {};
                     }
+                    break :brk destination_dir.createFile(entry.path, .{}) catch |err| {
+                        node.end();
+                        progress.root.end();
+                        progress.refresh();
+
+                        Output.prettyErrorln("<r><red>{s}<r>: copying file {s}", .{ @errorName(err), entry.path });
+                        Output.flush();
+                        std.os.exit(1);
+                    };
+                };
+                defer outfile.close();
+
+                var infile = try entry.dir.openFile(entry.basename, .{ .read = true });
+                defer infile.close();
+                CopyFile.copy(infile.handle, outfile.handle) catch {
                     entry.dir.copyFile(entry.basename, destination_dir, entry.path, .{}) catch |err| {
                         node.end();
                         progress.root.end();
@@ -337,6 +402,8 @@ pub const CreateCommand = struct {
                         std.os.exit(1);
                     };
                 };
+                var stat = outfile.stat() catch continue;
+                _ = C.fchmod(outfile.handle, stat.mode);
             }
 
             package_json_file = destination_dir.openFile("package.json", .{ .read = true, .write = true }) catch |err| {
@@ -368,7 +435,9 @@ pub const CreateCommand = struct {
                 std.os.exit(1);
             }
             package_json_contents = try MutableString.init(ctx.allocator, stat.size);
-            package_json_contents.inflate(package_json_file.readAll(package_json_contents.list.items) catch |err| {
+            package_json_contents.list.expandToCapacity();
+
+            _ = package_json_file.preadAll(package_json_contents.list.items, 0) catch |err| {
                 node.end();
                 progress.root.end();
                 progress.refresh();
@@ -376,7 +445,12 @@ pub const CreateCommand = struct {
                 Output.prettyErrorln("Error reading package.json: <r><red>{s}", .{@errorName(err)});
                 Output.flush();
                 std.os.exit(1);
-            }) catch unreachable;
+            };
+            // The printer doesn't truncate, so we must do so manually
+            std.os.ftruncate(package_json_file.handle, 0) catch {};
+
+            js_ast.Expr.Data.Store.create(default_allocator);
+            js_ast.Stmt.Data.Store.create(default_allocator);
         }
 
         var source = logger.Source.initPathString("package.json", package_json_contents.list.items);
@@ -679,6 +753,7 @@ pub const Example = struct {
     name: string,
     version: string,
     description: string,
+    local: bool = false,
 
     var client: HTTPClient = undefined;
     const examples_url: string = "https://registry.npmjs.org/bun-examples-all/latest";
@@ -848,7 +923,7 @@ pub const Example = struct {
         return thread.buffer;
     }
 
-    pub fn fetchAll(ctx: Command.Context) ![]const Example {
+    pub fn fetchAll(ctx: Command.Context) ![]Example {
        url = URL.parse(examples_url);
         client = HTTPClient.init(ctx.allocator, .GET, url, .{}, "");
         client.timeout = timeout;
@@ -934,19 +1009,103 @@ pub const Example = struct {
 
 pub const CreateListExamplesCommand = struct {
     pub fn exec(ctx: Command.Context) !void {
+        var filesystem = try fs.FileSystem.init1(ctx.allocator, null);
+        var env_loader: DotEnv.Loader = brk: {
+            var map = try ctx.allocator.create(DotEnv.Map);
+            map.* = DotEnv.Map.init(ctx.allocator);
+
+            break :brk DotEnv.Loader.init(map, ctx.allocator);
+        };
+
+        env_loader.loadProcess();
+
         const time = std.time.nanoTimestamp();
-        const examples = try Example.fetchAll(ctx);
+        const remote_examples = try Example.fetchAll(ctx);
+
+        var examples = std.ArrayList(Example).fromOwnedSlice(ctx.allocator, remote_examples);
+        {
+            var folders = [3]std.fs.Dir{ std.fs.Dir{ .fd = 0 }, std.fs.Dir{ .fd = 0 }, std.fs.Dir{ .fd = 0 } };
+            if (env_loader.map.get("BUN_CREATE_DIR")) |home_dir| {
+                var parts = [_]string{home_dir};
+                var outdir_path = filesystem.absBuf(&parts, &home_dir_buf);
+                folders[0] = std.fs.openDirAbsolute(outdir_path, .{ .iterate = true }) catch std.fs.Dir{ .fd = 0 };
+            }
+
+            {
+                var parts = [_]string{ filesystem.top_level_dir, BUN_CREATE_DIR };
+                var outdir_path = filesystem.absBuf(&parts, &home_dir_buf);
+                folders[1] = std.fs.openDirAbsolute(outdir_path, .{ .iterate = true }) catch std.fs.Dir{ .fd = 0 };
+            }
+
+            if (env_loader.map.get("HOME")) |home_dir| {
+                var parts = [_]string{ home_dir, BUN_CREATE_DIR };
+                var outdir_path = filesystem.absBuf(&parts, &home_dir_buf);
+                folders[2] = std.fs.openDirAbsolute(outdir_path, .{ .iterate = true }) catch std.fs.Dir{ .fd = 0 };
+            }
+
+            // subfolders with package.json
+            for (folders) |folder_| {
+                if (folder_.fd != 0) {
+                    const folder: std.fs.Dir = folder_;
+                    var iter = folder.iterate();
+
+                    loop: while (iter.next() catch null) |entry_| {
+                        const entry: std.fs.Dir.Entry = entry_;
+
+                        switch (entry.kind) {
+                            .Directory => {
+                                inline for (skip_dirs) |skip_dir| {
+                                    if (strings.eqlComptime(entry.name, skip_dir)) {
+                                        continue :loop;
+                                    }
+                                }
+
+                                std.mem.copy(u8, &home_dir_buf, entry.name);
+                                home_dir_buf[entry.name.len] = std.fs.path.sep;
+                                std.mem.copy(u8, home_dir_buf[entry.name.len + 1 ..], "package.json");
+                                home_dir_buf[entry.name.len + 1 + "package.json".len] = 0;
+
+                                var path: [:0]u8 = home_dir_buf[0 .. entry.name.len + 1 + "package.json".len :0];
+
+                                folder.accessZ(path, .{
+                                    .read = true,
+                                }) catch continue :loop;
+
+                                try examples.append(
+                                    Example{
+                                        .name = try filesystem.filename_store.append(@TypeOf(entry.name), entry.name),
+                                        .version = "",
+                                        .local = true,
+                                        .description = "",
+                                    },
+                                );
+                                continue :loop;
+                            },
+                            else => continue,
+                        }
+                    }
+                }
+            }
+        }
 
         Output.printStartEnd(time, std.time.nanoTimestamp());
         Output.prettyln(" <d>Fetched examples<r>", .{});
-        Output.prettyln("Welcome to Bun! Create a new project by pasting any of the following:\n\n", .{});
         Output.flush();
 
-        Example.print(examples);
+        Example.print(examples.items);
 
-        _ = try CreateOptions.parse(ctx.allocator, true);
+        if (env_loader.map.get("HOME")) |homedir| {
+            Output.prettyln(
+                "<d>This command is completely optional. To add a new local template, create a folder in {s}/.bun-create/. To publish a new template, git clone https://github.com/jarred-sumner/bun, add a new folder to the \"examples\" folder, and submit a PR.<r>",
+                .{homedir},
+            );
+        } else {
+            Output.prettyln(
+                "<d>This command is completely optional. To add a new local template, create a folder in $HOME/.bun-create/. To publish a new template, git clone https://github.com/jarred-sumner/bun, add a new folder to the \"examples\" folder, and submit a PR.<r>",
+                .{},
+            );
        }
 
-        Output.pretty("<d>To add a new template, git clone https://github.com/jarred-sumner/bun, add a new folder to the \"examples\" folder, and submit a PR.<r>", .{});
         Output.flush();
     }
 };
diff --git a/src/copy_file.zig b/src/copy_file.zig
new file mode 100644
index 000000000..57738363f
--- /dev/null
+++ b/src/copy_file.zig
@@ -0,0 +1,50 @@
+const std = @import("std");
+const os = std.os;
+const math = std.math;
+
+const CopyFileError = error{SystemResources} || os.CopyFileRangeError || os.SendFileError;
+
+// Transfer all the data between two file descriptors in the most efficient way.
+// The copy starts at offset 0, the initial offsets are preserved.
+// No metadata is transferred over.
+pub fn copy(fd_in: os.fd_t, fd_out: os.fd_t) CopyFileError!void {
+    if (comptime std.Target.current.isDarwin()) {
+        const rc = os.system.fcopyfile(fd_in, fd_out, null, os.system.COPYFILE_DATA);
+        switch (os.errno(rc)) {
+            .SUCCESS => return,
+            .INVAL => unreachable,
+            .NOMEM => return error.SystemResources,
+            // The source file is not a directory, symbolic link, or regular file.
+            // Try with the fallback path before giving up.
+            .OPNOTSUPP => {},
+            else => |err| return os.unexpectedErrno(err),
+        }
+    }
+
+    if (std.Target.current.os.tag == .linux) {
+        // Try copy_file_range first as that works at the FS level and is the
+        // most efficient method (if available).
+        var offset: u64 = 0;
+        cfr_loop: while (true) {
+            // The kernel checks the u64 value `offset+count` for overflow, use
+            // a 32 bit value so that the syscall won't return EINVAL except for
+            // impossibly large files (> 2^64-1 - 2^32-1).
+            const amt = try os.copy_file_range(fd_in, offset, fd_out, offset, math.maxInt(u32), 0);
+            // Terminate when no data was copied
+            if (amt == 0) break :cfr_loop;
+            offset += amt;
+        }
+        return;
+    }
+
+    // Sendfile is a zero-copy mechanism iff the OS supports it, otherwise the
+    // fallback code will copy the contents chunk by chunk.
+    const empty_iovec = [0]os.iovec_const{};
+    var offset: u64 = 0;
+    sendfile_loop: while (true) {
+        const amt = try os.sendfile(fd_out, fd_in, offset, 0, &empty_iovec, &empty_iovec, 0);
+        // Terminate when no data was copied
+        if (amt == 0) break :sendfile_loop;
+        offset += amt;
+    }
+}
diff --git a/src/string_immutable.zig b/src/string_immutable.zig
index bbdea59cf..c9168d336 100644
--- a/src/string_immutable.zig
+++ b/src/string_immutable.zig
@@ -388,6 +388,14 @@ inline fn eqlComptimeCheckLen(self: string, comptime alt: anytype, comptime chec
             const second = comptime std.mem.readIntNative(u64, alt[8..16]);
             return ((comptime !check_len) or self.len == alt.len) and first == std.mem.readIntNative(u64, self[0..8]) and second == std.mem.readIntNative(u64, self[8..16]);
         },
+        17 => {
+            const first = comptime std.mem.readIntNative(u64, alt[0..8]);
+            const second = comptime std.mem.readIntNative(u64, alt[8..16]);
+            return ((comptime !check_len) or self.len == alt.len) and
+                first == std.mem.readIntNative(u64, self[0..8]) and second ==
+                std.mem.readIntNative(u64, self[8..16]) and
+                alt[16] == self[16];
+        },
         23 => {
             const first = comptime std.mem.readIntNative(u64, alt[0..8]);
             const second = comptime std.mem.readIntNative(u64, alt[8..15]);
diff --git a/src/walker_skippable.zig b/src/walker_skippable.zig
new file mode 100644
index 000000000..809901bc0
--- /dev/null
+++ b/src/walker_skippable.zig
@@ -0,0 +1,147 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const Walker = @This();
+const path = std.fs.path;
+
+stack: std.ArrayList(StackItem),
+name_buffer: std.ArrayList(u8),
+skip_filenames: []const u64 = &[_]u64{},
+skip_dirnames: []const u64 = &[_]u64{},
+skip_all: []const u64 = &[_]u64{},
+seed: u64 = 0,
+
+const Dir = std.fs.Dir;
+
+pub const WalkerEntry = struct {
+    /// The containing directory. This can be used to operate directly on `basename`
+    /// rather than `path`, avoiding `error.NameTooLong` for deeply nested paths.
+    /// The directory remains open until `next` or `deinit` is called.
+    dir: Dir,
+    basename: []const u8,
+    path: []const u8,
+    kind: Dir.Entry.Kind,
+};
+
+const StackItem = struct {
+    iter: Dir.Iterator,
+    dirname_len: usize,
+};
+
+/// After each call to this function, and on deinit(), the memory returned
+/// from this function becomes invalid. A copy must be made in order to keep
+/// a reference to the path.
+pub fn next(self: *Walker) !?WalkerEntry {
+    while (self.stack.items.len != 0) {
+        // `top` becomes invalid after appending to `self.stack`
+        var top = &self.stack.items[self.stack.items.len - 1];
+        var dirname_len = top.dirname_len;
+        if (try top.iter.next()) |base| {
+            switch (base.kind) {
+                .Directory => {
+                    if (std.mem.indexOfScalar(u64, self.skip_dirnames, std.hash.Wyhash.hash(self.seed, base.name)) != null) continue;
+                },
+                .File => {
+                    if (std.mem.indexOfScalar(u64, self.skip_filenames, std.hash.Wyhash.hash(self.seed, base.name)) != null) continue;
+                },
+
+                // we don't know what it is for a symlink
+                .SymLink => {
+                    if (std.mem.indexOfScalar(u64, self.skip_all, std.hash.Wyhash.hash(self.seed, base.name)) != null) continue;
+                },
+
+                else => {},
+            }
+
+            self.name_buffer.shrinkRetainingCapacity(dirname_len);
+            if (self.name_buffer.items.len != 0) {
+                try self.name_buffer.append(path.sep);
+                dirname_len += 1;
+            }
+            try self.name_buffer.appendSlice(base.name);
+            if (base.kind == .Directory) {
+                var new_dir = top.iter.dir.openDir(base.name, .{ .iterate = true }) catch |err| switch (err) {
+                    error.NameTooLong => unreachable, // no path sep in base.name
+                    else => |e| return e,
+                };
+                {
+                    errdefer new_dir.close();
+                    try self.stack.append(StackItem{
+                        .iter = new_dir.iterate(),
+                        .dirname_len = self.name_buffer.items.len,
+                    });
+                    top = &self.stack.items[self.stack.items.len - 1];
+                }
+            }
+            return WalkerEntry{
+                .dir = top.iter.dir,
+                .basename = self.name_buffer.items[dirname_len..],
+                .path = self.name_buffer.items,
+                .kind = base.kind,
+            };
+        } else {
+            var item = self.stack.pop();
+            if (self.stack.items.len != 0) {
+                item.iter.dir.close();
+            }
+        }
+    }
+    return null;
+}
+
+pub fn deinit(self: *Walker) void {
+    while (self.stack.popOrNull()) |*item| {
+        if (self.stack.items.len != 0) {
+            item.iter.dir.close();
+        }
+    }
+    self.stack.deinit();
+    self.name_buffer.allocator.free(self.skip_all);
+    self.name_buffer.deinit();
+}
+
+/// Recursively iterates over a directory.
+/// `self` must have been opened with `OpenDirOptions{.iterate = true}`.
+/// Must call `Walker.deinit` when done.
+/// The order of returned file system entries is undefined.
+/// `self` will not be closed after walking it.
+pub fn walk(
+    self: Dir,
+    allocator: *Allocator,
+    skip_filenames: []const []const u8,
+    skip_dirnames: []const []const u8,
+) !Walker {
+    var name_buffer = std.ArrayList(u8).init(allocator);
+    errdefer name_buffer.deinit();
+
+    var stack = std.ArrayList(Walker.StackItem).init(allocator);
+    errdefer stack.deinit();
+
+    var skip_names = try allocator.alloc(u64, skip_filenames.len + skip_dirnames.len);
+    const seed = skip_filenames.len + skip_dirnames.len;
+    var skip_name_i: usize = 0;
+
+    for (skip_filenames) |name| {
+        skip_names[skip_name_i] = std.hash.Wyhash.hash(seed, name);
+        skip_name_i += 1;
+    }
+    var skip_filenames_ = skip_names[0..skip_name_i];
+    var skip_dirnames_ = skip_names[skip_name_i..];
+
+    for (skip_dirnames) |name, i| {
+        skip_dirnames_[i] = std.hash.Wyhash.hash(seed, name);
+    }
+
+    try stack.append(Walker.StackItem{
+        .iter = self.iterate(),
+        .dirname_len = 0,
+    });
+
+    return Walker{
+        .stack = stack,
+        .name_buffer = name_buffer,
+        .skip_all = skip_names,
+        .seed = seed,
+        .skip_filenames = skip_filenames_,
+        .skip_dirnames = skip_dirnames_,
+    };
+}
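The two new modules fit together roughly as follows when a local template is copied into the destination directory: `walker_skippable.zig` yields files while skipping `node_modules`, `.git`, and lockfiles, and `copy_file.zig` performs the per-file copy via `fcopyfile`/`copy_file_range`/`sendfile`. A usage sketch modeled on the call sites in `create_command.zig` above — error handling, progress reporting, and the chmod of the destination file are omitted, and the import paths are illustrative:

```zig
const std = @import("std");
const Walker = @import("walker_skippable.zig"); // from src/cli: @import("../walker_skippable.zig")
const CopyFile = @import("copy_file.zig");

// Usage sketch only: walk `template_dir`, skip lockfiles and the skip-dirs,
// and copy every regular file into `destination_dir`, creating parent
// directories as needed.
pub fn copyTemplate(
    allocator: *std.mem.Allocator,
    template_dir: std.fs.Dir,
    destination_dir: std.fs.Dir,
) !void {
    const skip_dirs = &[_][]const u8{ "node_modules", ".git" };
    const skip_files = &[_][]const u8{ "package-lock.json", "yarn.lock", "pnpm-lock.yaml" };

    var walker = try Walker.walk(template_dir, allocator, skip_files, skip_dirs);
    defer walker.deinit();

    while (try walker.next()) |entry| {
        if (entry.kind != .File) continue;

        // Ensure the parent directory exists in the destination.
        if (std.fs.path.dirname(entry.path)) |parent| destination_dir.makePath(parent) catch {};

        var infile = try entry.dir.openFile(entry.basename, .{ .read = true });
        defer infile.close();
        var outfile = try destination_dir.createFile(entry.path, .{});
        defer outfile.close();

        // fcopyfile (macOS) / copy_file_range (Linux) / sendfile fallback;
        // the real call site falls back to Dir.copyFile if this errors.
        try CopyFile.copy(infile.handle, outfile.handle);
    }
}
```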