-rw-r--r--  src/cache.zig  | 39
-rw-r--r--  src/fs.zig     | 48
2 files changed, 68 insertions, 19 deletions
diff --git a/src/cache.zig b/src/cache.zig
index 6a13726d6..b9ad273d1 100644
--- a/src/cache.zig
+++ b/src/cache.zig
@@ -61,6 +61,7 @@ pub const Fs = struct {
macro_shared_buffer: MutableString,
is_macro_mode: bool = false,
+ stream: bool = false,
// When we are in a macro, the shared buffer may be in use by the in-progress macro.
// so we have to dynamically switch it out.
@@ -80,7 +81,7 @@ pub const Fs = struct {
}
pub fn readFileShared(
- _: *Fs,
+ this: *Fs,
_fs: *fs.FileSystem,
path: [:0]const u8,
_: StoredFileDescriptorType,
@@ -100,12 +101,20 @@ pub const Fs = struct {
}
}
- const file = rfs.readFileWithHandle(path, null, file_handle, true, shared) catch |err| {
- if (comptime Environment.isDebug) {
- Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ const file = if (this.stream)
+ rfs.readFileWithHandle(path, null, file_handle, true, shared, true) catch |err| {
+ if (comptime Environment.isDebug) {
+ Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ }
+ return err;
}
- return err;
- };
+ else
+ rfs.readFileWithHandle(path, null, file_handle, true, shared, false) catch |err| {
+ if (comptime Environment.isDebug) {
+ Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ }
+ return err;
+ };
return Entry{
.contents = file.contents,
@@ -151,12 +160,20 @@ pub const Fs = struct {
}
}
- const file = rfs.readFileWithHandle(path, null, file_handle, use_shared_buffer, c.sharedBuffer()) catch |err| {
- if (Environment.isDebug) {
- Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ const file = if (c.stream)
+ rfs.readFileWithHandle(path, null, file_handle, use_shared_buffer, c.sharedBuffer(), true) catch |err| {
+ if (Environment.isDebug) {
+ Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ }
+ return err;
}
- return err;
- };
+ else
+ rfs.readFileWithHandle(path, null, file_handle, use_shared_buffer, c.sharedBuffer(), false) catch |err| {
+ if (Environment.isDebug) {
+ Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
+ }
+ return err;
+ };
return Entry{
.contents = file.contents,
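Aside on the call-site duplication above: readFileWithHandle takes stream as a comptime parameter (see the fs.zig hunk below), so the runtime this.stream / c.stream flag cannot be forwarded directly; each call site branches and passes a literal true or false. A minimal standalone sketch of that shape, with a hypothetical describeRead helper that is not part of this diff:

const std = @import("std");

// Hypothetical helper with a comptime flag, mirroring the shape of
// readFileWithHandle(..., comptime stream: bool). Each literal passed in
// produces its own specialized copy of the function at compile time.
fn describeRead(comptime stream: bool) []const u8 {
    return if (comptime stream) "grow-and-reread loop" else "single pread";
}

pub fn main() void {
    // A runtime bool cannot be passed to a comptime parameter, so the call
    // site branches and passes a literal in each arm -- the same duplication
    // as the `if (this.stream) ... else ...` blocks in cache.zig above.
    const runtime_stream = std.time.timestamp() > 0; // stand-in runtime value
    const strategy = if (runtime_stream) describeRead(true) else describeRead(false);
    std.debug.print("read strategy: {s}\n", .{strategy});
}

Because stream is comptime, the `if (comptime stream)` block inside the read loop in fs.zig compiles away entirely for the non-streaming case.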
diff --git a/src/fs.zig b/src/fs.zig
index 7559e5637..e4a2268dc 100644
--- a/src/fs.zig
+++ b/src/fs.zig
@@ -915,6 +915,7 @@ pub const FileSystem = struct {
file: std.fs.File,
comptime use_shared_buffer: bool,
shared_buffer: *MutableString,
+ comptime stream: bool,
) !File {
FileSystem.setMaxFd(file.handle);
@@ -944,19 +945,50 @@ pub const FileSystem = struct {
// As a mitigation, we can just keep one buffer forever and re-use it for the parsed files
if (use_shared_buffer) {
shared_buffer.reset();
+ var offset: u64 = 0;
try shared_buffer.growBy(size);
shared_buffer.list.expandToCapacity();
- // We use pread to ensure if the file handle was open, it doesn't seek from the last position
- var read_count = file.preadAll(shared_buffer.list.items, 0) catch |err| {
- fs.readFileError(path, err);
- return err;
- };
- shared_buffer.list.items = shared_buffer.list.items[0..read_count];
- file_contents = shared_buffer.list.items;
+
+ // If you press save on a large file, we might not read all the
+ // bytes in the first few pread() calls. We only handle this when
+ // streaming because we assume it only realistically happens
+ // during HMR.
+ while (true) {
+
+ // We use pread to ensure if the file handle was open, it doesn't seek from the last position
+ const read_count = file.preadAll(shared_buffer.list.items[offset..], offset) catch |err| {
+ fs.readFileError(path, err);
+ return err;
+ };
+ shared_buffer.list.items = shared_buffer.list.items[0 .. read_count + offset];
+ file_contents = shared_buffer.list.items;
+
+ if (comptime stream) {
+ // Check again whether the file size has changed since the last
+ // stat(); another reason to only do this when streaming.
+ const new_size = file.getEndPos() catch |err| {
+ fs.readFileError(path, err);
+ return err;
+ };
+
+ offset += read_count;
+
+ // Don't loop forever if we're no longer reading anything.
+ if (read_count == 0) break;
+
+ if (offset < new_size) {
+ try shared_buffer.growBy(new_size - size);
+ shared_buffer.list.expandToCapacity();
+ size = new_size;
+ continue;
+ }
+ }
+ break;
+ }
} else {
// We use pread to ensure if the file handle was open, it doesn't seek from the last position
var buf = try fs.allocator.alloc(u8, size);
- var read_count = file.preadAll(buf, 0) catch |err| {
+ const read_count = file.preadAll(buf, 0) catch |err| {
fs.readFileError(path, err);
return err;
};
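The stream path above keeps calling preadAll at a growing offset and re-checks the file size with getEndPos, so a file that is still being written (the HMR save case) gets read to the end instead of being truncated at its initially stat()ed size. A minimal standalone sketch of that loop, using a plain allocator-owned slice in place of Bun's MutableString and assuming a 64-bit target so the u64 sizes coerce to usize; readGrowingFile is a hypothetical name, not part of this diff:

const std = @import("std");

// Read a file that may still be growing (e.g. an editor is mid-save).
fn readGrowingFile(allocator: std.mem.Allocator, file: std.fs.File) ![]u8 {
    var buf = try allocator.alloc(u8, try file.getEndPos());
    errdefer allocator.free(buf);

    var offset: usize = 0;
    while (true) {
        // pread leaves the handle's seek position untouched
        const read_count = try file.preadAll(buf[offset..], offset);
        offset += read_count;

        // no progress: stop instead of looping forever
        if (read_count == 0) break;

        // re-stat: did the file grow while we were reading it?
        const new_size = try file.getEndPos();
        if (offset < new_size) {
            // grow the buffer and keep reading from the current offset
            buf = try allocator.realloc(buf, new_size);
            continue;
        }
        break;
    }
    // trim to the bytes that were actually read
    return try allocator.realloc(buf, offset);
}

In the patch itself this behavior is gated behind the comptime stream flag, so the non-HMR path stays a single pread with no extra stat() calls.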