author     Vaughan Rouesnel <vrouesnel@gmail.com>  2022-11-12 01:28:37 +0100
committer  Vaughan Rouesnel <vrouesnel@gmail.com>  2022-11-12 01:28:37 +0100
commit     a2a0480b5530f03432e89c581a4d64f2a30e135d (patch)
tree       d65ea9dc8a7ea1f652feda2cc085df7634f67769
parent     693167c1a63eaa3341ac42da291e523e7d2d0cc9 (diff)
download   bun-a2a0480b5530f03432e89c581a4d64f2a30e135d.tar.gz
           bun-a2a0480b5530f03432e89c581a4d64f2a30e135d.tar.zst
           bun-a2a0480b5530f03432e89c581a4d64f2a30e135d.zip
Fix: shadowing
-rw-r--r--  src/bun.js/unbounded_queue.zig  38
1 file changed, 19 insertions(+), 19 deletions(-)
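
The commit renames the comptime-derived field-name constant from next to _next at each use site, so that it no longer shadows another next identifier. For readers unfamiliar with the pattern the diff touches, here is a minimal, self-contained sketch of how a meta.FieldEnum tag selects an intrusive link field and how @field reads and writes it through a comptime-known name. The Node type and link_name constant are illustrative, not taken from the repository:

    const std = @import("std");
    const meta = std.meta;

    const Node = struct {
        value: u32,
        next: ?*Node = null, // intrusive link, selected at comptime below
    };

    // Resolve the FieldEnum tag .next to the field's name ("next"),
    // mirroring the _next constant the queue derives from next_field.
    const link_name = meta.fieldInfo(Node, .next).name;

    test "@field with a comptime-known field name" {
        var n = Node{ .value = 7 };
        @field(n, link_name) = null; // equivalent to n.next = null
        try std.testing.expect(@field(n, link_name) == null);
    }
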
diff --git a/src/bun.js/unbounded_queue.zig b/src/bun.js/unbounded_queue.zig
index 86f1e4462..96323b288 100644
--- a/src/bun.js/unbounded_queue.zig
+++ b/src/bun.js/unbounded_queue.zig
@@ -52,45 +52,45 @@ pub fn UnboundedQueue(comptime T: type, comptime next_field: meta.FieldEnum(T))
         count: usize = 0,
         front: T align(queue_padding_length) = init: {
             var stub: T = undefined;
-            @field(stub, next) = null;
+            @field(stub, _next) = null;
             break :init stub;
         },
         pub fn push(self: *Self, src: *T) void {
             assert(@atomicRmw(usize, &self.count, .Add, 1, .Release) >= 0);
-            @field(src, next) = null;
+            @field(src, _next) = null;
             const old_back = @atomicRmw(?*T, &self.back, .Xchg, src, .AcqRel) orelse &self.front;
-            @field(old_back, next) = src;
+            @field(old_back, _next) = src;
         }
         pub fn pushBatch(self: *Self, first: *T, last: *T, count: usize) void {
             assert(@atomicRmw(usize, &self.count, .Add, count, .Release) >= 0);
-            @field(last, next) = null;
+            @field(last, _next) = null;
             const old_back = @atomicRmw(?*T, &self.back, .Xchg, last, .AcqRel) orelse &self.front;
-            @field(old_back, next) = first;
+            @field(old_back, _next) = first;
         }
         pub fn pop(self: *Self) ?*T {
-            const first = @atomicLoad(?*T, &@field(self.front, next), .Acquire) orelse return null;
-            if (@atomicLoad(?*T, &@field(first, next), .Acquire)) |next_item| {
-                @atomicStore(?*T, &@field(self.front, next), next_item, .Monotonic);
+            const first = @atomicLoad(?*T, &@field(self.front, _next), .Acquire) orelse return null;
+            if (@atomicLoad(?*T, &@field(first, _next), .Acquire)) |next_item| {
+                @atomicStore(?*T, &@field(self.front, _next), next_item, .Monotonic);
                 assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1);
                 return first;
             }
             const last = @atomicLoad(?*T, &self.back, .Acquire) orelse &self.front;
             if (first != last) return null;
-            @atomicStore(?*T, &@field(self.front, next), null, .Monotonic);
+            @atomicStore(?*T, &@field(self.front, _next), null, .Monotonic);
             if (@cmpxchgStrong(?*T, &self.back, last, &self.front, .AcqRel, .Acquire) == null) {
                 assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1);
                 return first;
             }
-            var next_item = @atomicLoad(?*T, &@field(first, next), .Acquire);
+            var next_item = @atomicLoad(?*T, &@field(first, _next), .Acquire);
             while (next_item == null) : (atomic.spinLoopHint()) {
-                next_item = @atomicLoad(?*T, &@field(first, next), .Acquire);
+                next_item = @atomicLoad(?*T, &@field(first, _next), .Acquire);
             }
-            @atomicStore(?*T, &@field(self.front, next), next_item, .Monotonic);
+            @atomicStore(?*T, &@field(self.front, _next), next_item, .Monotonic);
             assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1);
             return first;
         }
@@ -98,10 +98,10 @@ pub fn UnboundedQueue(comptime T: type, comptime next_field: meta.FieldEnum(T))
         pub fn popBatch(self: *Self) Self.Batch {
             var batch: Self.Batch = .{};
-            var front = @atomicLoad(?*T, &@field(self.front, next), .Acquire) orelse return batch;
+            var front = @atomicLoad(?*T, &@field(self.front, _next), .Acquire) orelse return batch;
             batch.front = front;
-            var next_item = @atomicLoad(?*T, &@field(front, next), .Acquire);
+            var next_item = @atomicLoad(?*T, &@field(front, _next), .Acquire);
             while (next_item) |next_node| : (next_item = @atomicLoad(?*T, &@field(next_node, next), .Acquire)) {
                 batch.count += 1;
                 batch.last = front;
@@ -111,12 +111,12 @@ pub fn UnboundedQueue(comptime T: type, comptime next_field: meta.FieldEnum(T))
             const last = @atomicLoad(?*T, &self.back, .Acquire) orelse &self.front;
             if (front != last) {
-                @atomicStore(?*T, &@field(self.front, next), front, .Release);
+                @atomicStore(?*T, &@field(self.front, _next), front, .Release);
                 assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .Monotonic) >= batch.count);
                 return batch;
             }
-            @atomicStore(?*T, &@field(self.front, next), null, .Monotonic);
+            @atomicStore(?*T, &@field(self.front, _next), null, .Monotonic);
             if (@cmpxchgStrong(?*T, &self.back, last, &self.front, .AcqRel, .Acquire) == null) {
                 batch.count += 1;
                 batch.last = front;
@@ -124,13 +124,13 @@ pub fn UnboundedQueue(comptime T: type, comptime next_field: meta.FieldEnum(T))
                 return batch;
             }
-            next_item = @atomicLoad(?*T, &@field(front, next), .Acquire);
+            next_item = @atomicLoad(?*T, &@field(front, _next), .Acquire);
             while (next_item == null) : (atomic.spinLoopHint()) {
-                next_item = @atomicLoad(?*T, &@field(front, next), .Acquire);
+                next_item = @atomicLoad(?*T, &@field(front, _next), .Acquire);
             }
             batch.count += 1;
-            @atomicStore(?*T, &@field(self.front, next), next_item, .Monotonic);
+            @atomicStore(?*T, &@field(self.front, _next), next_item, .Monotonic);
             batch.last = front;
             assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .Monotonic) >= batch.count);
             return batch;
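
The queue touched by this diff is the usual intrusive, unbounded multi-producer single-consumer design: push and pushBatch atomically exchange the back pointer and are safe to call from many producer threads, while pop and popBatch assume a single consumer; the spin loops above cover the brief window in which a producer has swapped back but has not yet linked its node. A minimal usage sketch under those assumptions; the Task type, import path, and field choice are illustrative, not from the repository:

    const std = @import("std");

    // Illustrative element type: the queue is intrusive, so each element
    // carries its own link field, selected via the FieldEnum parameter.
    const Task = struct {
        id: u32,
        next: ?*Task = null,
    };

    // Hypothetical import path; the real file lives at src/bun.js/unbounded_queue.zig.
    const UnboundedQueue = @import("unbounded_queue.zig").UnboundedQueue;

    pub fn main() void {
        // Every field of the queue struct has a default, so .{} initializes it.
        var queue: UnboundedQueue(Task, .next) = .{};

        var a = Task{ .id = 1 };
        var b = Task{ .id = 2 };

        // Any number of producer threads may push concurrently...
        queue.push(&a);
        queue.push(&b);

        // ...but only a single consumer thread may pop.
        while (queue.pop()) |task| {
            std.debug.print("popped task {d}\n", .{task.id});
        }
    }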