1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
|
const Lock = @import("../lock.zig").Lock;
const std = @import("std");
const MutableString = @import("../global.zig").MutableString;
const getAllocator = @import("../http_client_async.zig").getAllocator;
const ZlibPool = @This();
const Zlib = @import("../zlib.zig");
// Guards the pool. NOTE(review): neither get() nor put() visibly acquires
// this lock — confirm whether callers are expected to serialize access
// externally, or whether locking was simply never wired up.
lock: Lock = Lock.init(),
// Free list of reusable string buffers; put() resets each buffer before
// returning it here, so every pooled entry is empty.
items: std.ArrayList(*MutableString),
// Allocator backing the `items` list bookkeeping. NOTE(review): get()
// allocates pooled strings via getAllocator() instead of this field —
// verify which allocator owns the MutableStrings before freeing them.
allocator: std.mem.Allocator,
// Process-wide singleton; `instance` is valid only after external code
// initializes it and sets `loaded`.
pub var instance: ZlibPool = undefined;
pub var loaded: bool = false;
/// Create an empty pool whose free-list bookkeeping uses `allocator`.
pub fn init(allocator: std.mem.Allocator) ZlibPool {
    return .{
        .items = std.ArrayList(*MutableString).init(allocator),
        .allocator = allocator,
    };
}
/// Hand out an empty MutableString, reusing a pooled one when available.
/// Caller returns it with put(); a freshly allocated string is created
/// through getAllocator() when the pool is empty.
/// NOTE(review): allocation deliberately(?) bypasses this.allocator in
/// favor of getAllocator() — confirm, since whoever frees these must use
/// the same allocator. NOTE(review): this.lock is not taken here —
/// confirm callers serialize access.
pub fn get(this: *ZlibPool) !*MutableString {
    // Pool empty: allocate a new, zero-capacity string.
    if (this.items.items.len == 0) {
        const mutable = try getAllocator().create(MutableString);
        // Don't leak the container if initializing its contents fails.
        errdefer getAllocator().destroy(mutable);
        mutable.* = try MutableString.init(getAllocator(), 0);
        return mutable;
    }
    // Reuse the most recently returned buffer (already reset by put()).
    return this.items.pop();
}
/// Return a buffer to the pool for reuse by a later get().
/// The buffer is emptied first so pooled entries are always blank.
pub fn put(this: *ZlibPool, mutable: *MutableString) !void {
    mutable.reset();
    return this.items.append(mutable);
}
/// Inflate `compressed_data` into `output`'s backing list, synchronously,
/// on the calling thread. Propagates Zlib.ZlibError on malformed input or
/// allocation failure inside the reader.
/// NOTE(review): `reader` is never deinit-ed on either path; if
/// ZlibReaderArrayList owns allocations beyond `output.list`, this leaks —
/// confirm against its API.
/// The commented-out code below is a planned off-thread path for large
/// payloads (see heuristic note); kept here as-is pending implementation.
pub fn decompress(compressed_data: []const u8, output: *MutableString) Zlib.ZlibError!void {
    // Heuristic: if we have more than 128 KB of data to decompress
    // it may take 1ms or so
    // We must keep the network thread unblocked as often as possible
    // So if we have more than 50 KB of data to decompress, we do it off the network thread
    // if (compressed_data.len < 50_000) {
    var reader = try Zlib.ZlibReaderArrayList.init(compressed_data, &output.list, getAllocator());
    try reader.readAll();
    return;
    // }
    // var task = try DecompressionTask.get(default_allocator);
    // defer task.release();
    // task.* = DecompressionTask{
    //     .data = compressed_data,
    //     .output = output,
    //     .event_fd = AsyncIO.global.eventfd(),
    // };
    // task.scheduleAndWait();
    // if (task.err) |err| {
    //     return @errSetCast(Zlib.ZlibError, err);
    // }
}
|