aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/api/schema.d.ts3
-rw-r--r--src/api/schema.js14
-rw-r--r--src/api/schema.peechy5
-rw-r--r--src/api/schema.zig26
-rw-r--r--src/bundler.zig85
-rw-r--r--src/cli.zig14
-rw-r--r--src/deps/picohttp.zig63
-rw-r--r--src/deps/picohttpparser.c665
-rw-r--r--src/deps/picohttpparser.h87
-rw-r--r--src/exact_size_matcher.zig53
-rw-r--r--src/http.zig304
-rw-r--r--src/http/mime_type.zig99
-rw-r--r--src/options.zig56
-rw-r--r--src/string_immutable.zig60
14 files changed, 1379 insertions, 155 deletions
diff --git a/src/api/schema.d.ts b/src/api/schema.d.ts
index c78d7fc6c..23eafa37b 100644
--- a/src/api/schema.d.ts
+++ b/src/api/schema.d.ts
@@ -126,8 +126,9 @@ type uint32 = number;
loader_values?: Loader[];
main_fields?: string[];
platform?: Platform;
- watch?: boolean;
+ serve?: boolean;
extension_order?: string[];
+ public_dir?: string;
}
export interface FileHandle {
diff --git a/src/api/schema.js b/src/api/schema.js
index 43670dff0..0d7d942b3 100644
--- a/src/api/schema.js
+++ b/src/api/schema.js
@@ -228,7 +228,7 @@ function decodeTransformOptions(bb) {
break;
case 18:
- result["watch"] = !!bb.readByte();
+ result["serve"] = !!bb.readByte();
break;
case 19:
@@ -237,6 +237,10 @@ function decodeTransformOptions(bb) {
for (var i = 0; i < length; i++) values[i] = bb.readString();
break;
+ case 20:
+ result["public_dir"] = bb.readString();
+ break;
+
default:
throw new Error("Attempted to parse invalid message");
}
@@ -393,7 +397,7 @@ if (encoded === void 0) throw new Error("Invalid value " + JSON.stringify(value)
bb.writeByte(encoded);
}
- var value = message["watch"];
+ var value = message["serve"];
if (value != null) {
bb.writeByte(18);
bb.writeByte(value);
@@ -409,6 +413,12 @@ bb.writeByte(encoded);
bb.writeString(value);
}
}
+
+ var value = message["public_dir"];
+ if (value != null) {
+ bb.writeByte(20);
+ bb.writeString(value);
+ }
bb.writeByte(0);
}
diff --git a/src/api/schema.peechy b/src/api/schema.peechy
index 6c6889f00..9ba51c3d6 100644
--- a/src/api/schema.peechy
+++ b/src/api/schema.peechy
@@ -49,6 +49,7 @@ message TransformOptions {
string public_url = 4;
string absolute_working_dir = 5;
+
string[] define_keys = 6;
string[] define_values = 7;
@@ -68,9 +69,11 @@ message TransformOptions {
string[] main_fields = 16;
Platform platform = 17;
- bool watch = 18;
+ bool serve = 18;
string[] extension_order = 19;
+
+ string public_dir = 20;
}
struct FileHandle {
diff --git a/src/api/schema.zig b/src/api/schema.zig
index bba5a3c94..262c0d01c 100644
--- a/src/api/schema.zig
+++ b/src/api/schema.zig
@@ -200,12 +200,15 @@ pub const Api = struct {
/// platform
platform: ?Platform = null,
- /// watch
- watch: ?bool = null,
+ /// serve
+ serve: ?bool = null,
/// extension_order
extension_order: []const []const u8,
+ /// public_dir
+ public_dir: ?[]const u8 = null,
+
pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!TransformOptions {
var obj = std.mem.zeroes(TransformOptions);
try update(&obj, allocator, reader);
@@ -381,7 +384,7 @@ pub const Api = struct {
result.platform = try reader.readEnum(Platform, .Little);
},
18 => {
- result.watch = (try reader.readByte()) == @as(u8, 1);
+ result.serve = (try reader.readByte()) == @as(u8, 1);
},
19 => {
{
@@ -398,6 +401,13 @@ pub const Api = struct {
}
}
},
+ 20 => {
+ length = try reader.readIntNative(u32);
+ if ((result.public_dir orelse &([_]u8{})).len != length) {
+ result.public_dir = try allocator.alloc(u8, length);
+ }
+ _ = try reader.readAll(result.public_dir.?);
+ },
else => {
return error.InvalidMessage;
},
@@ -559,9 +569,9 @@ pub const Api = struct {
try writer.writeIntNative(@TypeOf(@enumToInt(result.platform orelse unreachable)), @enumToInt(result.platform orelse unreachable));
}
- if (result.watch) |watch| {
+ if (result.serve) |serve| {
try writer.writeByte(18);
- try writer.writeByte(@boolToInt(watch));
+ try writer.writeByte(@boolToInt(serve));
}
if (result.extension_order) |extension_order| {
@@ -576,6 +586,12 @@ pub const Api = struct {
}
}
}
+
+ if (result.public_dir) |public_dir| {
+ try writer.writeByte(20);
+ try writer.writeIntNative(u32, @intCast(u32, public_dir.len));
+ try writer.writeAll(std.mem.sliceAsBytes(public_dir));
+ }
try writer.writeByte(0);
return;
}
diff --git a/src/bundler.zig b/src/bundler.zig
index 6068c3a77..d0af7bffc 100644
--- a/src/bundler.zig
+++ b/src/bundler.zig
@@ -21,6 +21,43 @@ const ThreadPool = sync.ThreadPool;
const ThreadSafeHashMap = @import("./thread_safe_hash_map.zig");
const ImportRecord = @import("./import_record.zig").ImportRecord;
const allocators = @import("./allocators.zig");
+const MimeType = @import("./http/mime_type.zig");
+
+pub const ServeResult = struct {
+ errors: []logger.Msg = &([_]logger.Msg),
+ warnings: []logger.Msg = &([_]logger.Msg),
+ output: Output,
+ status: Status,
+
+ mime_type: MimeType,
+
+ pub const Status = enum {
+ success,
+ build_failed,
+ not_found,
+ permission_error,
+ };
+
+ // Either we:
+ // - send pre-buffered asset body
+ // - stream a file from the file system
+ pub const Output = union(Tag) {
+ file: File,
+ build: options.OutputFile,
+ none: u0,
+
+ pub const Tag = enum {
+ file,
+ build,
+ none,
+ };
+
+ pub const File = struct {
+ absolute_path: string,
+ };
+ };
+};
+
// const BundleMap =
const ResolveResults = ThreadSafeHashMap.ThreadSafeStringHashMap(Resolver.Resolver.Result);
pub const Bundler = struct {
@@ -50,7 +87,7 @@ pub const Bundler = struct {
log: *logger.Log,
opts: Api.TransformOptions,
) !Bundler {
- var fs = try Fs.FileSystem.init1(allocator, opts.absolute_working_dir, opts.watch orelse false);
+ var fs = try Fs.FileSystem.init1(allocator, opts.absolute_working_dir, opts.serve orelse false);
const bundle_options = try options.BundleOptions.fromApi(allocator, fs, log, opts);
relative_paths_list = ImportPathsList.init(allocator);
@@ -140,7 +177,7 @@ pub const Bundler = struct {
}
// Step 1. Parse & scan
- const result = bundler.parse(resolve_result.path_pair.primary) orelse return null;
+ const result = bundler.parse(resolve_result.path_pair.primary, bundler.options.loaders.get(resolve_result.path_pair.primary.text) orelse .file) orelse return null;
switch (result.loader) {
.jsx, .js, .ts, .tsx => {
@@ -197,7 +234,7 @@ pub const Bundler = struct {
ast: js_ast.Ast,
};
pub var tracing_start: i128 = if (enableTracing) 0 else undefined;
- pub fn parse(bundler: *Bundler, path: Fs.Path) ?ParseResult {
+ pub fn parse(bundler: *Bundler, path: Fs.Path, loader: options.Loader) ?ParseResult {
if (enableTracing) {
tracing_start = std.time.nanoTimestamp();
}
@@ -207,7 +244,6 @@ pub const Bundler = struct {
}
}
var result: ParseResult = undefined;
- const loader: options.Loader = bundler.options.loaders.get(path.name.ext) orelse .file;
const entry = bundler.resolver.caches.fs.readFile(bundler.fs, path.text) catch return null;
const source = logger.Source.initFile(Fs.File{ .path = path, .contents = entry.contents }, bundler.allocator) catch return null;
@@ -249,22 +285,48 @@ pub const Bundler = struct {
return null;
}
+ pub fn buildServeResultOutput(bundler: *Bundler, resolve: Resolver.Resolver.Result, loader: options.Loader) !ServeResult.Output {
+ switch (loader) {
+ .js, .jsx, .ts, .tsx, .json => {
+ return ServeResult.Output{ .built = bundler.buildWithResolveResult(resolve) orelse error.BuildFailed };
+ },
+ else => {
+ return ServeResult.Output{ .file = ServeResult.Output.File{ .absolute_path = resolve.path_pair.primary.text } };
+ },
+ }
+ }
+
+ // We try to be mostly stateless when serving
+ // This means we need a slightly different resolver setup
+ // Essentially:
pub fn buildFile(
bundler: *Bundler,
+ log: *logger.Log,
allocator: *std.mem.Allocator,
relative_path: string,
- ) !options.TransformResult {
- var log = logger.Log.init(bundler.allocator);
+ extension: string,
+ ) !ServeResult {
var original_resolver_logger = bundler.resolver.log;
var original_bundler_logger = bundler.log;
defer bundler.log = original_bundler_logger;
defer bundler.resolver.log = original_resolver_logger;
bundler.log = log;
bundler.resolver.log = log;
- const resolved = try bundler.resolver.resolve(bundler.fs.top_level_dir, relative_path, .entry_point);
- const output_file = try bundler.buildWithResolveResult(resolved);
- var output_files = try allocator.alloc(options.OutputFile, 1);
- return try options.TransformResult.init(output_files, log, allocator);
+
+ var needs_resolve = false;
+
+ // is it missing an extension?
+ // it either:
+ // - needs to be resolved
+ // - is html
+ // We should first check if it's html.
+ if (extension.len == 0) {}
+
+ const initial_loader = bundler.options.loaders.get(extension);
+
+ const resolved = try bundler.resolver.resolve(bundler.fs.top_level_dir, relative_path, .entry_point) orelse {};
+
+ const loader = bundler.options.loaders.get(resolved.path_pair.primary.text) orelse .file;
}
pub fn bundle(
@@ -432,8 +494,7 @@ pub const Transformer = struct {
var output_i: usize = 0;
var chosen_alloc: *std.mem.Allocator = allocator;
var arena: std.heap.ArenaAllocator = undefined;
- const watch = opts.watch orelse false;
- const use_arenas = opts.entry_points.len > 8 or watch;
+ const use_arenas = opts.entry_points.len > 8;
for (opts.entry_points) |entry_point, i| {
if (use_arenas) {
diff --git a/src/cli.zig b/src/cli.zig
index decfbec6b..1c7d4e886 100644
--- a/src/cli.zig
+++ b/src/cli.zig
@@ -15,6 +15,7 @@ usingnamespace @import("ast/base.zig");
usingnamespace @import("defines.zig");
const panicky = @import("panic_handler.zig");
const Api = @import("api/schema.zig").Api;
+const resolve_path = @import("./resolver/resolve_path.zig");
const clap = @import("clap");
@@ -114,6 +115,8 @@ pub const Cli = struct {
clap.parseParam("-i, --inject <STR>... Inject module at the top of every file") catch unreachable,
clap.parseParam("--cwd <STR> Absolute path to resolve entry points from. Defaults to cwd") catch unreachable,
clap.parseParam("--public-url <STR> Rewrite import paths to start with --public-url. Useful for web browsers.") catch unreachable,
+ clap.parseParam("--serve Start a local dev server. This also sets resolve to \"lazy\".") catch unreachable,
+ clap.parseParam("--public-dir <STR> Top-level directory for .html files, fonts, images, or anything external. Only relevant with --serve. Defaults to \"<cwd>/public\", to match create-react-app and Next.js") catch unreachable,
clap.parseParam("--jsx-factory <STR> Changes the function called when compiling JSX elements using the classic JSX runtime") catch unreachable,
clap.parseParam("--jsx-fragment <STR> Changes the function called when compiling JSX fragments using the classic JSX runtime") catch unreachable,
clap.parseParam("--jsx-import-source <STR> Declares the module specifier to be used for importing the jsx and jsxs factory functions. Default: \"react\"") catch unreachable,
@@ -154,6 +157,8 @@ pub const Cli = struct {
var entry_points = args.positionals();
var inject = args.options("--inject");
var output_dir = args.option("--outdir");
+ const serve = args.flag("--serve");
+
var write = entry_points.len > 1 or output_dir != null;
if (write and output_dir == null) {
var _paths = [_]string{ cwd, "out" };
@@ -259,7 +264,9 @@ pub const Cli = struct {
.define_values = define_values,
.loader_keys = loader_keys,
.loader_values = loader_values,
+ .public_dir = if (args.option("--public-dir")) |public_dir| allocator.dupe(u8, public_dir) catch unreachable else null,
.write = write,
+ .serve = serve,
.inject = inject,
.entry_points = entry_points,
.extension_order = args.options("--extension-order"),
@@ -285,7 +292,12 @@ pub const Cli = struct {
MainPanicHandler.Singleton = &panicker;
var args = try Arguments.parse(alloc.static, stdout, stderr);
- try Server.start(allocator, &args);
+
+ if (args.serve orelse false) {
+ try Server.start(allocator, &args);
+ return;
+ }
+
var result: options.TransformResult = undefined;
switch (args.resolve orelse Api.ResolveMode.dev) {
Api.ResolveMode.disable => {
diff --git a/src/deps/picohttp.zig b/src/deps/picohttp.zig
index 407c203a1..5fff599cc 100644
--- a/src/deps/picohttp.zig
+++ b/src/deps/picohttp.zig
@@ -7,16 +7,6 @@ const fmt = std.fmt;
const assert = std.debug.assert;
-pub fn addTo(step: *std.build.LibExeObjStep, comptime dir: []const u8) void {
- step.addCSourceFile(dir ++ "/lib/picohttpparser.c", &[_][]const u8{});
- step.addIncludeDir(dir ++ "/lib");
-
- step.addPackage(.{
- .name = "picohttp",
- .path = dir ++ "/picohttp.zig",
- });
-}
-
pub const Header = struct {
name: []const u8,
value: []const u8,
@@ -40,59 +30,11 @@ pub const Header = struct {
};
pub const Request = struct {
- method_: []const u8,
- method: Method,
+ method: []const u8,
path: []const u8,
minor_version: usize,
headers: []const Header,
- pub const Method = enum {
- GET,
- HEAD,
- PATCH,
- PUT,
- POST,
- OPTIONS,
- CONNECT,
- TRACE,
-
- pub fn which(str: []const u8) ?Method {
- if (str.len < 3) {
- return null;
- }
-
- switch (Match.match(str[0..2])) {
- Match.case("GE"), Match.case("ge") => {
- return .GET;
- },
- Match.case("HE"), Match.case("he") => {
- return .HEAD;
- },
- Match.case("PA"), Match.case("pa") => {
- return .PATCH;
- },
- Match.case("PO"), Match.case("po") => {
- return .POST;
- },
- Match.case("PU"), Match.case("pu") => {
- return .PUT;
- },
- Match.case("OP"), Match.case("op") => {
- return .OPTIONS;
- },
- Match.case("CO"), Match.case("co") => {
- return .CONNECT;
- },
- Match.case("TR"), Match.case("tr") => {
- return .TRACE;
- },
- else => {
- return null;
- },
- }
- }
- };
-
pub fn parse(buf: []const u8, src: []Header) !Request {
var method: []const u8 = undefined;
var path: []const u8 = undefined;
@@ -116,8 +58,7 @@ pub const Request = struct {
-1 => error.BadRequest,
-2 => error.ShortRead,
else => |bytes_read| Request{
- .method_ = method,
- .method = Request.Method.which(method) orelse return error.InvalidMethod,
+ .method = method,
.path = path,
.minor_version = @intCast(usize, minor_version),
.headers = src[0..num_headers],
diff --git a/src/deps/picohttpparser.c b/src/deps/picohttpparser.c
new file mode 100644
index 000000000..5e5783abb
--- /dev/null
+++ b/src/deps/picohttpparser.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2009-2014 Kazuho Oku, Tokuhiro Matsuno, Daisuke Murase,
+ * Shigeo Mitsunari
+ *
+ * The software is licensed under either the MIT License (below) or the Perl
+ * license.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <string.h>
+#ifdef __SSE4_2__
+#ifdef _MSC_VER
+#include <nmmintrin.h>
+#else
+#include <x86intrin.h>
+#endif
+#endif
+#include "picohttpparser.h"
+
+#if __GNUC__ >= 3
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif
+
+#ifdef _MSC_VER
+#define ALIGNED(n) _declspec(align(n))
+#else
+#define ALIGNED(n) __attribute__((aligned(n)))
+#endif
+
+#define IS_PRINTABLE_ASCII(c) ((unsigned char)(c)-040u < 0137u)
+
+#define CHECK_EOF() \
+ if (buf == buf_end) { \
+ *ret = -2; \
+ return NULL; \
+ }
+
+#define EXPECT_CHAR_NO_CHECK(ch) \
+ if (*buf++ != ch) { \
+ *ret = -1; \
+ return NULL; \
+ }
+
+#define EXPECT_CHAR(ch) \
+ CHECK_EOF(); \
+ EXPECT_CHAR_NO_CHECK(ch);
+
+#define ADVANCE_TOKEN(tok, toklen) \
+ do { \
+ const char *tok_start = buf; \
+ static const char ALIGNED(16) ranges2[16] = "\000\040\177\177"; \
+ int found2; \
+ buf = findchar_fast(buf, buf_end, ranges2, 4, &found2); \
+ if (!found2) { \
+ CHECK_EOF(); \
+ } \
+ while (1) { \
+ if (*buf == ' ') { \
+ break; \
+ } else if (unlikely(!IS_PRINTABLE_ASCII(*buf))) { \
+ if ((unsigned char)*buf < '\040' || *buf == '\177') { \
+ *ret = -1; \
+ return NULL; \
+ } \
+ } \
+ ++buf; \
+ CHECK_EOF(); \
+ } \
+ tok = tok_start; \
+ toklen = buf - tok_start; \
+ } while (0)
+
+static const char *token_char_map = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\1\0\1\1\1\1\1\0\0\1\1\0\1\1\0\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0"
+ "\0\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\1\1"
+ "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\1\0\1\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+
+static const char *findchar_fast(const char *buf, const char *buf_end, const char *ranges, size_t ranges_size, int *found)
+{
+ *found = 0;
+#if __SSE4_2__
+ if (likely(buf_end - buf >= 16)) {
+ __m128i ranges16 = _mm_loadu_si128((const __m128i *)ranges);
+
+ size_t left = (buf_end - buf) & ~15;
+ do {
+ __m128i b16 = _mm_loadu_si128((const __m128i *)buf);
+ int r = _mm_cmpestri(ranges16, ranges_size, b16, 16, _SIDD_LEAST_SIGNIFICANT | _SIDD_CMP_RANGES | _SIDD_UBYTE_OPS);
+ if (unlikely(r != 16)) {
+ buf += r;
+ *found = 1;
+ break;
+ }
+ buf += 16;
+ left -= 16;
+ } while (likely(left != 0));
+ }
+#else
+ /* suppress unused parameter warning */
+ (void)buf_end;
+ (void)ranges;
+ (void)ranges_size;
+#endif
+ return buf;
+}
+
+static const char *get_token_to_eol(const char *buf, const char *buf_end, const char **token, size_t *token_len, int *ret)
+{
+ const char *token_start = buf;
+
+#ifdef __SSE4_2__
+ static const char ALIGNED(16) ranges1[16] = "\0\010" /* allow HT */
+ "\012\037" /* allow SP and up to but not including DEL */
+ "\177\177"; /* allow chars w. MSB set */
+ int found;
+ buf = findchar_fast(buf, buf_end, ranges1, 6, &found);
+ if (found)
+ goto FOUND_CTL;
+#else
+ /* find non-printable char within the next 8 bytes, this is the hottest code; manually inlined */
+ while (likely(buf_end - buf >= 8)) {
+#define DOIT() \
+ do { \
+ if (unlikely(!IS_PRINTABLE_ASCII(*buf))) \
+ goto NonPrintable; \
+ ++buf; \
+ } while (0)
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+#undef DOIT
+ continue;
+ NonPrintable:
+ if ((likely((unsigned char)*buf < '\040') && likely(*buf != '\011')) || unlikely(*buf == '\177')) {
+ goto FOUND_CTL;
+ }
+ ++buf;
+ }
+#endif
+ for (;; ++buf) {
+ CHECK_EOF();
+ if (unlikely(!IS_PRINTABLE_ASCII(*buf))) {
+ if ((likely((unsigned char)*buf < '\040') && likely(*buf != '\011')) || unlikely(*buf == '\177')) {
+ goto FOUND_CTL;
+ }
+ }
+ }
+FOUND_CTL:
+ if (likely(*buf == '\015')) {
+ ++buf;
+ EXPECT_CHAR('\012');
+ *token_len = buf - 2 - token_start;
+ } else if (*buf == '\012') {
+ *token_len = buf - token_start;
+ ++buf;
+ } else {
+ *ret = -1;
+ return NULL;
+ }
+ *token = token_start;
+
+ return buf;
+}
+
+static const char *is_complete(const char *buf, const char *buf_end, size_t last_len, int *ret)
+{
+ int ret_cnt = 0;
+ buf = last_len < 3 ? buf : buf + last_len - 3;
+
+ while (1) {
+ CHECK_EOF();
+ if (*buf == '\015') {
+ ++buf;
+ CHECK_EOF();
+ EXPECT_CHAR('\012');
+ ++ret_cnt;
+ } else if (*buf == '\012') {
+ ++buf;
+ ++ret_cnt;
+ } else {
+ ++buf;
+ ret_cnt = 0;
+ }
+ if (ret_cnt == 2) {
+ return buf;
+ }
+ }
+
+ *ret = -2;
+ return NULL;
+}
+
+#define PARSE_INT(valp_, mul_) \
+ if (*buf < '0' || '9' < *buf) { \
+ buf++; \
+ *ret = -1; \
+ return NULL; \
+ } \
+ *(valp_) = (mul_) * (*buf++ - '0');
+
+#define PARSE_INT_3(valp_) \
+ do { \
+ int res_ = 0; \
+ PARSE_INT(&res_, 100) \
+ *valp_ = res_; \
+ PARSE_INT(&res_, 10) \
+ *valp_ += res_; \
+ PARSE_INT(&res_, 1) \
+ *valp_ += res_; \
+ } while (0)
+
+/* returned pointer is always within [buf, buf_end), or null */
+static const char *parse_token(const char *buf, const char *buf_end, const char **token, size_t *token_len, char next_char,
+ int *ret)
+{
+ /* We use pcmpestri to detect non-token characters. This instruction can take no more than eight character ranges (8*2*8=128
+ * bits that is the size of a SSE register). Due to this restriction, characters `|` and `~` are handled in the slow loop. */
+ static const char ALIGNED(16) ranges[] = "\x00 " /* control chars and up to SP */
+ "\"\"" /* 0x22 */
+ "()" /* 0x28,0x29 */
+ ",," /* 0x2c */
+ "//" /* 0x2f */
+ ":@" /* 0x3a-0x40 */
+ "[]" /* 0x5b-0x5d */
+ "{\xff"; /* 0x7b-0xff */
+ const char *buf_start = buf;
+ int found;
+ buf = findchar_fast(buf, buf_end, ranges, sizeof(ranges) - 1, &found);
+ if (!found) {
+ CHECK_EOF();
+ }
+ while (1) {
+ if (*buf == next_char) {
+ break;
+ } else if (!token_char_map[(unsigned char)*buf]) {
+ *ret = -1;
+ return NULL;
+ }
+ ++buf;
+ CHECK_EOF();
+ }
+ *token = buf_start;
+ *token_len = buf - buf_start;
+ return buf;
+}
+
+/* returned pointer is always within [buf, buf_end), or null */
+static const char *parse_http_version(const char *buf, const char *buf_end, int *minor_version, int *ret)
+{
+ /* we want at least [HTTP/1.<two chars>] to try to parse */
+ if (buf_end - buf < 9) {
+ *ret = -2;
+ return NULL;
+ }
+ EXPECT_CHAR_NO_CHECK('H');
+ EXPECT_CHAR_NO_CHECK('T');
+ EXPECT_CHAR_NO_CHECK('T');
+ EXPECT_CHAR_NO_CHECK('P');
+ EXPECT_CHAR_NO_CHECK('/');
+ EXPECT_CHAR_NO_CHECK('1');
+ EXPECT_CHAR_NO_CHECK('.');
+ PARSE_INT(minor_version, 1);
+ return buf;
+}
+
+static const char *parse_headers(const char *buf, const char *buf_end, struct phr_header *headers, size_t *num_headers,
+ size_t max_headers, int *ret)
+{
+ for (;; ++*num_headers) {
+ CHECK_EOF();
+ if (*buf == '\015') {
+ ++buf;
+ EXPECT_CHAR('\012');
+ break;
+ } else if (*buf == '\012') {
+ ++buf;
+ break;
+ }
+ if (*num_headers == max_headers) {
+ *ret = -1;
+ return NULL;
+ }
+ if (!(*num_headers != 0 && (*buf == ' ' || *buf == '\t'))) {
+ /* parsing name, but do not discard SP before colon, see
+ * http://www.mozilla.org/security/announce/2006/mfsa2006-33.html */
+ if ((buf = parse_token(buf, buf_end, &headers[*num_headers].name, &headers[*num_headers].name_len, ':', ret)) == NULL) {
+ return NULL;
+ }
+ if (headers[*num_headers].name_len == 0) {
+ *ret = -1;
+ return NULL;
+ }
+ ++buf;
+ for (;; ++buf) {
+ CHECK_EOF();
+ if (!(*buf == ' ' || *buf == '\t')) {
+ break;
+ }
+ }
+ } else {
+ headers[*num_headers].name = NULL;
+ headers[*num_headers].name_len = 0;
+ }
+ const char *value;
+ size_t value_len;
+ if ((buf = get_token_to_eol(buf, buf_end, &value, &value_len, ret)) == NULL) {
+ return NULL;
+ }
+ /* remove trailing SPs and HTABs */
+ const char *value_end = value + value_len;
+ for (; value_end != value; --value_end) {
+ const char c = *(value_end - 1);
+ if (!(c == ' ' || c == '\t')) {
+ break;
+ }
+ }
+ headers[*num_headers].value = value;
+ headers[*num_headers].value_len = value_end - value;
+ }
+ return buf;
+}
+
+static const char *parse_request(const char *buf, const char *buf_end, const char **method, size_t *method_len, const char **path,
+ size_t *path_len, int *minor_version, struct phr_header *headers, size_t *num_headers,
+ size_t max_headers, int *ret)
+{
+ /* skip first empty line (some clients add CRLF after POST content) */
+ CHECK_EOF();
+ if (*buf == '\015') {
+ ++buf;
+ EXPECT_CHAR('\012');
+ } else if (*buf == '\012') {
+ ++buf;
+ }
+
+ /* parse request line */
+ if ((buf = parse_token(buf, buf_end, method, method_len, ' ', ret)) == NULL) {
+ return NULL;
+ }
+ do {
+ ++buf;
+ CHECK_EOF();
+ } while (*buf == ' ');
+ ADVANCE_TOKEN(*path, *path_len);
+ do {
+ ++buf;
+ CHECK_EOF();
+ } while (*buf == ' ');
+ if (*method_len == 0 || *path_len == 0) {
+ *ret = -1;
+ return NULL;
+ }
+ if ((buf = parse_http_version(buf, buf_end, minor_version, ret)) == NULL) {
+ return NULL;
+ }
+ if (*buf == '\015') {
+ ++buf;
+ EXPECT_CHAR('\012');
+ } else if (*buf == '\012') {
+ ++buf;
+ } else {
+ *ret = -1;
+ return NULL;
+ }
+
+ return parse_headers(buf, buf_end, headers, num_headers, max_headers, ret);
+}
+
+int phr_parse_request(const char *buf_start, size_t len, const char **method, size_t *method_len, const char **path,
+ size_t *path_len, int *minor_version, struct phr_header *headers, size_t *num_headers, size_t last_len)
+{
+ const char *buf = buf_start, *buf_end = buf_start + len;
+ size_t max_headers = *num_headers;
+ int r;
+
+ *method = NULL;
+ *method_len = 0;
+ *path = NULL;
+ *path_len = 0;
+ *minor_version = -1;
+ *num_headers = 0;
+
+ /* if last_len != 0, check if the request is complete (a fast countermeasure
+ against slowloris) */
+ if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) {
+ return r;
+ }
+
+ if ((buf = parse_request(buf, buf_end, method, method_len, path, path_len, minor_version, headers, num_headers, max_headers,
+ &r)) == NULL) {
+ return r;
+ }
+
+ return (int)(buf - buf_start);
+}
+
+static const char *parse_response(const char *buf, const char *buf_end, int *minor_version, int *status, const char **msg,
+ size_t *msg_len, struct phr_header *headers, size_t *num_headers, size_t max_headers, int *ret)
+{
+ /* parse "HTTP/1.x" */
+ if ((buf = parse_http_version(buf, buf_end, minor_version, ret)) == NULL) {
+ return NULL;
+ }
+ /* skip space */
+ if (*buf != ' ') {
+ *ret = -1;
+ return NULL;
+ }
+ do {
+ ++buf;
+ CHECK_EOF();
+ } while (*buf == ' ');
+ /* parse status code, we want at least [:digit:][:digit:][:digit:]<other char> to try to parse */
+ if (buf_end - buf < 4) {
+ *ret = -2;
+ return NULL;
+ }
+ PARSE_INT_3(status);
+
+ /* get message including preceding space */
+ if ((buf = get_token_to_eol(buf, buf_end, msg, msg_len, ret)) == NULL) {
+ return NULL;
+ }
+ if (*msg_len == 0) {
+ /* ok */
+ } else if (**msg == ' ') {
+ /* Remove preceding space. Successful return from `get_token_to_eol` guarantees that we would hit something other than SP
+ * before running past the end of the given buffer. */
+ do {
+ ++*msg;
+ --*msg_len;
+ } while (**msg == ' ');
+ } else {
+ /* garbage found after status code */
+ *ret = -1;
+ return NULL;
+ }
+
+ return parse_headers(buf, buf_end, headers, num_headers, max_headers, ret);
+}
+
+int phr_parse_response(const char *buf_start, size_t len, int *minor_version, int *status, const char **msg, size_t *msg_len,
+ struct phr_header *headers, size_t *num_headers, size_t last_len)
+{
+ const char *buf = buf_start, *buf_end = buf + len;
+ size_t max_headers = *num_headers;
+ int r;
+
+ *minor_version = -1;
+ *status = 0;
+ *msg = NULL;
+ *msg_len = 0;
+ *num_headers = 0;
+
+ /* if last_len != 0, check if the response is complete (a fast countermeasure
+ against slowloris) */
+ if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) {
+ return r;
+ }
+
+ if ((buf = parse_response(buf, buf_end, minor_version, status, msg, msg_len, headers, num_headers, max_headers, &r)) == NULL) {
+ return r;
+ }
+
+ return (int)(buf - buf_start);
+}
+
+int phr_parse_headers(const char *buf_start, size_t len, struct phr_header *headers, size_t *num_headers, size_t last_len)
+{
+ const char *buf = buf_start, *buf_end = buf + len;
+ size_t max_headers = *num_headers;
+ int r;
+
+ *num_headers = 0;
+
+ /* if last_len != 0, check if the response is complete (a fast countermeasure
+ against slowloris) */
+ if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) {
+ return r;
+ }
+
+ if ((buf = parse_headers(buf, buf_end, headers, num_headers, max_headers, &r)) == NULL) {
+ return r;
+ }
+
+ return (int)(buf - buf_start);
+}
+
+enum {
+ CHUNKED_IN_CHUNK_SIZE,
+ CHUNKED_IN_CHUNK_EXT,
+ CHUNKED_IN_CHUNK_DATA,
+ CHUNKED_IN_CHUNK_CRLF,
+ CHUNKED_IN_TRAILERS_LINE_HEAD,
+ CHUNKED_IN_TRAILERS_LINE_MIDDLE
+};
+
+static int decode_hex(int ch)
+{
+ if ('0' <= ch && ch <= '9') {
+ return ch - '0';
+ } else if ('A' <= ch && ch <= 'F') {
+ return ch - 'A' + 0xa;
+ } else if ('a' <= ch && ch <= 'f') {
+ return ch - 'a' + 0xa;
+ } else {
+ return -1;
+ }
+}
+
+ssize_t phr_decode_chunked(struct phr_chunked_decoder *decoder, char *buf, size_t *_bufsz)
+{
+ size_t dst = 0, src = 0, bufsz = *_bufsz;
+ ssize_t ret = -2; /* incomplete */
+
+ while (1) {
+ switch (decoder->_state) {
+ case CHUNKED_IN_CHUNK_SIZE:
+ for (;; ++src) {
+ int v;
+ if (src == bufsz)
+ goto Exit;
+ if ((v = decode_hex(buf[src])) == -1) {
+ if (decoder->_hex_count == 0) {
+ ret = -1;
+ goto Exit;
+ }
+ break;
+ }
+ if (decoder->_hex_count == sizeof(size_t) * 2) {
+ ret = -1;
+ goto Exit;
+ }
+ decoder->bytes_left_in_chunk = decoder->bytes_left_in_chunk * 16 + v;
+ ++decoder->_hex_count;
+ }
+ decoder->_hex_count = 0;
+ decoder->_state = CHUNKED_IN_CHUNK_EXT;
+ /* fallthru */
+ case CHUNKED_IN_CHUNK_EXT:
+ /* RFC 7230 A.2 "Line folding in chunk extensions is disallowed" */
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] == '\012')
+ break;
+ }
+ ++src;
+ if (decoder->bytes_left_in_chunk == 0) {
+ if (decoder->consume_trailer) {
+ decoder->_state = CHUNKED_IN_TRAILERS_LINE_HEAD;
+ break;
+ } else {
+ goto Complete;
+ }
+ }
+ decoder->_state = CHUNKED_IN_CHUNK_DATA;
+ /* fallthru */
+ case CHUNKED_IN_CHUNK_DATA: {
+ size_t avail = bufsz - src;
+ if (avail < decoder->bytes_left_in_chunk) {
+ if (dst != src)
+ memmove(buf + dst, buf + src, avail);
+ src += avail;
+ dst += avail;
+ decoder->bytes_left_in_chunk -= avail;
+ goto Exit;
+ }
+ if (dst != src)
+ memmove(buf + dst, buf + src, decoder->bytes_left_in_chunk);
+ src += decoder->bytes_left_in_chunk;
+ dst += decoder->bytes_left_in_chunk;
+ decoder->bytes_left_in_chunk = 0;
+ decoder->_state = CHUNKED_IN_CHUNK_CRLF;
+ }
+ /* fallthru */
+ case CHUNKED_IN_CHUNK_CRLF:
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] != '\015')
+ break;
+ }
+ if (buf[src] != '\012') {
+ ret = -1;
+ goto Exit;
+ }
+ ++src;
+ decoder->_state = CHUNKED_IN_CHUNK_SIZE;
+ break;
+ case CHUNKED_IN_TRAILERS_LINE_HEAD:
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] != '\015')
+ break;
+ }
+ if (buf[src++] == '\012')
+ goto Complete;
+ decoder->_state = CHUNKED_IN_TRAILERS_LINE_MIDDLE;
+ /* fallthru */
+ case CHUNKED_IN_TRAILERS_LINE_MIDDLE:
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] == '\012')
+ break;
+ }
+ ++src;
+ decoder->_state = CHUNKED_IN_TRAILERS_LINE_HEAD;
+ break;
+ default:
+ assert(!"decoder is corrupt");
+ }
+ }
+
+Complete:
+ ret = bufsz - src;
+Exit:
+ if (dst != src)
+ memmove(buf + dst, buf + src, bufsz - src);
+ *_bufsz = dst;
+ return ret;
+}
+
+int phr_decode_chunked_is_in_data(struct phr_chunked_decoder *decoder)
+{
+ return decoder->_state == CHUNKED_IN_CHUNK_DATA;
+}
+
+#undef CHECK_EOF
+#undef EXPECT_CHAR
+#undef ADVANCE_TOKEN
diff --git a/src/deps/picohttpparser.h b/src/deps/picohttpparser.h
new file mode 100644
index 000000000..07537cf1e
--- /dev/null
+++ b/src/deps/picohttpparser.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2009-2014 Kazuho Oku, Tokuhiro Matsuno, Daisuke Murase,
+ * Shigeo Mitsunari
+ *
+ * The software is licensed under either the MIT License (below) or the Perl
+ * license.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef picohttpparser_h
+#define picohttpparser_h
+
+#include <sys/types.h>
+
+#ifdef _MSC_VER
+#define ssize_t intptr_t
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* contains name and value of a header (name == NULL if is a continuing line
+ * of a multiline header */
+struct phr_header {
+ const char *name;
+ size_t name_len;
+ const char *value;
+ size_t value_len;
+};
+
+/* returns number of bytes consumed if successful, -2 if request is partial,
+ * -1 if failed */
+int phr_parse_request(const char *buf, size_t len, const char **method, size_t *method_len, const char **path, size_t *path_len,
+ int *minor_version, struct phr_header *headers, size_t *num_headers, size_t last_len);
+
+/* ditto */
+int phr_parse_response(const char *_buf, size_t len, int *minor_version, int *status, const char **msg, size_t *msg_len,
+ struct phr_header *headers, size_t *num_headers, size_t last_len);
+
+/* ditto */
+int phr_parse_headers(const char *buf, size_t len, struct phr_header *headers, size_t *num_headers, size_t last_len);
+
+/* should be zero-filled before start */
+struct phr_chunked_decoder {
+ size_t bytes_left_in_chunk; /* number of bytes left in current chunk */
+ char consume_trailer; /* if trailing headers should be consumed */
+ char _hex_count;
+ char _state;
+};
+
+/* the function rewrites the buffer given as (buf, bufsz) removing the chunked-
+ * encoding headers. When the function returns without an error, bufsz is
+ * updated to the length of the decoded data available. Applications should
+ * repeatedly call the function while it returns -2 (incomplete) every time
+ * supplying newly arrived data. If the end of the chunked-encoded data is
+ * found, the function returns a non-negative number indicating the number of
+ * octets left undecoded, that starts from the offset returned by `*bufsz`.
+ * Returns -1 on error.
+ */
+ssize_t phr_decode_chunked(struct phr_chunked_decoder *decoder, char *buf, size_t *bufsz);
+
+/* returns if the chunked decoder is in middle of chunked data */
+int phr_decode_chunked_is_in_data(struct phr_chunked_decoder *decoder);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/exact_size_matcher.zig b/src/exact_size_matcher.zig
index 63950f576..d7ae0e12c 100644
--- a/src/exact_size_matcher.zig
+++ b/src/exact_size_matcher.zig
@@ -1,6 +1,13 @@
const std = @import("std");
pub fn ExactSizeMatcher(comptime max_bytes: usize) type {
+ switch (max_bytes) {
+ 1, 2, 4, 8, 12 => {},
+ else => {
+ @compileError("max_bytes must be 1, 2, 4, 8, or 12.");
+ },
+ }
+
const T = std.meta.Int(
.unsigned,
max_bytes * 8,
@@ -8,39 +15,57 @@ pub fn ExactSizeMatcher(comptime max_bytes: usize) type {
return struct {
pub fn match(str: anytype) T {
- return hash(str) orelse std.math.maxInt(T);
+ switch (str.len) {
+ 1...max_bytes - 1 => {
+ var tmp = std.mem.zeroes([max_bytes]u8);
+ std.mem.copy(u8, &tmp, str[0..str.len]);
+ return std.mem.readIntNative(T, &tmp);
+ },
+ max_bytes => {
+ return std.mem.readIntSliceNative(T, str);
+ },
+ 0 => {
+ return 0;
+ },
+ else => {
+ return std.math.maxInt(T);
+ },
+ }
}
pub fn case(comptime str: []const u8) T {
- return hash(str) orelse std.math.maxInt(T);
+ if (str.len < max_bytes) {
+ var bytes = std.mem.zeroes([max_bytes]u8);
+ const slice_bytes = std.mem.sliceAsBytes(str);
+ std.mem.copy(u8, &bytes, slice_bytes);
+ return std.mem.readIntNative(T, &bytes);
+ } else if (str.len == max_bytes) {
+ return std.mem.readIntNative(T, str[0..str.len]);
+ } else {
+ @compileError("str is " ++ str.len ++ " bytes but expected " ++ max_bytes ++ " bytes");
+ }
}
- pub fn hash(str: anytype) ?T {
+ fn hash(comptime str: anytype) ?T {
if (str.len > max_bytes) return null;
var tmp = [_]u8{0} ** max_bytes;
std.mem.copy(u8, &tmp, str[0..str.len]);
return std.mem.readIntNative(T, &tmp);
}
-
- pub fn hashUnsafe(str: anytype) T {
- var tmp = [_]u8{0} ** max_bytes;
- std.mem.copy(u8, &tmp, str[0..str.len]);
- return std.mem.readIntNative(T, &tmp);
- }
};
}
const eight = ExactSizeMatcher(8);
-
+const expect = std.testing.expect;
test "ExactSizeMatcher 5 letter" {
const word = "yield";
- expect(eight.match(word) == eight.case("yield"));
- expect(eight.match(word) != eight.case("yields"));
+ try expect(eight.match(word) == eight.case("yield"));
+ try expect(eight.match(word) != eight.case("yields"));
}
test "ExactSizeMatcher 4 letter" {
const Four = ExactSizeMatcher(4);
const word = "from";
- expect(Four.match(word) == Four.case("from"));
- expect(Four.match(word) != Four.case("fro"));
+ try expect(Four.match(word) == Four.case("from"));
+ try expect(Four.match(word) != Four.case("fro"));
}
diff --git a/src/http.zig b/src/http.zig
index 608011528..abc7488cd 100644
--- a/src/http.zig
+++ b/src/http.zig
@@ -2,6 +2,7 @@
const std = @import("std");
usingnamespace @import("global.zig");
const Api = @import("./api/schema.zig").Api;
+const bundler = @import("bundler.zig");
const tcp = std.x.net.tcp;
const ip = std.x.net.ip;
@@ -11,17 +12,263 @@ const IPv6 = std.x.os.IPv6;
const Socket = std.x.os.Socket;
const os = std.os;
-const picohttp = @import("picohttp");
+const picohttp = @import("./deps/picohttp.zig");
const Header = picohttp.Header;
const Request = picohttp.Request;
const Response = picohttp.Response;
const Headers = picohttp.Headers;
+const MimeType = @import("http/mime_type.zig");
+const Bundler = bundler.Bundler;
+// This is a tiny HTTP server.
+// It needs to support:
+// - Static files
+// - ETags, If-Not-Modified-Since
+// - Bundling
+// - Content-Type header
+// - Content-Range header
+// Fancy things to support:
+// - Server-Timings for:
+// - Resolver time
+// - Parsing time
+// - IO read time
pub const Server = struct {
options: *Api.TransformOptions,
allocator: *std.mem.Allocator,
- threadlocal var headers_buf: [100]picohttp.Header = undefined;
+ threadlocal var req_headers_buf: [100]picohttp.Header = undefined;
+ threadlocal var res_headers_buf: [100]picohttp.Header = undefined;
+
+ pub const RequestContext = struct {
+ request: Request,
+ method: Method,
+ url: URLPath,
+ conn: *tcp.Connection,
+ bundler: *Bundler,
+ status: ?u5 = null,
+ has_written_last_header: bool = false,
+
+ res_headers_count: usize = 0,
+
+ pub const bundle_prefix = "__speedy";
+
+ pub fn header(ctx: *RequestContext, comptime name: anytype) ?Header {
+ for (ctx.request.headers) |header| {
+ if (strings.eql(header.name, name)) {
+ return header;
+ }
+ }
+ return null;
+ }
+
+ pub fn printStatusLine(comptime code: u9) ![]u8 {
+ const status_text = switch (code) {
+ 200...299 => "OK",
+ 300...399 => "=>",
+ 400...499 => "UH",
+ 500...599 => "ERR",
+ else => @compileError("Invalid code passed to printStatusLine"),
+ };
+
+ return try std.fmt.comptimePrint("HTTP/1.1 {s} \r\n", .{ code, status_text });
+ }
+
+ pub fn prepareToSendBody(
+ ctx: *RequestContext,
+ length: usize,
+ comptime chunked: bool,
+ ) !void {
+ if (isDebug or isTest) {
+ std.debug.assert(!ctx.has_written_last_header);
+ ctx.has_written_last_header = true;
+ }
+
+ if (chunked) {}
+ }
+
+ pub fn writeBodyBuf(ctx: *RequestContext) void {}
+
+ pub fn writeStatus(ctx: *RequestContext, comptime code: u9) !void {
+ _ = try ctx.conn.client.write(comptime printStatusLine(code), os.SOCK_CLOEXEC);
+ }
+
+ pub fn init(req: Request, conn: *tcp.Connection, bundler: *Bundler) !RequestContext {
+ return RequestContext{
+ .request = request,
+ .conn = conn,
+ .bundler = bundler,
+ .url = URLPath.parse(req.path),
+ .method = Method.which(req.method) orelse return error.InvalidMethod,
+ };
+ }
+
+ pub fn sendNotFound(req: *RequestContext) !void {
+ return req.writeStatus(404);
+ }
+
+ pub fn sendInternalError(ctx: *RequestContext, err: anytype) void {
+ ctx.writeStatus(500) catch {};
+ const printed = std.fmt.bufPrint(&error_buf, "Error: {s}", .{@errorName(err)}) catch {};
+ ctx.prepareToSendBody(printed.len, false) catch {};
+ ctx.writeBodyBuf(&printed) catch {};
+ }
+
+ threadlocal var error_buf: [4096]u8 = undefined;
+
+ pub fn appendHeader(ctx: *RequestContext, comptime key: string, value: string) void {
+ if (isDebug or isTest) std.debug.assert(!ctx.has_written_last_header);
+ if (isDebug or isTest) std.debug.assert(ctx.res_headers_count < res_headers_buf.len);
+ res_headers_buf[ctx.res_headers_count] = Header{ .key = key, .value = value };
+ ctx.res_headers_count += 1;
+ }
+
+ pub fn handleGet(ctx: *RequestContext) !void {
+ const result = ctx.bundler.buildFile(req.allocator, req.url) catch |err| {
+ ctx.sendInternalError(err);
+ return;
+ };
+
+ if (result.output.len == 0) {
+ return ctx.sendNotFound();
+ }
+
+ const file = result.output;
+
+ const mime_type = MimeType.byExtension(std.fs.path.extension(file));
+ ctx.appendHeader("Content-Type", mime_type.value);
+
+ return ctx.writeResult(result, mime_type);
+ }
+
+ pub fn handle(ctx: *RequestContext) !void {
+ switch (ctx.method) {
+ .GET, .HEAD, .OPTIONS => {
+ return ctx.handleGet();
+ },
+ else => {
+ return ctx.sendNotFound();
+ },
+ }
+ }
+
+ pub const Method = enum {
+ GET,
+ HEAD,
+ PATCH,
+ PUT,
+ POST,
+ OPTIONS,
+ CONNECT,
+ TRACE,
+
+ pub fn which(str: []const u8) ?Method {
+ if (str.len < 3) {
+ return null;
+ }
+ const Match = strings.ExactSizeMatcher(2);
+ // we already did the length check
+ switch (Match.hashUnsafe(str[0..2])) {
+ Match.case("GE"), Match.case("ge") => {
+ return .GET;
+ },
+ Match.case("HE"), Match.case("he") => {
+ return .HEAD;
+ },
+ Match.case("PA"), Match.case("pa") => {
+ return .PATCH;
+ },
+ Match.case("PO"), Match.case("po") => {
+ return .POST;
+ },
+ Match.case("PU"), Match.case("pu") => {
+ return .PUT;
+ },
+ Match.case("OP"), Match.case("op") => {
+ return .OPTIONS;
+ },
+ Match.case("CO"), Match.case("co") => {
+ return .CONNECT;
+ },
+ Match.case("TR"), Match.case("tr") => {
+ return .TRACE;
+ },
+ else => {
+ return null;
+ },
+ }
+ }
+ };
+ };
+
+ pub const URLPath = struct {
+ extname: string = "",
+ path: string = "",
+ first_segment: string = "",
+ query_string: string = "",
+
+ // This does one pass over the URL path instead of like 4
+ pub fn parse(raw_path: string) PathParser {
+ var question_mark_i: i16 = -1;
+ var period_i: i16 = -1;
+ var first_segment_end: i16 = std.math.maxInt(i16);
+ var last_slash: i16 = -1;
+
+ var i: i16 = raw_path.len - 1;
+ while (i >= 0) : (i -= 1) {
+ const c = raw_path[@intCast(usize, i)];
+
+ switch (c) {
+ '?' => {
+ question_mark_i = std.math.max(question_mark_i, i);
+ if (question_mark_i < period_i) {
+ period_i = -1;
+ }
+
+ if (last_slash > question_mark_i) {
+ last_slash = -1;
+ }
+ },
+ '.' => {
+ period_i = std.math.max(period_i, i);
+ },
+ '/' => {
+ last_slash = std.math.max(last_slash, i);
+
+ if (i > 0) {
+ first_segment_end = std.math.min(first_segment_end, i);
+ }
+ },
+ else => {},
+ }
+ }
+
+ if (last_slash > period_i) {
+ period_i = -1;
+ }
+
+ const extname = brk: {
+ if (question_mark_i > -1 and period_i > -1) {
+ period_i += 1;
+ break :brk raw_path[period_i..question_mark_i];
+ } else if (period_i > -1) {
+ period_i += 1;
+ break :brk raw_path[period_i..];
+ } else {
+ break :brk [_]u8{};
+ }
+ };
+
+ const path = raw_path[0..@intCast(usize, std.math.max(question_mark_i, raw_path.len))];
+ const first_segment = raw_path[0..std.math.min(@intCast(usize, first_segment_end), raw_path.len)];
+
+ return URLPath{
+ .extname = extname,
+ .first_segment = first_segment,
+ .path = path,
+ .query_string = if (question_mark_i > -1) raw_path[question_mark_i..raw_path.len] else "",
+ };
+ }
+ };
fn run(server: *Server) !void {
const listener = try tcp.Listener.init(.ip, os.SOCK_CLOEXEC);
@@ -30,6 +277,7 @@ pub const Server = struct {
listener.setReuseAddress(true) catch {};
listener.setReusePort(true) catch {};
listener.setFastOpen(true) catch {};
+
// try listener.ack(true);
try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 9000));
@@ -43,24 +291,11 @@ pub const Server = struct {
}
}
- pub fn writeStatus(server: *Server, comptime code: u9, conn: *tcp.Connection) !void {
- _ = try conn.client.write(std.fmt.comptimePrint("HTTP/1.1 {d}\r\n", .{code}), os.SOCK_CLOEXEC);
- }
-
pub fn sendError(server: *Server, request: *Request, conn: *tcp.Connection, code: u9, msg: string) !void {
try server.writeStatus(code, connection);
conn.deinit();
}
- pub fn handleRequest(server: *Server, request: *Request, conn: *tcp.Connection) !void {
- try server.writeStatus(200, conn);
- conn.deinit();
- // switch (request.method) {
- // .GET, .HEAD => {},
- // else => {},
- // }
- }
-
pub fn handleConnection(server: *Server, conn: *tcp.Connection) void {
errdefer conn.deinit();
// https://stackoverflow.com/questions/686217/maximum-on-http-header-values
@@ -68,17 +303,25 @@ pub const Server = struct {
var read_size = conn.client.read(&req_buf, os.SOCK_CLOEXEC) catch |err| {
return;
};
- var req = picohttp.Request.parse(req_buf[0..read_size], &headers_buf) catch |err| {
+ var req = picohttp.Request.parse(req_buf[0..read_size], &req_headers_buf) catch |err| {
Output.printError("ERR: {s}", .{@errorName(err)});
return;
};
- server.handleRequest(&req, conn) catch |err| {
- Output.printError("FAIL [{s}] - {s}: {s}", .{ @errorName(err), @tagName(req.method), req.path });
+
+ var req_ctx = RequestContext.init(req, conn) catch |err| {
+ Output.printError("FAIL [{s}] - {s}: {s}", .{ @errorName(err), req.method, req.path });
conn.deinit();
return;
};
- Output.print("[{s}] - {s}", .{ @tagName(req.method), req.path });
+
+ req_ctx.handle() catch |err| {
+ Output.printError("FAIL [{s}] - {s}: {s}", .{ @errorName(err), req.method, req.path });
+ conn.deinit();
+ return;
+ };
+
+ Output.print("{d} – {s} {s}", .{ req_ctx.status orelse 500, @tagName(req.method), req.path });
}
pub fn start(allocator: *std.mem.Allocator, options: *Api.TransformOptions) !void {
@@ -87,26 +330,3 @@ pub const Server = struct {
try server.run();
}
};
-
-// fn indexHandler(req: Request, res: Response) !void {
-// try res.write("hi\n");
-// }
-
-// fn aboutHandler(req: Request, res: Response) !void {
-// try res.write("Hello from about\n");
-// }
-
-// fn aboutHandler2(req: Request, res: Response) !void {
-// try res.write("Hello from about2\n");
-// }
-
-// fn postHandler(req: Request, res: Response, args: *const struct {
-// post_num: []const u8,
-// }) !void {
-// try res.print("Hello from post, post_num is {s}\n", .{args.post_num});
-// }
-
-// var counter = std.atomic.Int(usize).init(0);
-// fn counterHandler(req: Request, res: Response) !void {
-// try res.print("Page loaded {d} times\n", .{counter.fetchAdd(1)});
-// }
diff --git a/src/http/mime_type.zig b/src/http/mime_type.zig
new file mode 100644
index 000000000..141bee1a6
--- /dev/null
+++ b/src/http/mime_type.zig
@@ -0,0 +1,99 @@
+const std = @import("std");
+usingnamespace @import("global.zig");
+
+const Two = strings.ExactSizeMatcher(2);
+const Four = strings.ExactSizeMatcher(4);
+
+const MimeType = @This();
+
+value: string,
+category: Category,
+
+pub const Category = enum {
+ image,
+ text,
+ html,
+ font,
+ other,
+ json,
+ video,
+ javascript,
+ wasm,
+};
+
+pub const other = MimeType.init("application/octet-stream", .other);
+
+fn init(comptime str: string, t: Category) MimeType {
+ return comptime {
+ return MimeType{
+ .value = str,
+ .category = t,
+ };
+ };
+}
+
+// TODO: improve this
+pub fn byExtension(_ext: string) MimeType {
+ const ext = _ext[1..];
+ switch (ext.len) {
+ 2 => {
+ return switch (Two.hashUnsafe(ext)) {
+ Two.case("js") => MimeType.init("application/javascript;charset=utf-8", .javascript),
+ else => MimeType.other,
+ };
+ },
+ 3 => {
+ const four = [4]u8{ ext[0], ext[1], ext[2], 0 };
+ return switch (std.mem.readIntNative(u32, &four)) {
+ Four.case("css\\0") => MimeType.init("text/css;charset=utf-8", .css),
+ Four.case("jpg\\0") => MimeType.init("image/jpeg", .image),
+ Four.case("gif\\0") => MimeType.init("image/gif", .image),
+ Four.case("png\\0") => MimeType.init("image/png", .image),
+ Four.case("bmp\\0") => MimeType.init("image/bmp", .image),
+ Four.case("mjs\\0") => MimeType.init("text/javascript;charset=utf-8", .javascript),
+ Four.case("wav\\0") => MimeType.init("audio/wave", .audio),
+ Four.case("aac\\0") => MimeType.init("audio/aic", .audio),
+ Four.case("mp4\\0") => MimeType.init("video/mp4", .video),
+ Four.case("htm\\0") => MimeType.init("text/html;charset=utf-8", .html),
+ Four.case("xml\\0") => MimeType.init("text/xml", .other),
+ Four.case("zip\\0") => MimeType.init("application/zip", .other),
+ Four.case("txt\\0") => MimeType.init("text/plain", .other),
+ Four.case("ttf\\0") => MimeType.init("font/ttf", .font),
+ Four.case("otf\\0") => MimeType.init("font/otf", .font),
+ Four.case("ico\\0") => MimeType.init("image/vnd.microsoft.icon", .image),
+ Four.case("mp3\\0") => MimeType.init("audio/mpeg", .video),
+ Four.case("svg\\0") => MimeType.init("image/svg+xml", .image),
+ Four.case("csv\\0") => MimeType.init("text/csv", .other),
+ Four.case("mid\\0") => MimeType.init("audio/mid", .audio),
+ else => MimeType.other,
+ };
+ },
+ 4 => {
+ return switch (Four.hashUnsafe(ext)) {
+ Four.case("json") => MimeType.init("application/json", .json),
+ Four.case("jpeg") => MimeType.init("image/jpeg", .image),
+ Four.case("aiff") => MimeType.init("image/png", .image),
+ Four.case("tiff") => MimeType.init("image/tiff", .image),
+ Four.case("html") => MimeType.init("text/html;charset=utf-8", .html),
+ Four.case("wasm") => MimeType.init(
+ "application/wasm",
+ .wasm,
+ ),
+ Four.case("woff") => MimeType.init("font/woff", .font),
+ Four.case("webm") => MimeType.init("video/webm", .video),
+ Four.case("webp") => MimeType.init("image/webp", .image),
+ Four.case("midi") => MimeType.init("audio/midi", .audio),
+ else => MimeType.other,
+ };
+ },
+ 5 => {
+ const eight = [8]u8{ ext[0], ext[1], ext[2], ext[3], ext[4], 0, 0, 0 };
+ return switch (std.mem.readIntNative(u64, &eight)) {
+ Eight.case("woff2\\0\\0\\0") => MimeType.init("font/woff2", .font),
+ Eight.case("xhtml\\0\\0\\0") => MimeType.init("application/xhtml+xml", .html),
+ else => MimeType.other,
+ };
+ },
+ else => MimeType.other,
+ }
+}
diff --git a/src/options.zig b/src/options.zig
index f0c2f6f15..b11767172 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -284,7 +284,10 @@ pub const BundleOptions = struct {
react_fast_refresh: bool = false,
inject: ?[]string = null,
public_url: string = "",
+ public_dir: string = "public",
+ public_dir_enabled: bool = true,
output_dir: string = "",
+ public_dir_handle: ?std.fs.Dir = null,
write: bool = false,
preserve_symlinks: bool = false,
resolve_mode: api.Api.ResolveMode,
@@ -378,6 +381,59 @@ pub const BundleOptions = struct {
opts.main_fields = transform.main_fields;
}
+ if (transform.serve orelse false) {
+ opts.resolve_mode = .lazy;
+ var _dirs = [_]string{transform.public_dir orelse opts.public_dir};
+ opts.public_dir = try fs.joinAlloc(allocator, &_dirs);
+ opts.public_dir_handle = std.fs.openDirAbsolute(opts.public_dir, .{ .iterate = true }) catch |err| brk: {
+ var did_warn = false;
+ switch (err) {
+ error.FileNotFound => {
+ // Be nice.
+ // Check "static" since sometimes people use that instead.
+ // Don't switch to it, but just tell "hey try --public-dir=static" next time
+ if (transform.public_dir == null or transform.public_dir.?.len == 0) {
+ _dirs[0] = "static";
+ const check_static = try fs.joinAlloc(allocator, &_dirs);
+ defer allocator.free(check_static);
+
+ std.fs.accessAbsolute(check_static, .{}) catch {
+ Output.printError("warn: \"public\" folder missing. If there are external assets used in your project, pass --public-dir=\"public-folder-name\"", .{});
+ did_warn = true;
+ };
+ }
+
+ if (!did_warn) {
+ Output.printError("warn: \"public\" folder missing. If you want to use \"static\" as the public folder, pass --public-dir=\"static\".", .{});
+ }
+ opts.public_dir_enabled = false;
+ },
+ error.AccessDenied => {
+ Output.printError(
+ "error: access denied when trying to open public_dir: \"{s}\".\nPlease re-open Speedy with access to this folder or pass a different folder via \"--public-dir\". Note: --public-dir is relative to --cwd (or the process' current working directory).\n\nThe public folder is where static assets such as images, fonts, and .html files go.",
+ .{opts.public_dir},
+ );
+ std.process.exit(1);
+ },
+ else => {
+ Output.printError(
+ "error: \"{s}\" when accessing public folder: \"{s}\"",
+ .{ @errorName(err), opts.public_dir },
+ );
+ std.process.exit(1);
+ },
+ }
+
+ break :brk null;
+ };
+
+ // Windows has weird locking rules for files
+ // so it's a bad idea to keep a file handle open for a long time on Windows.
+ if (isWindows and opts.public_dir_handle != null) {
+ opts.public_dir_handle.?.close();
+ }
+ }
+
return opts;
}
};
diff --git a/src/string_immutable.zig b/src/string_immutable.zig
index ab19f4dc9..ec233905f 100644
--- a/src/string_immutable.zig
+++ b/src/string_immutable.zig
@@ -97,33 +97,61 @@ pub fn eql(self: string, other: anytype) bool {
}
// I have not actually verified that this makes it faster
// It's probably like 0.0001ms faster
-pub fn eqlComptime(self: string, comptime alt: string) bool {
- comptime var matcher_size: usize = 0;
-
+pub fn eqlComptime(self: string, comptime alt: anytype) bool {
switch (comptime alt.len) {
0 => {
@compileError("Invalid size passed to eqlComptime");
},
- 1...4 => {
- matcher_size = 4;
+ 1...3 => {
+ if (alt.len != self.len) {
+ return false;
+ }
+
+ inline for (alt) |c, i| {
+ if (self[i] != c) return false;
+ }
+ return true;
+ },
+ 4 => {
+ comptime const check = std.mem.readIntNative(u32, alt[0..alt.len]);
+ return self.len == alt.len and std.mem.readIntNative(u32, self[0..4]) == check;
},
- 5...8 => {
- matcher_size = 8;
+ 5...7 => {
+ comptime const check = std.mem.readIntNative(u32, alt[0..4]);
+ if (self.len != alt.len or std.mem.readIntNative(u32, self[0..4]) != check) {
+ return false;
+ }
+ const remainder = self[4..];
+ inline for (alt[4..]) |c, i| {
+ if (remainder[i] != c) return false;
+ }
+ return true;
+ },
+ 8 => {
+ comptime const check = std.mem.readIntNative(u64, alt[0..alt.len]);
+ return self.len == alt.len and std.mem.readIntNative(u64, self[0..8]) == check;
+ },
+ 9...11 => {
+ comptime const first = std.mem.readIntNative(u64, alt[0..8]);
+
+ if (self.len != alt.len or first != std.mem.readIntNative(u64, self[0..8])) {
+ return false;
+ }
+
+ inline for (alt[8..]) |c, i| {
+ if (self[i + 8] != c) return false;
+ }
+ return true;
},
- 8...12 => {
- comptime const FirstMatcher = ExactSizeMatcher(8);
- comptime const SecondMatcher = ExactSizeMatcher(4);
- comptime const first = FirstMatcher.case(alt[0..8]);
- comptime const second = SecondMatcher.case(alt[8..alt.len]);
- return (self.len == alt.len) and first == FirstMatcher.hashUnsafe(self[0..8]) and second == SecondMatcher.match(self[8..self.len]);
+ 12 => {
+ comptime const first = std.mem.readIntNative(u64, alt[0..8]);
+ comptime const second = std.mem.readIntNative(u32, alt[8..12]);
+ return (self.len == alt.len) and first == std.mem.readIntNative(u64, self[0..8]) and second == std.mem.readIntNative(u32, self[8..12]);
},
else => {
@compileError(alt ++ " is too long.");
},
}
- comptime const Matcher = ExactSizeMatcher(matcher_size);
- comptime const alt_hash = Matcher.case(alt);
- return Matcher.match(self) == alt_hash;
}
pub fn append(allocator: *std.mem.Allocator, self: string, other: string) !string {