author		2022-04-03 16:34:10 -0700
committer	2022-04-03 16:34:10 -0700
commit		a87508008dfa1604baf2d4e39bf44704c00f261c (patch)
tree		0be2ade96772037a02803b30e157c367d931e3d9
parent		4a19a3f07f1887903e5638a3be167f0c7b377ba3 (diff)
skia WIP (jarred/canvas)
343 files changed, 71825 insertions, 32 deletions
@@ -195,10 +195,12 @@ MAC_INCLUDE_DIRS := -I$(WEBKIT_RELEASE_DIR)/JavaScriptCore/PrivateHeaders \
 		-Isrc/javascript/jsc/bindings/ \
 		-Isrc/javascript/jsc/bindings/webcore \
 		-I$(WEBKIT_DIR)/Source/bmalloc \
-		-I$(WEBKIT_DIR)/Source
+		-I$(WEBKIT_DIR)/Source \
+		-I$(BUN_DEPS_DIR)/skia

 LINUX_INCLUDE_DIRS := -I$(JSC_INCLUDE_DIR) \
-		-Isrc/javascript/jsc/bindings/
+		-Isrc/javascript/jsc/bindings/ \
+		-I$(BUN_DEPS_DIR)/skia

 UWS_INCLUDE_DIR := -I$(BUN_DEPS_DIR)/uws/uSockets/src -I$(BUN_DEPS_DIR)/uws/src -I$(BUN_DEPS_DIR)
@@ -295,7 +297,8 @@ ARCHIVE_FILES_WITHOUT_LIBCRYPTO = $(MIMALLOC_FILE_PATH) \
 		$(BUN_DEPS_OUT_DIR)/picohttpparser.o \
 		$(BUN_DEPS_OUT_DIR)/liblolhtml.a \
 		$(BUN_DEPS_OUT_DIR)/uSockets.a \
-		$(BUN_DEPS_OUT_DIR)/libuwsockets.o
+		$(BUN_DEPS_OUT_DIR)/libuwsockets.o \
+		$(BUN_DEPS_OUT_DIR)/skia.a

 ARCHIVE_FILES = $(ARCHIVE_FILES_WITHOUT_LIBCRYPTO) $(BUN_DEPS_OUT_DIR)/libcrypto.boring.a
diff --git a/examples/bun/canvas.ts b/examples/bun/canvas.ts
new file mode 100644
index 000000000..b601adc1d
--- /dev/null
+++ b/examples/bun/canvas.ts
@@ -0,0 +1,20 @@
+// The goal of this stream is for this code to work.
+// The likelihood of that happening is pretty low.
+// but it's worth an attempt!
+const canvas = new OffscreenCanvas(256, 256);
+const ctx = canvas.getContext("2d");
+
+const imageData = new ImageData(256, 256);
+// one red pixel
+imageData.data[0] = 255;
+imageData.data[1] = 0;
+imageData.data[2] = 0;
+
+console.log(imageData);
+
+// ctx.drawImage(imageData, 0, 0);
+
+// const blob = await canvas.convertToBlob({ type: "image/png" });
+// await Bun.write("hello.png", blob);
+
+// export {};
diff --git a/src/deps/skia/include/OWNERS b/src/deps/skia/include/OWNERS
new file mode 100644
index 000000000..2d6647f75
--- /dev/null
+++ b/src/deps/skia/include/OWNERS
@@ -0,0 +1,16 @@
+set noparent
+
+# Include one of the following reviewers for CLs that add or change Skia's public API:
+brianosman@google.com
+bsalomon@google.com
+djsollen@google.com
+hcm@google.com
+egdaniel@google.com
+
+# For revert purposes only
+rubber-stamper@appspot.gserviceaccount.com
+
+per-file BUILD.bazel=bungeman@google.com
+per-file BUILD.bazel=jcgregorio@google.com
+per-file BUILD.bazel=kjlubick@google.com
+per-file BUILD.bazel=lovisolo@google.com
\ No newline at end of file
diff --git a/src/deps/skia/include/android/BUILD.bazel b/src/deps/skia/include/android/BUILD.bazel
new file mode 100644
index 000000000..0ee0f5aa7
--- /dev/null
+++ b/src/deps/skia/include/android/BUILD.bazel
@@ -0,0 +1,24 @@
+load("//bazel:macros.bzl", "generated_cc_atom")
+
+generated_cc_atom(
+    name = "SkAndroidFrameworkUtils_hdr",
+    hdrs = ["SkAndroidFrameworkUtils.h"],
+    visibility = ["//:__subpackages__"],
+    deps = [
+        "//include/core:SkRefCnt_hdr",
+        "//include/core:SkTypes_hdr",
+    ],
+)
+
+generated_cc_atom(
+    name = "SkAnimatedImage_hdr",
+    hdrs = ["SkAnimatedImage.h"],
+    visibility = ["//:__subpackages__"],
+    deps = [
+        "//include/codec:SkCodecAnimation_hdr",
+        "//include/core:SkBitmap_hdr",
+        "//include/core:SkDrawable_hdr",
+        "//include/core:SkMatrix_hdr",
+        "//include/core:SkRect_hdr",
+    ],
+)
diff --git a/src/deps/skia/include/android/SkAndroidFrameworkUtils.h b/src/deps/skia/include/android/SkAndroidFrameworkUtils.h
new file mode 100644
index 000000000..577bfab72
--- /dev/null
+++ b/src/deps/skia/include/android/SkAndroidFrameworkUtils.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAndroidFrameworkUtils_DEFINED
+#define SkAndroidFrameworkUtils_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+class SkCanvas;
+struct SkIRect;
+struct SkRect;
+class SkSurface;
+
+/**
+ * SkAndroidFrameworkUtils expose private APIs used only by Android framework.
+ */
+class SkAndroidFrameworkUtils {
+public:
+
+#if SK_SUPPORT_GPU
+    /**
+     * clipWithStencil draws the current clip into a stencil buffer with reference value and mask
+     * set to 0x1. This function works only on a GPU canvas.
+     *
+     * @param canvas A GPU canvas that has a non-empty clip.
+     *
+     * @return true on success or false if clip is empty or not a GPU canvas.
+     */
+    static bool clipWithStencil(SkCanvas* canvas);
+#endif //SK_SUPPORT_GPU
+
+    static void SafetyNetLog(const char*);
+
+    static sk_sp<SkSurface> getSurfaceFromCanvas(SkCanvas* canvas);
+
+    static int SaveBehind(SkCanvas* canvas, const SkRect* subset);
+
+    // Operating within the canvas' clip stack, this resets the geometry of the clip to be wide
+    // open modula any device clip restriction that was set outside of the clip stack.
+    static void ResetClip(SkCanvas* canvas);
+
+    /**
+     * Unrolls a chain of nested SkPaintFilterCanvas to return the base wrapped canvas.
+     *
+     * @param canvas A SkPaintFilterCanvas or any other SkCanvas subclass.
+     *
+     * @return SkCanvas that was found in the innermost SkPaintFilterCanvas.
+     */
+    static SkCanvas* getBaseWrappedCanvas(SkCanvas* canvas);
+};
+
+#endif // SK_BUILD_FOR_ANDROID_ANDROID
+
+#endif // SkAndroidFrameworkUtils_DEFINED
diff --git a/src/deps/skia/include/android/SkAnimatedImage.h b/src/deps/skia/include/android/SkAnimatedImage.h
new file mode 100644
index 000000000..8143c1722
--- /dev/null
+++ b/src/deps/skia/include/android/SkAnimatedImage.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */ + +#ifndef SkAnimatedImage_DEFINED +#define SkAnimatedImage_DEFINED + +#include "include/codec/SkCodecAnimation.h" +#include "include/core/SkBitmap.h" +#include "include/core/SkDrawable.h" +#include "include/core/SkMatrix.h" +#include "include/core/SkRect.h" + +class SkAndroidCodec; +class SkImage; +class SkPicture; + +/** + * Thread unsafe drawable for drawing animated images (e.g. GIF). + */ +class SK_API SkAnimatedImage : public SkDrawable { +public: + /** + * Create an SkAnimatedImage from the SkAndroidCodec. + * + * Returns null on failure to allocate pixels. On success, this will + * decode the first frame. + * + * @param info Width and height may require scaling. + * @param cropRect Rectangle to crop to after scaling. + * @param postProcess Picture to apply after scaling and cropping. + */ + static sk_sp<SkAnimatedImage> Make(std::unique_ptr<SkAndroidCodec>, + const SkImageInfo& info, SkIRect cropRect, sk_sp<SkPicture> postProcess); + + /** + * Simpler version that uses the default size, no cropping, and no postProcess. + */ + static sk_sp<SkAnimatedImage> Make(std::unique_ptr<SkAndroidCodec>); + + ~SkAnimatedImage() override; + + /** + * Reset the animation to the beginning. + */ + void reset(); + + /** + * Whether the animation completed. + * + * Returns true after all repetitions are complete, or an error stops the + * animation. Gets reset to false if the animation is restarted. + */ + bool isFinished() const { return fFinished; } + + /** + * Returned by decodeNextFrame and currentFrameDuration if the animation + * is not running. + */ + static constexpr int kFinished = -1; + + /** + * Decode the next frame. + * + * If the animation is on the last frame or has hit an error, returns + * kFinished. + */ + int decodeNextFrame(); + + /** + * Returns the current frame as an SkImage. The SkImage will not change + * after it has been returned. + * If there is no current frame, nullptr will be returned. + */ + sk_sp<SkImage> getCurrentFrame(); + + /** + * How long to display the current frame. + * + * Useful for the first frame, for which decodeNextFrame is called + * internally. + */ + int currentFrameDuration() { + return fCurrentFrameDuration; + } + + /** + * Change the repetition count. + * + * By default, the image will repeat the number of times indicated in the + * encoded data. + * + * Use SkCodec::kRepetitionCountInfinite for infinite, and 0 to show all + * frames once and then stop. + */ + void setRepetitionCount(int count); + + /** + * Return the currently set repetition count. + */ + int getRepetitionCount() const { + return fRepetitionCount; + } + + /** + * Return the total number of frames in the animation. + */ + int getFrameCount() const { return fFrameCount; } + +protected: + SkRect onGetBounds() override; + void onDraw(SkCanvas*) override; + +private: + struct Frame { + SkBitmap fBitmap; + int fIndex; + SkCodecAnimation::DisposalMethod fDisposalMethod; + + // init() may have to create a new SkPixelRef, if the + // current one is already in use by another owner (e.g. + // an SkPicture). This determines whether to copy the + // existing one to the new one. + enum class OnInit { + // Restore the image from the old SkPixelRef to the + // new one. + kRestoreIfNecessary, + // No need to restore. 
+ kNoRestore, + }; + + Frame(); + bool init(const SkImageInfo& info, OnInit); + bool copyTo(Frame*) const; + }; + + std::unique_ptr<SkAndroidCodec> fCodec; + SkImageInfo fDecodeInfo; + const SkIRect fCropRect; + const sk_sp<SkPicture> fPostProcess; + const int fFrameCount; + SkMatrix fMatrix; + int fSampleSize; + + bool fFinished; + int fCurrentFrameDuration; + Frame fDisplayFrame; + Frame fDecodingFrame; + Frame fRestoreFrame; + int fRepetitionCount; + int fRepetitionsCompleted; + + SkAnimatedImage(std::unique_ptr<SkAndroidCodec>, const SkImageInfo& requestedInfo, + SkIRect cropRect, sk_sp<SkPicture> postProcess); + + int computeNextFrame(int current, bool* animationEnded); + double finish(); + + /** + * True if there is no crop, orientation, or post decoding scaling. + */ + bool simple() const { return fMatrix.isIdentity() && !fPostProcess + && fCropRect == fDecodeInfo.bounds(); } + + /** + * Returns the current frame as an SkImage. + * + * Like getCurrentFrame, but only returns the raw data from the internal SkBitmap. (i.e. no + * scaling, orientation-correction or cropping.) If simple(), this is the final output. + */ + sk_sp<SkImage> getCurrentFrameSimple(); + + using INHERITED = SkDrawable; +}; + +#endif // SkAnimatedImage_DEFINED diff --git a/src/deps/skia/include/c/BUILD.bazel b/src/deps/skia/include/c/BUILD.bazel new file mode 100644 index 000000000..ffb4f7f6b --- /dev/null +++ b/src/deps/skia/include/c/BUILD.bazel @@ -0,0 +1,91 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "sk_canvas_hdr", + hdrs = ["sk_canvas.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_colorspace_hdr", + hdrs = ["sk_colorspace.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_data_hdr", + hdrs = ["sk_data.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_image_hdr", + hdrs = ["sk_image.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_imageinfo_hdr", + hdrs = ["sk_imageinfo.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_maskfilter_hdr", + hdrs = ["sk_maskfilter.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_matrix_hdr", + hdrs = ["sk_matrix.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_paint_hdr", + hdrs = ["sk_paint.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_path_hdr", + hdrs = ["sk_path.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_picture_hdr", + hdrs = ["sk_picture.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_shader_hdr", + hdrs = ["sk_shader.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_surface_hdr", + hdrs = ["sk_surface.h"], + visibility = ["//:__subpackages__"], + deps = [":sk_types_hdr"], +) + +generated_cc_atom( + name = "sk_types_hdr", + hdrs = ["sk_types.h"], + visibility = ["//:__subpackages__"], +) diff --git a/src/deps/skia/include/c/sk_canvas.h b/src/deps/skia/include/c/sk_canvas.h new file mode 100644 index 000000000..751c07ba3 --- /dev/null +++ 
b/src/deps/skia/include/c/sk_canvas.h @@ -0,0 +1,159 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_canvas_DEFINED +#define sk_canvas_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** + Save the current matrix and clip on the canvas. When the + balancing call to sk_canvas_restore() is made, the previous matrix + and clip are restored. +*/ +SK_API void sk_canvas_save(sk_canvas_t*); +/** + This behaves the same as sk_canvas_save(), but in addition it + allocates an offscreen surface. All drawing calls are directed + there, and only when the balancing call to sk_canvas_restore() is + made is that offscreen transfered to the canvas (or the previous + layer). + + @param sk_rect_t* (may be null) This rect, if non-null, is used as + a hint to limit the size of the offscreen, and + thus drawing may be clipped to it, though that + clipping is not guaranteed to happen. If exact + clipping is desired, use sk_canvas_clip_rect(). + @param sk_paint_t* (may be null) The paint is copied, and is applied + to the offscreen when sk_canvas_restore() is + called. +*/ +SK_API void sk_canvas_save_layer(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*); +/** + This call balances a previous call to sk_canvas_save() or + sk_canvas_save_layer(), and is used to remove all modifications to + the matrix and clip state since the last save call. It is an + error to call sk_canvas_restore() more times than save and + save_layer were called. +*/ +SK_API void sk_canvas_restore(sk_canvas_t*); + +/** + Preconcat the current coordinate transformation matrix with the + specified translation. +*/ +SK_API void sk_canvas_translate(sk_canvas_t*, float dx, float dy); +/** + Preconcat the current coordinate transformation matrix with the + specified scale. +*/ +SK_API void sk_canvas_scale(sk_canvas_t*, float sx, float sy); +/** + Preconcat the current coordinate transformation matrix with the + specified rotation in degrees. +*/ +SK_API void sk_canvas_rotate_degrees(sk_canvas_t*, float degrees); +/** + Preconcat the current coordinate transformation matrix with the + specified rotation in radians. +*/ +SK_API void sk_canvas_rotate_radians(sk_canvas_t*, float radians); +/** + Preconcat the current coordinate transformation matrix with the + specified skew. +*/ +SK_API void sk_canvas_skew(sk_canvas_t*, float sx, float sy); +/** + Preconcat the current coordinate transformation matrix with the + specified matrix. +*/ +SK_API void sk_canvas_concat(sk_canvas_t*, const sk_matrix_t*); + +/** + Modify the current clip with the specified rectangle. The new + current clip will be the intersection of the old clip and the + rectange. +*/ +SK_API void sk_canvas_clip_rect(sk_canvas_t*, const sk_rect_t*); +/** + Modify the current clip with the specified path. The new + current clip will be the intersection of the old clip and the + path. +*/ +SK_API void sk_canvas_clip_path(sk_canvas_t*, const sk_path_t*); + +/** + Fill the entire canvas (restricted to the current clip) with the + specified paint. +*/ +SK_API void sk_canvas_draw_paint(sk_canvas_t*, const sk_paint_t*); +/** + Draw the specified rectangle using the specified paint. The + rectangle will be filled or stroked based on the style in the + paint. 
+*/ +SK_API void sk_canvas_draw_rect(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*); +/** + * Draw the circle centered at (cx, cy) with radius rad using the specified paint. + * The circle will be filled or framed based on the style in the paint + */ +SK_API void sk_canvas_draw_circle(sk_canvas_t*, float cx, float cy, float rad, const sk_paint_t*); +/** + Draw the specified oval using the specified paint. The oval will be + filled or framed based on the style in the paint +*/ +SK_API void sk_canvas_draw_oval(sk_canvas_t*, const sk_rect_t*, const sk_paint_t*); +/** + Draw the specified path using the specified paint. The path will be + filled or framed based on the style in the paint +*/ +SK_API void sk_canvas_draw_path(sk_canvas_t*, const sk_path_t*, const sk_paint_t*); +/** + Draw the specified image, with its top/left corner at (x,y), using + the specified paint, transformed by the current matrix. + + @param sk_paint_t* (may be NULL) the paint used to draw the image. +*/ +SK_API void sk_canvas_draw_image(sk_canvas_t*, const sk_image_t*, float x, float y, + const sk_sampling_options_t*, const sk_paint_t*); +/** + Draw the specified image, scaling and translating so that it fills + the specified dst rect. If the src rect is non-null, only that + subset of the image is transformed and drawn. + + @param sk_paint_t* (may be NULL) The paint used to draw the image. +*/ +SK_API void sk_canvas_draw_image_rect(sk_canvas_t*, const sk_image_t*, + const sk_rect_t* src, const sk_rect_t* dst, + const sk_sampling_options_t*, const sk_paint_t*); + +/** + Draw the picture into this canvas (replay the pciture's drawing commands). + + @param sk_matrix_t* If non-null, apply that matrix to the CTM when + drawing this picture. This is logically + equivalent to: save, concat, draw_picture, + restore. + + @param sk_paint_t* If non-null, draw the picture into a temporary + buffer, and then apply the paint's alpha, + colorfilter, imagefilter, and xfermode to that + buffer as it is drawn to the canvas. This is + logically equivalent to save_layer(paint), + draw_picture, restore. +*/ +SK_API void sk_canvas_draw_picture(sk_canvas_t*, const sk_picture_t*, + const sk_matrix_t*, const sk_paint_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_colorspace.h b/src/deps/skia/include/c/sk_colorspace.h new file mode 100644 index 000000000..31839840d --- /dev/null +++ b/src/deps/skia/include/c/sk_colorspace.h @@ -0,0 +1,25 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_colorspace_DEFINED +#define sk_colorspace_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +SK_API sk_colorspace_t* sk_colorspace_new_srgb(); + +SK_API void sk_colorspace_ref(sk_colorspace_t*); +SK_API void sk_colorspace_unref(sk_colorspace_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_data.h b/src/deps/skia/include/c/sk_data.h new file mode 100644 index 000000000..a330b92f2 --- /dev/null +++ b/src/deps/skia/include/c/sk_data.h @@ -0,0 +1,65 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_data_DEFINED +#define sk_data_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** + Returns a new sk_data_t by copying the specified source data. + This call must be balanced with a call to sk_data_unref(). +*/ +SK_API sk_data_t* sk_data_new_with_copy(const void* src, size_t length); +/** + Pass ownership of the given memory to a new sk_data_t, which will + call free() when the refernce count of the data goes to zero. For + example: + size_t length = 1024; + void* buffer = malloc(length); + memset(buffer, 'X', length); + sk_data_t* data = sk_data_new_from_malloc(buffer, length); + This call must be balanced with a call to sk_data_unref(). +*/ +SK_API sk_data_t* sk_data_new_from_malloc(const void* memory, size_t length); +/** + Returns a new sk_data_t using a subset of the data in the + specified source sk_data_t. This call must be balanced with a + call to sk_data_unref(). +*/ +SK_API sk_data_t* sk_data_new_subset(const sk_data_t* src, size_t offset, size_t length); + +/** + Increment the reference count on the given sk_data_t. Must be + balanced by a call to sk_data_unref(). +*/ +SK_API void sk_data_ref(const sk_data_t*); +/** + Decrement the reference count. If the reference count is 1 before + the decrement, then release both the memory holding the sk_data_t + and the memory it is managing. New sk_data_t are created with a + reference count of 1. +*/ +SK_API void sk_data_unref(const sk_data_t*); + +/** + Returns the number of bytes stored. +*/ +SK_API size_t sk_data_get_size(const sk_data_t*); +/** + Returns the pointer to the data. + */ +SK_API const void* sk_data_get_data(const sk_data_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_image.h b/src/deps/skia/include/c/sk_image.h new file mode 100644 index 000000000..d8ddc145d --- /dev/null +++ b/src/deps/skia/include/c/sk_image.h @@ -0,0 +1,71 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_image_DEFINED +#define sk_image_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** + * Return a new image that has made a copy of the provided pixels, or NULL on failure. + * Balance with a call to sk_image_unref(). + */ +SK_API sk_image_t* sk_image_new_raster_copy(const sk_imageinfo_t*, const void* pixels, size_t rowBytes); + +/** + * If the specified data can be interpreted as a compressed image (e.g. PNG or JPEG) then this + * returns an image. If the encoded data is not supported, returns NULL. + * + * On success, the encoded data may be processed immediately, or it may be ref()'d for later + * use. + */ +SK_API sk_image_t* sk_image_new_from_encoded(const sk_data_t* encoded); + +/** + * Encode the image's pixels and return the result as a new PNG in a + * sk_data_t, which the caller must manage: call sk_data_unref() when + * they are done. + * + * If the image type cannot be encoded, this will return NULL. + */ +SK_API sk_data_t* sk_image_encode(const sk_image_t*); + +/** + * Increment the reference count on the given sk_image_t. Must be + * balanced by a call to sk_image_unref(). +*/ +SK_API void sk_image_ref(const sk_image_t*); +/** + * Decrement the reference count. 
If the reference count is 1 before + * the decrement, then release both the memory holding the sk_image_t + * and the memory it is managing. New sk_image_t are created with a + reference count of 1. +*/ +SK_API void sk_image_unref(const sk_image_t*); + +/** + * Return the width of the sk_image_t/ + */ +SK_API int sk_image_get_width(const sk_image_t*); +/** + * Return the height of the sk_image_t/ + */ +SK_API int sk_image_get_height(const sk_image_t*); + +/** + * Returns a non-zero value unique among all images. + */ +SK_API uint32_t sk_image_get_unique_id(const sk_image_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_imageinfo.h b/src/deps/skia/include/c/sk_imageinfo.h new file mode 100644 index 000000000..6c8e9fff2 --- /dev/null +++ b/src/deps/skia/include/c/sk_imageinfo.h @@ -0,0 +1,62 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_imageinfo_DEFINED +#define sk_imageinfo_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +typedef enum { + UNKNOWN_SK_COLORTYPE, + RGBA_8888_SK_COLORTYPE, + BGRA_8888_SK_COLORTYPE, + ALPHA_8_SK_COLORTYPE, + GRAY_8_SK_COLORTYPE, + RGBA_F16_SK_COLORTYPE, + RGBA_F32_SK_COLORTYPE, +} sk_colortype_t; + +typedef enum { + OPAQUE_SK_ALPHATYPE, + PREMUL_SK_ALPHATYPE, + UNPREMUL_SK_ALPHATYPE, +} sk_alphatype_t; + +/** + * Allocate a new imageinfo object. If colorspace is not null, it's owner-count will be + * incremented automatically. + */ +SK_API sk_imageinfo_t* sk_imageinfo_new(int width, int height, sk_colortype_t ct, sk_alphatype_t at, + sk_colorspace_t* cs); + +/** + * Free the imageinfo object. If it contains a reference to a colorspace, its owner-count will + * be decremented automatically. + */ +SK_API void sk_imageinfo_delete(sk_imageinfo_t*); + +SK_API int32_t sk_imageinfo_get_width(const sk_imageinfo_t*); +SK_API int32_t sk_imageinfo_get_height(const sk_imageinfo_t*); +SK_API sk_colortype_t sk_imageinfo_get_colortype(const sk_imageinfo_t*); +SK_API sk_alphatype_t sk_imageinfo_get_alphatype(const sk_imageinfo_t*); + +/** + * Return the colorspace object reference contained in the imageinfo, or null if there is none. + * Note: this does not modify the owner-count on the colorspace object. If the caller needs to + * use the colorspace beyond the lifetime of the imageinfo, it should manually call + * sk_colorspace_ref() (and then call unref() when it is done). + */ +SK_API sk_colorspace_t* sk_imageinfo_get_colorspace(const sk_imageinfo_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_maskfilter.h b/src/deps/skia/include/c/sk_maskfilter.h new file mode 100644 index 000000000..c8aa7ed44 --- /dev/null +++ b/src/deps/skia/include/c/sk_maskfilter.h @@ -0,0 +1,47 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_maskfilter_DEFINED +#define sk_maskfilter_DEFINED + +#include "include/c/sk_types.h" + +typedef enum { + NORMAL_SK_BLUR_STYLE, //!< fuzzy inside and outside + SOLID_SK_BLUR_STYLE, //!< solid inside, fuzzy outside + OUTER_SK_BLUR_STYLE, //!< nothing inside, fuzzy outside + INNER_SK_BLUR_STYLE, //!< fuzzy inside, nothing outside +} sk_blurstyle_t; + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** + Increment the reference count on the given sk_maskfilter_t. Must be + balanced by a call to sk_maskfilter_unref(). +*/ +SK_API void sk_maskfilter_ref(sk_maskfilter_t*); +/** + Decrement the reference count. If the reference count is 1 before + the decrement, then release both the memory holding the + sk_maskfilter_t and any other associated resources. New + sk_maskfilter_t are created with a reference count of 1. +*/ +SK_API void sk_maskfilter_unref(sk_maskfilter_t*); + +/** + Create a blur maskfilter. + @param sk_blurstyle_t The SkBlurStyle to use + @param sigma Standard deviation of the Gaussian blur to apply. Must be > 0. +*/ +SK_API sk_maskfilter_t* sk_maskfilter_new_blur(sk_blurstyle_t, float sigma); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_matrix.h b/src/deps/skia/include/c/sk_matrix.h new file mode 100644 index 000000000..244863c4f --- /dev/null +++ b/src/deps/skia/include/c/sk_matrix.h @@ -0,0 +1,49 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_matrix_DEFINED +#define sk_matrix_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** Set the matrix to identity */ +void sk_matrix_set_identity(sk_matrix_t*); + +/** Set the matrix to translate by (tx, ty). */ +void sk_matrix_set_translate(sk_matrix_t*, float tx, float ty); +/** + Preconcats the matrix with the specified translation. + M' = M * T(dx, dy) +*/ +void sk_matrix_pre_translate(sk_matrix_t*, float tx, float ty); +/** + Postconcats the matrix with the specified translation. + M' = T(dx, dy) * M +*/ +void sk_matrix_post_translate(sk_matrix_t*, float tx, float ty); + +/** Set the matrix to scale by sx and sy. */ +void sk_matrix_set_scale(sk_matrix_t*, float sx, float sy); +/** + Preconcats the matrix with the specified scale. + M' = M * S(sx, sy) +*/ +void sk_matrix_pre_scale(sk_matrix_t*, float sx, float sy); +/** + Postconcats the matrix with the specified scale. + M' = S(sx, sy) * M +*/ +void sk_matrix_post_scale(sk_matrix_t*, float sx, float sy); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_paint.h b/src/deps/skia/include/c/sk_paint.h new file mode 100644 index 000000000..98ba4954c --- /dev/null +++ b/src/deps/skia/include/c/sk_paint.h @@ -0,0 +1,145 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_paint_DEFINED +#define sk_paint_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** + Create a new paint with default settings: + antialias : false + stroke : false + stroke width : 0.0f (hairline) + stroke miter : 4.0f + stroke cap : BUTT_SK_STROKE_CAP + stroke join : MITER_SK_STROKE_JOIN + color : opaque black + shader : NULL + maskfilter : NULL + xfermode_mode : SRCOVER_SK_XFERMODE_MODE +*/ +SK_API sk_paint_t* sk_paint_new(void); +/** + Release the memory storing the sk_paint_t and unref() all + associated objects. +*/ +SK_API void sk_paint_delete(sk_paint_t*); + +/** + Return true iff the paint has antialiasing enabled. +*/ +SK_API bool sk_paint_is_antialias(const sk_paint_t*); +/** + Set to true to enable antialiasing, false to disable it on this + sk_paint_t. +*/ +SK_API void sk_paint_set_antialias(sk_paint_t*, bool); + +/** + Return the paint's curent drawing color. +*/ +SK_API sk_color_t sk_paint_get_color(const sk_paint_t*); +/** + Set the paint's curent drawing color. +*/ +SK_API void sk_paint_set_color(sk_paint_t*, sk_color_t); + +/* stroke settings */ + +/** + Return true iff stroking is enabled rather than filling on this + sk_paint_t. +*/ +SK_API bool sk_paint_is_stroke(const sk_paint_t*); +/** + Set to true to enable stroking rather than filling with this + sk_paint_t. +*/ +SK_API void sk_paint_set_stroke(sk_paint_t*, bool); + +/** + Return the width for stroking. A value of 0 strokes in hairline mode. + */ +SK_API float sk_paint_get_stroke_width(const sk_paint_t*); +/** + Set the width for stroking. A value of 0 strokes in hairline mode + (always draw 1-pixel wide, regardless of the matrix). + */ +SK_API void sk_paint_set_stroke_width(sk_paint_t*, float width); + +/** + Return the paint's stroke miter value. This is used to control the + behavior of miter joins when the joins angle is sharp. +*/ +SK_API float sk_paint_get_stroke_miter(const sk_paint_t*); +/** + Set the paint's stroke miter value. This is used to control the + behavior of miter joins when the joins angle is sharp. This value + must be >= 0. +*/ +SK_API void sk_paint_set_stroke_miter(sk_paint_t*, float miter); + +typedef enum { + BUTT_SK_STROKE_CAP, + ROUND_SK_STROKE_CAP, + SQUARE_SK_STROKE_CAP +} sk_stroke_cap_t; + +/** + Return the paint's stroke cap type, controlling how the start and + end of stroked lines and paths are treated. +*/ +SK_API sk_stroke_cap_t sk_paint_get_stroke_cap(const sk_paint_t*); +/** + Set the paint's stroke cap type, controlling how the start and + end of stroked lines and paths are treated. +*/ +SK_API void sk_paint_set_stroke_cap(sk_paint_t*, sk_stroke_cap_t); + +typedef enum { + MITER_SK_STROKE_JOIN, + ROUND_SK_STROKE_JOIN, + BEVEL_SK_STROKE_JOIN +} sk_stroke_join_t; + +/** + Return the paint's stroke join type, specifies the treatment that + is applied to corners in paths and rectangles + */ +SK_API sk_stroke_join_t sk_paint_get_stroke_join(const sk_paint_t*); +/** + Set the paint's stroke join type, specifies the treatment that + is applied to corners in paths and rectangles + */ +SK_API void sk_paint_set_stroke_join(sk_paint_t*, sk_stroke_join_t); + +/** + * Set the paint's shader to the specified parameter. This will automatically call unref() on + * any previous value, and call ref() on the new value. 
+ */ +SK_API void sk_paint_set_shader(sk_paint_t*, sk_shader_t*); + +/** + * Set the paint's maskfilter to the specified parameter. This will automatically call unref() on + * any previous value, and call ref() on the new value. + */ +SK_API void sk_paint_set_maskfilter(sk_paint_t*, sk_maskfilter_t*); + +/** + * Set the paint's xfermode to the specified parameter. + */ +SK_API void sk_paint_set_xfermode_mode(sk_paint_t*, sk_xfermode_mode_t); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_path.h b/src/deps/skia/include/c/sk_path.h new file mode 100644 index 000000000..ed8cbb94f --- /dev/null +++ b/src/deps/skia/include/c/sk_path.h @@ -0,0 +1,102 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_path_DEFINED +#define sk_path_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +typedef enum { + CW_SK_PATH_DIRECTION, + CCW_SK_PATH_DIRECTION, +} sk_path_direction_t; + +typedef struct sk_pathbuilder_t sk_pathbuilder_t; + +/** Create a new, empty path. */ +SK_API sk_pathbuilder_t* sk_pathbuilder_new(void); + +/** Release the memory used by a sk_pathbuilder_t. */ +SK_API void sk_pathbuilder_delete(sk_pathbuilder_t*); + +/** Set the beginning of the next contour to the point (x,y). */ +SK_API void sk_pathbuilder_move_to(sk_pathbuilder_t*, float x, float y); +/** + Add a line from the last point to the specified point (x,y). If no + sk_pathbuilder_move_to() call has been made for this contour, the first + point is automatically set to (0,0). +*/ +SK_API void sk_pathbuilder_line_to(sk_pathbuilder_t*, float x, float y); +/** + Add a quadratic bezier from the last point, approaching control + point (x0,y0), and ending at (x1,y1). If no sk_pathbuilder_move_to() call + has been made for this contour, the first point is automatically + set to (0,0). +*/ +SK_API void sk_pathbuilder_quad_to(sk_pathbuilder_t*, float x0, float y0, float x1, float y1); +/** + Add a conic curve from the last point, approaching control point + (x0,y01), and ending at (x1,y1) with weight w. If no + sk_pathbuilder_move_to() call has been made for this contour, the first + point is automatically set to (0,0). +*/ +SK_API void sk_pathbuilder_conic_to(sk_pathbuilder_t*, float x0, float y0, float x1, float y1, float w); +/** + Add a cubic bezier from the last point, approaching control points + (x0,y0) and (x1,y1), and ending at (x2,y2). If no + sk_pathbuilder_move_to() call has been made for this contour, the first + point is automatically set to (0,0). +*/ +SK_API void sk_pathbuilder_cubic_to(sk_pathbuilder_t*, + float x0, float y0, + float x1, float y1, + float x2, float y2); +/** + Close the current contour. If the current point is not equal to the + first point of the contour, a line segment is automatically added. +*/ +SK_API void sk_pathbuilder_close(sk_pathbuilder_t*); + +/** + Add a closed rectangle contour to the path. +*/ +SK_API void sk_pathbuilder_add_rect(sk_pathbuilder_t*, const sk_rect_t*, sk_path_direction_t); +/** + Add a closed oval contour to the path +*/ +SK_API void sk_pathbuilder_add_oval(sk_pathbuilder_t*, const sk_rect_t*, sk_path_direction_t); + +/**** path *****/ + +/** +* Return a Path from the builder, resetting the builder to its original empty state. 
+*/ +SK_API sk_path_t* sk_pathbuilder_detach_path(sk_pathbuilder_t*); + +/** + * Return a Path from the builder. The builder reamins in its current state. + */ +SK_API sk_path_t* sk_pathbuilder_snapshot_path(sk_pathbuilder_t*); + +/** Release the memory used by a sk_path_t. */ +SK_API void sk_path_delete(sk_path_t*); + +/** + * If the path is empty, return false and set the rect parameter to [0, 0, 0, 0]. + * else return true and set the rect parameter to the bounds of the control-points + * of the path. + */ +SK_API bool sk_path_get_bounds(const sk_path_t*, sk_rect_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_picture.h b/src/deps/skia/include/c/sk_picture.h new file mode 100644 index 000000000..e56910407 --- /dev/null +++ b/src/deps/skia/include/c/sk_picture.h @@ -0,0 +1,70 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_picture_DEFINED +#define sk_picture_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** + Create a new sk_picture_recorder_t. Its resources should be + released with a call to sk_picture_recorder_delete(). +*/ +SK_API sk_picture_recorder_t* sk_picture_recorder_new(void); +/** + Release the memory and other resources used by this + sk_picture_recorder_t. +*/ +SK_API void sk_picture_recorder_delete(sk_picture_recorder_t*); + +/** + Returns the canvas that records the drawing commands + + @param sk_rect_t* the cull rect used when recording this + picture. Any drawing the falls outside of this + rect is undefined, and may be drawn or it may not. +*/ +SK_API sk_canvas_t* sk_picture_recorder_begin_recording(sk_picture_recorder_t*, const sk_rect_t*); +/** + Signal that the caller is done recording. This invalidates the + canvas returned by begin_recording. Ownership of the sk_picture_t + is passed to the caller, who must call sk_picture_unref() when + they are done using it. The returned picture is immutable. +*/ +SK_API sk_picture_t* sk_picture_recorder_end_recording(sk_picture_recorder_t*); + +/** + Increment the reference count on the given sk_picture_t. Must be + balanced by a call to sk_picture_unref(). +*/ +SK_API void sk_picture_ref(sk_picture_t*); +/** + Decrement the reference count. If the reference count is 1 before + the decrement, then release both the memory holding the + sk_picture_t and any resouces it may be managing. New + sk_picture_t are created with a reference count of 1. +*/ +SK_API void sk_picture_unref(sk_picture_t*); + +/** + Returns a non-zero value unique among all pictures. + */ +SK_API uint32_t sk_picture_get_unique_id(sk_picture_t*); + +/** + Return the cull rect specified when this picture was recorded. +*/ +SK_API sk_rect_t sk_picture_get_bounds(sk_picture_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_shader.h b/src/deps/skia/include/c/sk_shader.h new file mode 100644 index 000000000..023ccbaea --- /dev/null +++ b/src/deps/skia/include/c/sk_shader.h @@ -0,0 +1,143 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_shader_DEFINED +#define sk_shader_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +SK_API void sk_shader_ref(sk_shader_t*); +SK_API void sk_shader_unref(sk_shader_t*); + +typedef enum { + CLAMP_SK_SHADER_TILEMODE, + REPEAT_SK_SHADER_TILEMODE, + MIRROR_SK_SHADER_TILEMODE, +} sk_shader_tilemode_t; + +/** + Returns a shader that generates a linear gradient between the two + specified points. + + @param points The start and end points for the gradient. + @param colors The array[count] of colors, to be distributed between + the two points + @param colorPos May be NULL. array[count] of SkScalars, or NULL, of + the relative position of each corresponding color + in the colors array. If this is NULL, the the + colors are distributed evenly between the start + and end point. If this is not null, the values + must begin with 0, end with 1.0, and intermediate + values must be strictly increasing. + @param colorCount Must be >=2. The number of colors (and pos if not + NULL) entries. + @param mode The tiling mode +*/ +SK_API sk_shader_t* sk_shader_new_linear_gradient(const sk_point_t points[2], + const sk_color_t colors[], + const float colorPos[], + int colorCount, + sk_shader_tilemode_t tileMode, + const sk_matrix_t* localMatrix); + + +/** + Returns a shader that generates a radial gradient given the center + and radius. + + @param center The center of the circle for this gradient + @param radius Must be positive. The radius of the circle for this + gradient + @param colors The array[count] of colors, to be distributed + between the center and edge of the circle + @param colorPos May be NULL. The array[count] of the relative + position of each corresponding color in the colors + array. If this is NULL, the the colors are + distributed evenly between the center and edge of + the circle. If this is not null, the values must + begin with 0, end with 1.0, and intermediate + values must be strictly increasing. + @param count Must be >= 2. The number of colors (and pos if not + NULL) entries + @param tileMode The tiling mode + @param localMatrix May be NULL +*/ +SK_API sk_shader_t* sk_shader_new_radial_gradient(const sk_point_t* center, + float radius, + const sk_color_t colors[], + const float colorPos[], + int colorCount, + sk_shader_tilemode_t tileMode, + const sk_matrix_t* localMatrix); + +/** + Returns a shader that generates a sweep gradient given a center. + + @param center The coordinates of the center of the sweep + @param colors The array[count] of colors, to be distributed around + the center. + @param colorPos May be NULL. The array[count] of the relative + position of each corresponding color in the colors + array. If this is NULL, the the colors are + distributed evenly between the center and edge of + the circle. If this is not null, the values must + begin with 0, end with 1.0, and intermediate + values must be strictly increasing. + @param colorCount Must be >= 2. The number of colors (and pos if + not NULL) entries + @param localMatrix May be NULL +*/ +SK_API sk_shader_t* sk_shader_new_sweep_gradient(const sk_point_t* center, + const sk_color_t colors[], + const float colorPos[], + int colorCount, + const sk_matrix_t* localMatrix); + +/** + Returns a shader that generates a conical gradient given two circles, or + returns NULL if the inputs are invalid. The gradient interprets the + two circles according to the following HTML spec. 
+ http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient + + Returns a shader that generates a sweep gradient given a center. + + @param start, startRadius Defines the first circle. + @param end, endRadius Defines the first circle. + @param colors The array[count] of colors, to be distributed between + the two circles. + @param colorPos May be NULL. The array[count] of the relative + position of each corresponding color in the colors + array. If this is NULL, the the colors are + distributed evenly between the two circles. If + this is not null, the values must begin with 0, + end with 1.0, and intermediate values must be + strictly increasing. + @param colorCount Must be >= 2. The number of colors (and pos if + not NULL) entries + @param tileMode The tiling mode + @param localMatrix May be NULL + +*/ +SK_API sk_shader_t* sk_shader_new_two_point_conical_gradient( + const sk_point_t* start, + float startRadius, + const sk_point_t* end, + float endRadius, + const sk_color_t colors[], + const float colorPos[], + int colorCount, + sk_shader_tilemode_t tileMode, + const sk_matrix_t* localMatrix); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_surface.h b/src/deps/skia/include/c/sk_surface.h new file mode 100644 index 000000000..88c8c87b3 --- /dev/null +++ b/src/deps/skia/include/c/sk_surface.h @@ -0,0 +1,73 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_surface_DEFINED +#define sk_surface_DEFINED + +#include "include/c/sk_types.h" + +SK_C_PLUS_PLUS_BEGIN_GUARD + +/** + Return a new surface, with the memory for the pixels automatically + allocated. If the requested surface cannot be created, or the + request is not a supported configuration, NULL will be returned. + + @param sk_imageinfo_t* Specify the width, height, color type, and + alpha type for the surface. + + @param sk_surfaceprops_t* If not NULL, specify additional non-default + properties of the surface. +*/ +SK_API sk_surface_t* sk_surface_new_raster(const sk_imageinfo_t*, const sk_surfaceprops_t*); + +/** + Create a new surface which will draw into the specified pixels + with the specified rowbytes. If the requested surface cannot be + created, or the request is not a supported configuration, NULL + will be returned. + + @param sk_imageinfo_t* Specify the width, height, color type, and + alpha type for the surface. + @param void* pixels Specify the location in memory where the + destination pixels are. This memory must + outlast this surface. + @param size_t rowBytes Specify the difference, in bytes, between + each adjacent row. Should be at least + (width * sizeof(one pixel)). + @param sk_surfaceprops_t* If not NULL, specify additional non-default + properties of the surface. +*/ +SK_API sk_surface_t* sk_surface_new_raster_direct(const sk_imageinfo_t*, + void* pixels, size_t rowBytes, + const sk_surfaceprops_t* props); + +/** + Decrement the reference count. If the reference count is 1 before + the decrement, then release both the memory holding the + sk_surface_t and any pixel memory it may be managing. New + sk_surface_t are created with a reference count of 1. +*/ +SK_API void sk_surface_unref(sk_surface_t*); + +/** + * Return the canvas associated with this surface. 
Note: the canvas is owned by the surface, + * so the returned object is only valid while the owning surface is valid. + */ +SK_API sk_canvas_t* sk_surface_get_canvas(sk_surface_t*); + +/** + * Call sk_image_unref() when the returned image is no longer used. + */ +SK_API sk_image_t* sk_surface_new_image_snapshot(sk_surface_t*); + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/c/sk_types.h b/src/deps/skia/include/c/sk_types.h new file mode 100644 index 000000000..5a484af42 --- /dev/null +++ b/src/deps/skia/include/c/sk_types.h @@ -0,0 +1,278 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL +// DO NOT USE -- FOR INTERNAL TESTING ONLY + +#ifndef sk_types_DEFINED +#define sk_types_DEFINED + +#include <stdint.h> +#include <stddef.h> + +#ifdef __cplusplus + #define SK_C_PLUS_PLUS_BEGIN_GUARD extern "C" { + #define SK_C_PLUS_PLUS_END_GUARD } +#else + #include <stdbool.h> + #define SK_C_PLUS_PLUS_BEGIN_GUARD + #define SK_C_PLUS_PLUS_END_GUARD +#endif + +#if !defined(SK_API) + #if defined(SKIA_DLL) + #if defined(_MSC_VER) + #if SKIA_IMPLEMENTATION + #define SK_API __declspec(dllexport) + #else + #define SK_API __declspec(dllimport) + #endif + #else + #define SK_API __attribute__((visibility("default"))) + #endif + #else + #define SK_API + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////////////// + +SK_C_PLUS_PLUS_BEGIN_GUARD + +typedef uint32_t sk_color_t; + +/* This macro assumes all arguments are >=0 and <=255. */ +#define sk_color_set_argb(a, r, g, b) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b)) +#define sk_color_get_a(c) (((c) >> 24) & 0xFF) +#define sk_color_get_r(c) (((c) >> 16) & 0xFF) +#define sk_color_get_g(c) (((c) >> 8) & 0xFF) +#define sk_color_get_b(c) (((c) >> 0) & 0xFF) + +typedef enum { + INTERSECT_SK_CLIPTYPE, + DIFFERENCE_SK_CLIPTYPE, +} sk_cliptype_t; + +typedef enum { + UNKNOWN_SK_PIXELGEOMETRY, + RGB_H_SK_PIXELGEOMETRY, + BGR_H_SK_PIXELGEOMETRY, + RGB_V_SK_PIXELGEOMETRY, + BGR_V_SK_PIXELGEOMETRY, +} sk_pixelgeometry_t; + +typedef struct { + sk_pixelgeometry_t pixelGeometry; +} sk_surfaceprops_t; + +typedef struct { + float x; + float y; +} sk_point_t; + +typedef struct { + int32_t left; + int32_t top; + int32_t right; + int32_t bottom; +} sk_irect_t; + +typedef struct { + float left; + float top; + float right; + float bottom; +} sk_rect_t; + +/** + The sk_matrix_t struct holds a 3x3 perspective matrix for + transforming coordinates: + + (X,Y) = T[M]((x,y)) + X = (M[0] * x + M[1] * y + M[2]) / (M[6] * x + M[7] * y + M[8]); + Y = (M[3] * x + M[4] * y + M[5]) / (M[6] * x + M[7] * y + M[8]); + + Therefore, the identity matrix is + + sk_matrix_t identity = {{1, 0, 0, + 0, 1, 0, + 0, 0, 1}}; + + A matrix that scales by sx and sy is: + + sk_matrix_t scale = {{sx, 0, 0, + 0, sy, 0, + 0, 0, 1}}; + + A matrix that translates by tx and ty is: + + sk_matrix_t translate = {{1, 0, tx, + 0, 1, ty, + 0, 0, 1}}; + + A matrix that rotates around the origin by A radians: + + sk_matrix_t rotate = {{cos(A), -sin(A), 0, + sin(A), cos(A), 0, + 0, 0, 1}}; + + Two matrixes can be concatinated by: + + void concat_matrices(sk_matrix_t* dst, + const sk_matrix_t* matrixU, + const sk_matrix_t* matrixV) { + const float* u = matrixU->mat; + const float* v = matrixV->mat; + sk_matrix_t result = {{ + u[0] * v[0] + u[1] * v[3] + u[2] * v[6], + u[0] * v[1] + u[1] * v[4] + u[2] * 
v[7], + u[0] * v[2] + u[1] * v[5] + u[2] * v[8], + u[3] * v[0] + u[4] * v[3] + u[5] * v[6], + u[3] * v[1] + u[4] * v[4] + u[5] * v[7], + u[3] * v[2] + u[4] * v[5] + u[5] * v[8], + u[6] * v[0] + u[7] * v[3] + u[8] * v[6], + u[6] * v[1] + u[7] * v[4] + u[8] * v[7], + u[6] * v[2] + u[7] * v[5] + u[8] * v[8] + }}; + *dst = result; + } +*/ +typedef struct { + float mat[9]; +} sk_matrix_t; + +/** + A sk_canvas_t encapsulates all of the state about drawing into a + destination This includes a reference to the destination itself, + and a stack of matrix/clip values. +*/ +typedef struct sk_canvas_t sk_canvas_t; +/** + A sk_data_ holds an immutable data buffer. +*/ +typedef struct sk_data_t sk_data_t; +/** + A sk_image_t is an abstraction for drawing a rectagle of pixels. + The content of the image is always immutable, though the actual + storage may change, if for example that image can be re-created via + encoded data or other means. +*/ +typedef struct sk_image_t sk_image_t; + +/** + * Describes the color components. See ICC Profiles. + */ +typedef struct sk_colorspace_t sk_colorspace_t; + +/** + * Describes an image buffer : width, height, pixel type, colorspace, etc. + */ +typedef struct sk_imageinfo_t sk_imageinfo_t; + +/** + A sk_maskfilter_t is an object that perform transformations on an + alpha-channel mask before drawing it; it may be installed into a + sk_paint_t. Each time a primitive is drawn, it is first + scan-converted into a alpha mask, which os handed to the + maskfilter, which may create a new mask is to render into the + destination. + */ +typedef struct sk_maskfilter_t sk_maskfilter_t; +/** + A sk_paint_t holds the style and color information about how to + draw geometries, text and bitmaps. +*/ +typedef struct sk_paint_t sk_paint_t; +/** + A sk_path_t encapsulates compound (multiple contour) geometric + paths consisting of straight line segments, quadratic curves, and + cubic curves. +*/ +typedef struct sk_path_t sk_path_t; +/** + A sk_picture_t holds recorded canvas drawing commands to be played + back at a later time. +*/ +typedef struct sk_picture_t sk_picture_t; +/** + A sk_picture_recorder_t holds a sk_canvas_t that records commands + to create a sk_picture_t. +*/ +typedef struct sk_picture_recorder_t sk_picture_recorder_t; +/** + A sk_shader_t specifies the source color(s) for what is being drawn. If a + paint has no shader, then the paint's color is used. If the paint + has a shader, then the shader's color(s) are use instead, but they + are modulated by the paint's alpha. +*/ +typedef struct sk_shader_t sk_shader_t; +/** + A sk_surface_t holds the destination for drawing to a canvas. For + raster drawing, the destination is an array of pixels in memory. + For GPU drawing, the destination is a texture or a framebuffer. 
+*/ +typedef struct sk_surface_t sk_surface_t; + +typedef enum { + NEAREST_SK_FILTER_MODE, + LINEAR_SK_FILTER_MODE, +} sk_filter_mode_t; + +typedef enum { + NONE_SK_MIPMAP_MODE, + NEAREST_SK_MIPMAP_MODE, + LINEAR_SK_MIPMAP_MODE, +} sk_mipmap_mode_t; + +typedef struct { + float B, C; +} sk_cubic_resampler_t; + +typedef struct { + bool useCubic; + sk_cubic_resampler_t cubic; + sk_filter_mode_t filter; + sk_mipmap_mode_t mipmap; +} sk_sampling_options_t; + +typedef enum { + CLEAR_SK_XFERMODE_MODE, + SRC_SK_XFERMODE_MODE, + DST_SK_XFERMODE_MODE, + SRCOVER_SK_XFERMODE_MODE, + DSTOVER_SK_XFERMODE_MODE, + SRCIN_SK_XFERMODE_MODE, + DSTIN_SK_XFERMODE_MODE, + SRCOUT_SK_XFERMODE_MODE, + DSTOUT_SK_XFERMODE_MODE, + SRCATOP_SK_XFERMODE_MODE, + DSTATOP_SK_XFERMODE_MODE, + XOR_SK_XFERMODE_MODE, + PLUS_SK_XFERMODE_MODE, + MODULATE_SK_XFERMODE_MODE, + SCREEN_SK_XFERMODE_MODE, + OVERLAY_SK_XFERMODE_MODE, + DARKEN_SK_XFERMODE_MODE, + LIGHTEN_SK_XFERMODE_MODE, + COLORDODGE_SK_XFERMODE_MODE, + COLORBURN_SK_XFERMODE_MODE, + HARDLIGHT_SK_XFERMODE_MODE, + SOFTLIGHT_SK_XFERMODE_MODE, + DIFFERENCE_SK_XFERMODE_MODE, + EXCLUSION_SK_XFERMODE_MODE, + MULTIPLY_SK_XFERMODE_MODE, + HUE_SK_XFERMODE_MODE, + SATURATION_SK_XFERMODE_MODE, + COLOR_SK_XFERMODE_MODE, + LUMINOSITY_SK_XFERMODE_MODE, +} sk_xfermode_mode_t; + +////////////////////////////////////////////////////////////////////////////////////////// + +SK_C_PLUS_PLUS_END_GUARD + +#endif diff --git a/src/deps/skia/include/codec/BUILD.bazel b/src/deps/skia/include/codec/BUILD.bazel new file mode 100644 index 000000000..27db9c460 --- /dev/null +++ b/src/deps/skia/include/codec/BUILD.bazel @@ -0,0 +1,47 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkAndroidCodec_hdr", + hdrs = ["SkAndroidCodec.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkCodec_hdr", + "//include/core:SkEncodedImageFormat_hdr", + "//include/core:SkStream_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkCodecAnimation_hdr", + hdrs = ["SkCodecAnimation.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkCodec_hdr", + hdrs = ["SkCodec.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkCodecAnimation_hdr", + ":SkEncodedOrigin_hdr", + "//include/core:SkColor_hdr", + "//include/core:SkEncodedImageFormat_hdr", + "//include/core:SkImageInfo_hdr", + "//include/core:SkPixmap_hdr", + "//include/core:SkSize_hdr", + "//include/core:SkStream_hdr", + "//include/core:SkTypes_hdr", + "//include/core:SkYUVAPixmaps_hdr", + "//include/private:SkEncodedInfo_hdr", + "//include/private:SkNoncopyable_hdr", + "//include/private:SkTemplates_hdr", + ], +) + +generated_cc_atom( + name = "SkEncodedOrigin_hdr", + hdrs = ["SkEncodedOrigin.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkMatrix_hdr"], +) diff --git a/src/deps/skia/include/codec/SkAndroidCodec.h b/src/deps/skia/include/codec/SkAndroidCodec.h new file mode 100644 index 000000000..11157429f --- /dev/null +++ b/src/deps/skia/include/codec/SkAndroidCodec.h @@ -0,0 +1,263 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkAndroidCodec_DEFINED +#define SkAndroidCodec_DEFINED + +#include "include/codec/SkCodec.h" +#include "include/core/SkEncodedImageFormat.h" +#include "include/core/SkStream.h" +#include "include/core/SkTypes.h" + +/** + * Abstract interface defining image codec functionality that is necessary for + * Android. + */ +class SK_API SkAndroidCodec : SkNoncopyable { +public: + /** + * Deprecated. + * + * Now that SkAndroidCodec supports multiframe images, there are multiple + * ways to handle compositing an oriented frame on top of an oriented frame + * with different tradeoffs. SkAndroidCodec now ignores the orientation and + * forces the client to handle it. + */ + enum class ExifOrientationBehavior { + kIgnore, + kRespect, + }; + + /** + * Pass ownership of an SkCodec to a newly-created SkAndroidCodec. + */ + static std::unique_ptr<SkAndroidCodec> MakeFromCodec(std::unique_ptr<SkCodec>); + + /** + * If this stream represents an encoded image that we know how to decode, + * return an SkAndroidCodec that can decode it. Otherwise return NULL. + * + * The SkPngChunkReader handles unknown chunks in PNGs. + * See SkCodec.h for more details. + * + * If NULL is returned, the stream is deleted immediately. Otherwise, the + * SkCodec takes ownership of it, and will delete it when done with it. + */ + static std::unique_ptr<SkAndroidCodec> MakeFromStream(std::unique_ptr<SkStream>, + SkPngChunkReader* = nullptr); + + /** + * If this data represents an encoded image that we know how to decode, + * return an SkAndroidCodec that can decode it. Otherwise return NULL. + * + * The SkPngChunkReader handles unknown chunks in PNGs. + * See SkCodec.h for more details. + */ + static std::unique_ptr<SkAndroidCodec> MakeFromData(sk_sp<SkData>, SkPngChunkReader* = nullptr); + + virtual ~SkAndroidCodec(); + + // TODO: fInfo is now just a cache of SkCodec's SkImageInfo. No need to + // cache and return a reference here, once Android call-sites are updated. + const SkImageInfo& getInfo() const { return fInfo; } + + /** + * Return the ICC profile of the encoded data. + */ + const skcms_ICCProfile* getICCProfile() const { + return fCodec->getEncodedInfo().profile(); + } + + /** + * Format of the encoded data. + */ + SkEncodedImageFormat getEncodedFormat() const { return fCodec->getEncodedFormat(); } + + /** + * @param requestedColorType Color type requested by the client + * + * |requestedColorType| may be overriden. We will default to kF16 + * for high precision images. + * + * In the general case, if it is possible to decode to + * |requestedColorType|, this returns |requestedColorType|. + * Otherwise, this returns a color type that is an appropriate + * match for the the encoded data. + */ + SkColorType computeOutputColorType(SkColorType requestedColorType); + + /** + * @param requestedUnpremul Indicates if the client requested + * unpremultiplied output + * + * Returns the appropriate alpha type to decode to. If the image + * has alpha, the value of requestedUnpremul will be honored. + */ + SkAlphaType computeOutputAlphaType(bool requestedUnpremul); + + /** + * @param outputColorType Color type that the client will decode to. + * @param prefColorSpace Preferred color space to decode to. + * This may not return |prefColorSpace| for a couple reasons. + * (1) Android Principles: 565 must be sRGB, F16 must be + * linear sRGB, transfer function must be parametric. + * (2) Codec Limitations: F16 requires a linear color space. + * + * Returns the appropriate color space to decode to. 
+ */ + sk_sp<SkColorSpace> computeOutputColorSpace(SkColorType outputColorType, + sk_sp<SkColorSpace> prefColorSpace = nullptr); + + /** + * Compute the appropriate sample size to get to |size|. + * + * @param size As an input parameter, the desired output size of + * the decode. As an output parameter, the smallest sampled size + * larger than the input. + * @return the sample size to set AndroidOptions::fSampleSize to decode + * to the output |size|. + */ + int computeSampleSize(SkISize* size) const; + + /** + * Returns the dimensions of the scaled output image, for an input + * sampleSize. + * + * When the sample size divides evenly into the original dimensions, the + * scaled output dimensions will simply be equal to the original + * dimensions divided by the sample size. + * + * When the sample size does not divide even into the original + * dimensions, the codec may round up or down, depending on what is most + * efficient to decode. + * + * Finally, the codec will always recommend a non-zero output, so the output + * dimension will always be one if the sampleSize is greater than the + * original dimension. + */ + SkISize getSampledDimensions(int sampleSize) const; + + /** + * Return (via desiredSubset) a subset which can decoded from this codec, + * or false if the input subset is invalid. + * + * @param desiredSubset in/out parameter + * As input, a desired subset of the original bounds + * (as specified by getInfo). + * As output, if true is returned, desiredSubset may + * have been modified to a subset which is + * supported. Although a particular change may have + * been made to desiredSubset to create something + * supported, it is possible other changes could + * result in a valid subset. If false is returned, + * desiredSubset's value is undefined. + * @return true If the input desiredSubset is valid. + * desiredSubset may be modified to a subset + * supported by the codec. + * false If desiredSubset is invalid (NULL or not fully + * contained within the image). + */ + bool getSupportedSubset(SkIRect* desiredSubset) const; + // TODO: Rename SkCodec::getValidSubset() to getSupportedSubset() + + /** + * Returns the dimensions of the scaled, partial output image, for an + * input sampleSize and subset. + * + * @param sampleSize Factor to scale down by. + * @param subset Must be a valid subset of the original image + * dimensions and a subset supported by SkAndroidCodec. + * getSubset() can be used to obtain a subset supported + * by SkAndroidCodec. + * @return Size of the scaled partial image. Or zero size + * if either of the inputs is invalid. + */ + SkISize getSampledSubsetDimensions(int sampleSize, const SkIRect& subset) const; + + /** + * Additional options to pass to getAndroidPixels(). + */ + // FIXME: It's a bit redundant to name these AndroidOptions when this class is already + // called SkAndroidCodec. On the other hand, it's may be a bit confusing to call + // these Options when SkCodec has a slightly different set of Options. Maybe these + // should be DecodeOptions or SamplingOptions? + struct AndroidOptions : public SkCodec::Options { + AndroidOptions() + : SkCodec::Options() + , fSampleSize(1) + {} + + /** + * The client may provide an integer downscale factor for the decode. + * The codec may implement this downscaling by sampling or another + * method if it is more efficient. + * + * The default is 1, representing no downscaling. 
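+         *
+         * Illustrative sketch (not part of the upstream header): decoding at
+         * roughly half the original width and height. Assumes a valid `codec`
+         * (SkAndroidCodec) plus include/core/SkBitmap.h; error handling is
+         * omitted.
+         *
+         *   SkISize target = SkISize::Make(codec->getInfo().width() / 2,
+         *                                  codec->getInfo().height() / 2);
+         *   int sampleSize = codec->computeSampleSize(&target);  // target is adjusted
+         *   SkImageInfo info = codec->getInfo().makeWH(target.width(), target.height());
+         *   SkBitmap bm;
+         *   bm.allocPixels(info);
+         *   SkAndroidCodec::AndroidOptions opts;
+         *   opts.fSampleSize = sampleSize;
+         *   codec->getAndroidPixels(info, bm.getPixels(), bm.rowBytes(), &opts);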
+ */ + int fSampleSize; + }; + + /** + * Decode into the given pixels, a block of memory of size at + * least (info.fHeight - 1) * rowBytes + (info.fWidth * + * bytesPerPixel) + * + * Repeated calls to this function should give the same results, + * allowing the PixelRef to be immutable. + * + * @param info A description of the format (config, size) + * expected by the caller. This can simply be identical + * to the info returned by getInfo(). + * + * This contract also allows the caller to specify + * different output-configs, which the implementation can + * decide to support or not. + * + * A size that does not match getInfo() implies a request + * to scale or subset. If the codec cannot perform this + * scaling or subsetting, it will return an error code. + * + * The AndroidOptions object is also used to specify any requested scaling or subsetting + * using options->fSampleSize and options->fSubset. If NULL, the defaults (as specified above + * for AndroidOptions) are used. + * + * @return Result kSuccess, or another value explaining the type of failure. + */ + // FIXME: It's a bit redundant to name this getAndroidPixels() when this class is already + // called SkAndroidCodec. On the other hand, it's may be a bit confusing to call + // this getPixels() when it is a slightly different API than SkCodec's getPixels(). + // Maybe this should be decode() or decodeSubset()? + SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, + const AndroidOptions* options); + + /** + * Simplified version of getAndroidPixels() where we supply the default AndroidOptions as + * specified above for AndroidOptions. It will not perform any scaling or subsetting. + */ + SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes); + + SkCodec::Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) { + return this->getAndroidPixels(info, pixels, rowBytes); + } + + SkCodec* codec() const { return fCodec.get(); } + +protected: + SkAndroidCodec(SkCodec*); + + virtual SkISize onGetSampledDimensions(int sampleSize) const = 0; + + virtual bool onGetSupportedSubset(SkIRect* desiredSubset) const = 0; + + virtual SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels, + size_t rowBytes, const AndroidOptions& options) = 0; + +private: + const SkImageInfo fInfo; + std::unique_ptr<SkCodec> fCodec; +}; +#endif // SkAndroidCodec_DEFINED diff --git a/src/deps/skia/include/codec/SkCodec.h b/src/deps/skia/include/codec/SkCodec.h new file mode 100644 index 000000000..e7c7c0eaa --- /dev/null +++ b/src/deps/skia/include/codec/SkCodec.h @@ -0,0 +1,992 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkCodec_DEFINED +#define SkCodec_DEFINED + +#include "include/codec/SkCodecAnimation.h" +#include "include/codec/SkEncodedOrigin.h" +#include "include/core/SkColor.h" +#include "include/core/SkEncodedImageFormat.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkPixmap.h" +#include "include/core/SkSize.h" +#include "include/core/SkStream.h" +#include "include/core/SkTypes.h" +#include "include/core/SkYUVAPixmaps.h" +#include "include/private/SkEncodedInfo.h" +#include "include/private/SkNoncopyable.h" +#include "include/private/SkTemplates.h" + +#include <vector> + +class SkAndroidCodec; +class SkColorSpace; +class SkData; +class SkFrameHolder; +class SkImage; +class SkPngChunkReader; +class SkSampler; + +namespace DM { +class CodecSrc; +class ColorCodecSrc; +} // namespace DM + +/** + * Abstraction layer directly on top of an image codec. + */ +class SK_API SkCodec : SkNoncopyable { +public: + /** + * Minimum number of bytes that must be buffered in SkStream input. + * + * An SkStream passed to NewFromStream must be able to use this many + * bytes to determine the image type. Then the same SkStream must be + * passed to the correct decoder to read from the beginning. + * + * This can be accomplished by implementing peek() to support peeking + * this many bytes, or by implementing rewind() to be able to rewind() + * after reading this many bytes. + */ + static constexpr size_t MinBufferedBytesNeeded() { return 32; } + + /** + * Error codes for various SkCodec methods. + */ + enum Result { + /** + * General return value for success. + */ + kSuccess, + /** + * The input is incomplete. A partial image was generated. + */ + kIncompleteInput, + /** + * Like kIncompleteInput, except the input had an error. + * + * If returned from an incremental decode, decoding cannot continue, + * even with more data. + */ + kErrorInInput, + /** + * The generator cannot convert to match the request, ignoring + * dimensions. + */ + kInvalidConversion, + /** + * The generator cannot scale to requested size. + */ + kInvalidScale, + /** + * Parameters (besides info) are invalid. e.g. NULL pixels, rowBytes + * too small, etc. + */ + kInvalidParameters, + /** + * The input did not contain a valid image. + */ + kInvalidInput, + /** + * Fulfilling this request requires rewinding the input, which is not + * supported for this input. + */ + kCouldNotRewind, + /** + * An internal error, such as OOM. + */ + kInternalError, + /** + * This method is not implemented by this codec. + * FIXME: Perhaps this should be kUnsupported? + */ + kUnimplemented, + }; + + /** + * Readable string representing the error code. + */ + static const char* ResultToString(Result); + + /** + * For container formats that contain both still images and image sequences, + * instruct the decoder how the output should be selected. (Refer to comments + * for each value for more details.) + */ + enum class SelectionPolicy { + /** + * If the container format contains both still images and image sequences, + * SkCodec should choose one of the still images. This is the default. + */ + kPreferStillImage, + /** + * If the container format contains both still images and image sequences, + * SkCodec should choose one of the image sequences for animation. + */ + kPreferAnimation, + }; + + /** + * If this stream represents an encoded image that we know how to decode, + * return an SkCodec that can decode it. Otherwise return NULL. 
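+     *
+     * Illustrative sketch (not part of the upstream header): a minimal
+     * file-to-bitmap decode. Assumes "in.png" exists and that SkStream.h and
+     * SkBitmap.h are included; errors simply return.
+     *
+     *   SkCodec::Result result;
+     *   auto codec = SkCodec::MakeFromStream(SkFILEStream::Make("in.png"), &result);
+     *   if (!codec) {
+     *       SkDebugf("decode failed: %s\n", SkCodec::ResultToString(result));
+     *       return;
+     *   }
+     *   SkBitmap bm;
+     *   bm.allocPixels(codec->getInfo());
+     *   result = codec->getPixels(bm.info(), bm.getPixels(), bm.rowBytes());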
+ * + * As stated above, this call must be able to peek or read + * MinBufferedBytesNeeded to determine the correct format, and then start + * reading from the beginning. First it will attempt to peek, and it + * assumes that if less than MinBufferedBytesNeeded bytes (but more than + * zero) are returned, this is because the stream is shorter than this, + * so falling back to reading would not provide more data. If peek() + * returns zero bytes, this call will instead attempt to read(). This + * will require that the stream can be rewind()ed. + * + * If Result is not NULL, it will be set to either kSuccess if an SkCodec + * is returned or a reason for the failure if NULL is returned. + * + * If SkPngChunkReader is not NULL, take a ref and pass it to libpng if + * the image is a png. + * + * If the SkPngChunkReader is not NULL then: + * If the image is not a PNG, the SkPngChunkReader will be ignored. + * If the image is a PNG, the SkPngChunkReader will be reffed. + * If the PNG has unknown chunks, the SkPngChunkReader will be used + * to handle these chunks. SkPngChunkReader will be called to read + * any unknown chunk at any point during the creation of the codec + * or the decode. Note that if SkPngChunkReader fails to read a + * chunk, this could result in a failure to create the codec or a + * failure to decode the image. + * If the PNG does not contain unknown chunks, the SkPngChunkReader + * will not be used or modified. + * + * If NULL is returned, the stream is deleted immediately. Otherwise, the + * SkCodec takes ownership of it, and will delete it when done with it. + */ + static std::unique_ptr<SkCodec> MakeFromStream( + std::unique_ptr<SkStream>, Result* = nullptr, + SkPngChunkReader* = nullptr, + SelectionPolicy selectionPolicy = SelectionPolicy::kPreferStillImage); + + /** + * If this data represents an encoded image that we know how to decode, + * return an SkCodec that can decode it. Otherwise return NULL. + * + * If the SkPngChunkReader is not NULL then: + * If the image is not a PNG, the SkPngChunkReader will be ignored. + * If the image is a PNG, the SkPngChunkReader will be reffed. + * If the PNG has unknown chunks, the SkPngChunkReader will be used + * to handle these chunks. SkPngChunkReader will be called to read + * any unknown chunk at any point during the creation of the codec + * or the decode. Note that if SkPngChunkReader fails to read a + * chunk, this could result in a failure to create the codec or a + * failure to decode the image. + * If the PNG does not contain unknown chunks, the SkPngChunkReader + * will not be used or modified. + */ + static std::unique_ptr<SkCodec> MakeFromData(sk_sp<SkData>, SkPngChunkReader* = nullptr); + + virtual ~SkCodec(); + + /** + * Return a reasonable SkImageInfo to decode into. + * + * If the image has an ICC profile that does not map to an SkColorSpace, + * the returned SkImageInfo will use SRGB. + */ + SkImageInfo getInfo() const { return fEncodedInfo.makeImageInfo(); } + + SkISize dimensions() const { return {fEncodedInfo.width(), fEncodedInfo.height()}; } + SkIRect bounds() const { + return SkIRect::MakeWH(fEncodedInfo.width(), fEncodedInfo.height()); + } + + /** + * Return the ICC profile of the encoded data. + */ + const skcms_ICCProfile* getICCProfile() const { + return this->getEncodedInfo().profile(); + } + + /** + * Returns the image orientation stored in the EXIF data. + * If there is no EXIF data, or if we cannot read the EXIF data, returns kTopLeft. 
+ */ + SkEncodedOrigin getOrigin() const { return fOrigin; } + + /** + * Return a size that approximately supports the desired scale factor. + * The codec may not be able to scale efficiently to the exact scale + * factor requested, so return a size that approximates that scale. + * The returned value is the codec's suggestion for the closest valid + * scale that it can natively support + */ + SkISize getScaledDimensions(float desiredScale) const { + // Negative and zero scales are errors. + SkASSERT(desiredScale > 0.0f); + if (desiredScale <= 0.0f) { + return SkISize::Make(0, 0); + } + + // Upscaling is not supported. Return the original size if the client + // requests an upscale. + if (desiredScale >= 1.0f) { + return this->dimensions(); + } + return this->onGetScaledDimensions(desiredScale); + } + + /** + * Return (via desiredSubset) a subset which can decoded from this codec, + * or false if this codec cannot decode subsets or anything similar to + * desiredSubset. + * + * @param desiredSubset In/out parameter. As input, a desired subset of + * the original bounds (as specified by getInfo). If true is returned, + * desiredSubset may have been modified to a subset which is + * supported. Although a particular change may have been made to + * desiredSubset to create something supported, it is possible other + * changes could result in a valid subset. + * If false is returned, desiredSubset's value is undefined. + * @return true if this codec supports decoding desiredSubset (as + * returned, potentially modified) + */ + bool getValidSubset(SkIRect* desiredSubset) const { + return this->onGetValidSubset(desiredSubset); + } + + /** + * Format of the encoded data. + */ + SkEncodedImageFormat getEncodedFormat() const { return this->onGetEncodedFormat(); } + + /** + * Whether or not the memory passed to getPixels is zero initialized. + */ + enum ZeroInitialized { + /** + * The memory passed to getPixels is zero initialized. The SkCodec + * may take advantage of this by skipping writing zeroes. + */ + kYes_ZeroInitialized, + /** + * The memory passed to getPixels has not been initialized to zero, + * so the SkCodec must write all zeroes to memory. + * + * This is the default. It will be used if no Options struct is used. + */ + kNo_ZeroInitialized, + }; + + /** + * Additional options to pass to getPixels. + */ + struct Options { + Options() + : fZeroInitialized(kNo_ZeroInitialized) + , fSubset(nullptr) + , fFrameIndex(0) + , fPriorFrame(kNoFrame) + {} + + ZeroInitialized fZeroInitialized; + /** + * If not NULL, represents a subset of the original image to decode. + * Must be within the bounds returned by getInfo(). + * If the EncodedFormat is SkEncodedImageFormat::kWEBP (the only one which + * currently supports subsets), the top and left values must be even. + * + * In getPixels and incremental decode, we will attempt to decode the + * exact rectangular subset specified by fSubset. + * + * In a scanline decode, it does not make sense to specify a subset + * top or subset height, since the client already controls which rows + * to get and which rows to skip. During scanline decodes, we will + * require that the subset top be zero and the subset height be equal + * to the full height. We will, however, use the values of + * subset left and subset width to decode partial scanlines on calls + * to getScanlines(). + */ + const SkIRect* fSubset; + + /** + * The frame to decode. + * + * Only meaningful for multi-frame images. 
+ */ + int fFrameIndex; + + /** + * If not kNoFrame, the dst already contains the prior frame at this index. + * + * Only meaningful for multi-frame images. + * + * If fFrameIndex needs to be blended with a prior frame (as reported by + * getFrameInfo[fFrameIndex].fRequiredFrame), the client can set this to + * any non-kRestorePrevious frame in [fRequiredFrame, fFrameIndex) to + * indicate that that frame is already in the dst. Options.fZeroInitialized + * is ignored in this case. + * + * If set to kNoFrame, the codec will decode any necessary required frame(s) first. + */ + int fPriorFrame; + }; + + /** + * Decode into the given pixels, a block of memory of size at + * least (info.fHeight - 1) * rowBytes + (info.fWidth * + * bytesPerPixel) + * + * Repeated calls to this function should give the same results, + * allowing the PixelRef to be immutable. + * + * @param info A description of the format (config, size) + * expected by the caller. This can simply be identical + * to the info returned by getInfo(). + * + * This contract also allows the caller to specify + * different output-configs, which the implementation can + * decide to support or not. + * + * A size that does not match getInfo() implies a request + * to scale. If the generator cannot perform this scale, + * it will return kInvalidScale. + * + * If the info contains a non-null SkColorSpace, the codec + * will perform the appropriate color space transformation. + * + * If the caller passes in the SkColorSpace that maps to the + * ICC profile reported by getICCProfile(), the color space + * transformation is a no-op. + * + * If the caller passes a null SkColorSpace, no color space + * transformation will be done. + * + * If a scanline decode is in progress, scanline mode will end, requiring the client to call + * startScanlineDecode() in order to return to decoding scanlines. + * + * @return Result kSuccess, or another value explaining the type of failure. + */ + Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options*); + + /** + * Simplified version of getPixels() that uses the default Options. + */ + Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) { + return this->getPixels(info, pixels, rowBytes, nullptr); + } + + Result getPixels(const SkPixmap& pm, const Options* opts = nullptr) { + return this->getPixels(pm.info(), pm.writable_addr(), pm.rowBytes(), opts); + } + + /** + * Return an image containing the pixels. + */ + std::tuple<sk_sp<SkImage>, SkCodec::Result> getImage(const SkImageInfo& info, + const Options* opts = nullptr); + std::tuple<sk_sp<SkImage>, SkCodec::Result> getImage(); + + /** + * If decoding to YUV is supported, this returns true. Otherwise, this + * returns false and the caller will ignore output parameter yuvaPixmapInfo. + * + * @param supportedDataTypes Indicates the data type/planar config combinations that are + * supported by the caller. If the generator supports decoding to + * YUV(A), but not as a type in supportedDataTypes, this method + * returns false. + * @param yuvaPixmapInfo Output parameter that specifies the planar configuration, subsampling, + * orientation, chroma siting, plane color types, and row bytes. + */ + bool queryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes, + SkYUVAPixmapInfo* yuvaPixmapInfo) const; + + /** + * Returns kSuccess, or another value explaining the type of failure. + * This always attempts to perform a full decode. 
To get the planar + * configuration without decoding use queryYUVAInfo(). + * + * @param yuvaPixmaps Contains preallocated pixmaps configured according to a successful call + * to queryYUVAInfo(). + */ + Result getYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps); + + /** + * Prepare for an incremental decode with the specified options. + * + * This may require a rewind. + * + * If kIncompleteInput is returned, may be called again after more data has + * been provided to the source SkStream. + * + * @param dstInfo Info of the destination. If the dimensions do not match + * those of getInfo, this implies a scale. + * @param dst Memory to write to. Needs to be large enough to hold the subset, + * if present, or the full image as described in dstInfo. + * @param options Contains decoding options, including if memory is zero + * initialized and whether to decode a subset. + * @return Enum representing success or reason for failure. + */ + Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes, + const Options*); + + Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes) { + return this->startIncrementalDecode(dstInfo, dst, rowBytes, nullptr); + } + + /** + * Start/continue the incremental decode. + * + * Not valid to call before a call to startIncrementalDecode() returns + * kSuccess. + * + * If kIncompleteInput is returned, may be called again after more data has + * been provided to the source SkStream. + * + * Unlike getPixels and getScanlines, this does not do any filling. This is + * left up to the caller, since they may be skipping lines or continuing the + * decode later. In the latter case, they may choose to initialize all lines + * first, or only initialize the remaining lines after the first call. + * + * @param rowsDecoded Optional output variable returning the total number of + * lines initialized. Only meaningful if this method returns kIncompleteInput. + * Otherwise the implementation may not set it. + * Note that some implementations may have initialized this many rows, but + * not necessarily finished those rows (e.g. interlaced PNG). This may be + * useful for determining what rows the client needs to initialize. + * @return kSuccess if all lines requested in startIncrementalDecode have + * been completely decoded. kIncompleteInput otherwise. + */ + Result incrementalDecode(int* rowsDecoded = nullptr) { + if (!fStartedIncrementalDecode) { + return kInvalidParameters; + } + return this->onIncrementalDecode(rowsDecoded); + } + + /** + * The remaining functions revolve around decoding scanlines. + */ + + /** + * Prepare for a scanline decode with the specified options. + * + * After this call, this class will be ready to decode the first scanline. + * + * This must be called in order to call getScanlines or skipScanlines. + * + * This may require rewinding the stream. + * + * Not all SkCodecs support this. + * + * @param dstInfo Info of the destination. If the dimensions do not match + * those of getInfo, this implies a scale. + * @param options Contains decoding options, including if memory is zero + * initialized. + * @return Enum representing success or reason for failure. + */ + Result startScanlineDecode(const SkImageInfo& dstInfo, const Options* options); + + /** + * Simplified version of startScanlineDecode() that uses the default Options. + */ + Result startScanlineDecode(const SkImageInfo& dstInfo) { + return this->startScanlineDecode(dstInfo, nullptr); + } + + /** + * Write the next countLines scanlines into dst. 
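+     *
+     * Illustrative sketch (not part of the upstream header): a row-by-row
+     * decode of a kTopDown image. Assumes a valid `codec` that supports
+     * scanline decoding and <vector>; error handling is omitted.
+     *
+     *   SkImageInfo info = codec->getInfo();
+     *   size_t rowBytes = info.minRowBytes();
+     *   std::vector<uint8_t> pixels(info.computeMinByteSize());
+     *   if (codec->startScanlineDecode(info) == SkCodec::kSuccess) {
+     *       for (int y = 0; y < info.height(); ++y) {
+     *           codec->getScanlines(pixels.data() + y * rowBytes, 1, rowBytes);
+     *       }
+     *   }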
+ * + * Not valid to call before calling startScanlineDecode(). + * + * @param dst Must be non-null, and large enough to hold countLines + * scanlines of size rowBytes. + * @param countLines Number of lines to write. + * @param rowBytes Number of bytes per row. Must be large enough to hold + * a scanline based on the SkImageInfo used to create this object. + * @return the number of lines successfully decoded. If this value is + * less than countLines, this will fill the remaining lines with a + * default value. + */ + int getScanlines(void* dst, int countLines, size_t rowBytes); + + /** + * Skip count scanlines. + * + * Not valid to call before calling startScanlineDecode(). + * + * The default version just calls onGetScanlines and discards the dst. + * NOTE: If skipped lines are the only lines with alpha, this default + * will make reallyHasAlpha return true, when it could have returned + * false. + * + * @return true if the scanlines were successfully skipped + * false on failure, possible reasons for failure include: + * An incomplete input image stream. + * Calling this function before calling startScanlineDecode(). + * If countLines is less than zero or so large that it moves + * the current scanline past the end of the image. + */ + bool skipScanlines(int countLines); + + /** + * The order in which rows are output from the scanline decoder is not the + * same for all variations of all image types. This explains the possible + * output row orderings. + */ + enum SkScanlineOrder { + /* + * By far the most common, this indicates that the image can be decoded + * reliably using the scanline decoder, and that rows will be output in + * the logical order. + */ + kTopDown_SkScanlineOrder, + + /* + * This indicates that the scanline decoder reliably outputs rows, but + * they will be returned in reverse order. If the scanline format is + * kBottomUp, the nextScanline() API can be used to determine the actual + * y-coordinate of the next output row, but the client is not forced + * to take advantage of this, given that it's not too tough to keep + * track independently. + * + * For full image decodes, it is safe to get all of the scanlines at + * once, since the decoder will handle inverting the rows as it + * decodes. + * + * For subset decodes and sampling, it is simplest to get and skip + * scanlines one at a time, using the nextScanline() API. It is + * possible to ask for larger chunks at a time, but this should be used + * with caution. As with full image decodes, the decoder will handle + * inverting the requested rows, but rows will still be delivered + * starting from the bottom of the image. + * + * Upside down bmps are an example. + */ + kBottomUp_SkScanlineOrder, + }; + + /** + * An enum representing the order in which scanlines will be returned by + * the scanline decoder. + * + * This is undefined before startScanlineDecode() is called. + */ + SkScanlineOrder getScanlineOrder() const { return this->onGetScanlineOrder(); } + + /** + * Returns the y-coordinate of the next row to be returned by the scanline + * decoder. + * + * This will equal fCurrScanline, except in the case of strangely + * encoded image types (bottom-up bmps). + * + * Results are undefined when not in scanline decoding mode. + */ + int nextScanline() const { return this->outputScanline(fCurrScanline); } + + /** + * Returns the output y-coordinate of the row that corresponds to an input + * y-coordinate. The input y-coordinate represents where the scanline + * is located in the encoded data. 
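+     *
+     * Illustrative sketch (not part of the upstream header): for a
+     * kBottomUp_SkScanlineOrder image decoded one line at a time after
+     * startScanlineDecode() has returned kSuccess, nextScanline() (or this
+     * method) supplies the destination row. `pixels` and `rowBytes` describe a
+     * full-size destination and are assumptions of this sketch.
+     *
+     *   for (int i = 0; i < codec->getInfo().height(); ++i) {
+     *       int dstY = codec->nextScanline();
+     *       codec->getScanlines((char*)pixels + dstY * rowBytes, 1, rowBytes);
+     *   }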
+ * + * This will equal inputScanline, except in the case of strangely + * encoded image types (bottom-up bmps, interlaced gifs). + */ + int outputScanline(int inputScanline) const; + + /** + * Return the number of frames in the image. + * + * May require reading through the stream. + */ + int getFrameCount() { + return this->onGetFrameCount(); + } + + // Sentinel value used when a frame index implies "no frame": + // - FrameInfo::fRequiredFrame set to this value means the frame + // is independent. + // - Options::fPriorFrame set to this value means no (relevant) prior frame + // is residing in dst's memory. + static constexpr int kNoFrame = -1; + + // This transitional definition was added in August 2018, and will eventually be removed. +#ifdef SK_LEGACY_SKCODEC_NONE_ENUM + static constexpr int kNone = kNoFrame; +#endif + + /** + * Information about individual frames in a multi-framed image. + */ + struct FrameInfo { + /** + * The frame that this frame needs to be blended with, or + * kNoFrame if this frame is independent (so it can be + * drawn over an uninitialized buffer). + * + * Note that this is the *earliest* frame that can be used + * for blending. Any frame from [fRequiredFrame, i) can be + * used, unless its fDisposalMethod is kRestorePrevious. + */ + int fRequiredFrame; + + /** + * Number of milliseconds to show this frame. + */ + int fDuration; + + /** + * Whether the end marker for this frame is contained in the stream. + * + * Note: this does not guarantee that an attempt to decode will be complete. + * There could be an error in the stream. + */ + bool fFullyReceived; + + /** + * This is conservative; it will still return non-opaque if e.g. a + * color index-based frame has a color with alpha but does not use it. + */ + SkAlphaType fAlphaType; + + /** + * Whether the updated rectangle contains alpha. + * + * This is conservative; it will still be set to true if e.g. a color + * index-based frame has a color with alpha but does not use it. In + * addition, it may be set to true, even if the final frame, after + * blending, is opaque. + */ + bool fHasAlphaWithinBounds; + + /** + * How this frame should be modified before decoding the next one. + */ + SkCodecAnimation::DisposalMethod fDisposalMethod; + + /** + * How this frame should blend with the prior frame. + */ + SkCodecAnimation::Blend fBlend; + + /** + * The rectangle updated by this frame. + * + * It may be empty, if the frame does not change the image. It will + * always be contained by SkCodec::dimensions(). + */ + SkIRect fFrameRect; + }; + + /** + * Return info about a single frame. + * + * Does not read through the stream, so it should be called after + * getFrameCount() to parse any frames that have not already been parsed. + * + * Only supported by animated (multi-frame) codecs. Note that this is a + * property of the codec (the SkCodec subclass), not the image. + * + * To elaborate, some codecs support animation (e.g. GIF). Others do not + * (e.g. BMP). Animated codecs can still represent single frame images. + * Calling getFrameInfo(0, etc) will return true for a single frame GIF + * even if the overall image is not animated (in that the pixels on screen + * do not change over time). When incrementally decoding a GIF image, we + * might only know that there's a single frame *so far*. + * + * For non-animated SkCodec subclasses, it's sufficient but not necessary + * for this method to always return false. 
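+     *
+     * Illustrative sketch (not part of the upstream header): decoding every
+     * frame of an animated image in order. Assumes `codec` holds an animated
+     * image and `bm` is an SkBitmap already allocated with codec->getInfo();
+     * error handling is omitted.
+     *
+     *   std::vector<SkCodec::FrameInfo> frames = codec->getFrameInfo();
+     *   for (int i = 0; i < (int)frames.size(); ++i) {
+     *       SkCodec::Options opts;
+     *       opts.fFrameIndex = i;
+     *       // Reuse the previous frame when it is a valid starting point and
+     *       // is still sitting in bm (see Options::fPriorFrame above).
+     *       if (i > 0 && frames[i].fRequiredFrame == i - 1) {
+     *           opts.fPriorFrame = i - 1;
+     *       }
+     *       codec->getPixels(bm.info(), bm.getPixels(), bm.rowBytes(), &opts);
+     *       // display the result for frames[i].fDuration milliseconds
+     *   }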
+ */ + bool getFrameInfo(int index, FrameInfo* info) const { + if (index < 0) { + return false; + } + return this->onGetFrameInfo(index, info); + } + + /** + * Return info about all the frames in the image. + * + * May require reading through the stream to determine info about the + * frames (including the count). + * + * As such, future decoding calls may require a rewind. + * + * This may return an empty vector for non-animated codecs. See the + * getFrameInfo(int, FrameInfo*) comment. + */ + std::vector<FrameInfo> getFrameInfo(); + + static constexpr int kRepetitionCountInfinite = -1; + + /** + * Return the number of times to repeat, if this image is animated. This number does not + * include the first play through of each frame. For example, a repetition count of 4 means + * that each frame is played 5 times and then the animation stops. + * + * It can return kRepetitionCountInfinite, a negative number, meaning that the animation + * should loop forever. + * + * May require reading the stream to find the repetition count. + * + * As such, future decoding calls may require a rewind. + * + * For still (non-animated) image codecs, this will return 0. + */ + int getRepetitionCount() { + return this->onGetRepetitionCount(); + } + + // Register a decoder at runtime by passing two function pointers: + // - peek() to return true if the span of bytes appears to be your encoded format; + // - make() to attempt to create an SkCodec from the given stream. + // Not thread safe. + static void Register( + bool (*peek)(const void*, size_t), + std::unique_ptr<SkCodec> (*make)(std::unique_ptr<SkStream>, SkCodec::Result*)); + +protected: + const SkEncodedInfo& getEncodedInfo() const { return fEncodedInfo; } + + using XformFormat = skcms_PixelFormat; + + SkCodec(SkEncodedInfo&&, + XformFormat srcFormat, + std::unique_ptr<SkStream>, + SkEncodedOrigin = kTopLeft_SkEncodedOrigin); + + virtual SkISize onGetScaledDimensions(float /*desiredScale*/) const { + // By default, scaling is not supported. + return this->dimensions(); + } + + // FIXME: What to do about subsets?? + /** + * Subclasses should override if they support dimensions other than the + * srcInfo's. + */ + virtual bool onDimensionsSupported(const SkISize&) { + return false; + } + + virtual SkEncodedImageFormat onGetEncodedFormat() const = 0; + + /** + * @param rowsDecoded When the encoded image stream is incomplete, this function + * will return kIncompleteInput and rowsDecoded will be set to + * the number of scanlines that were successfully decoded. + * This will allow getPixels() to fill the uninitialized memory. + */ + virtual Result onGetPixels(const SkImageInfo& info, + void* pixels, size_t rowBytes, const Options&, + int* rowsDecoded) = 0; + + virtual bool onQueryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes&, + SkYUVAPixmapInfo*) const { return false; } + + virtual Result onGetYUVAPlanes(const SkYUVAPixmaps&) { return kUnimplemented; } + + virtual bool onGetValidSubset(SkIRect* /*desiredSubset*/) const { + // By default, subsets are not supported. + return false; + } + + /** + * If the stream was previously read, attempt to rewind. + * + * If the stream needed to be rewound, call onRewind. + * @returns true if the codec is at the right position and can be used. + * false if there was a failure to rewind. + * + * This is called by getPixels(), getYUV8Planes(), startIncrementalDecode() and + * startScanlineDecode(). Subclasses may call if they need to rewind at another time. 
+ */ + bool SK_WARN_UNUSED_RESULT rewindIfNeeded(); + + /** + * Called by rewindIfNeeded, if the stream needed to be rewound. + * + * Subclasses should do any set up needed after a rewind. + */ + virtual bool onRewind() { + return true; + } + + /** + * Get method for the input stream + */ + SkStream* stream() { + return fStream.get(); + } + + /** + * The remaining functions revolve around decoding scanlines. + */ + + /** + * Most images types will be kTopDown and will not need to override this function. + */ + virtual SkScanlineOrder onGetScanlineOrder() const { return kTopDown_SkScanlineOrder; } + + const SkImageInfo& dstInfo() const { return fDstInfo; } + + const Options& options() const { return fOptions; } + + /** + * Returns the number of scanlines that have been decoded so far. + * This is unaffected by the SkScanlineOrder. + * + * Returns -1 if we have not started a scanline decode. + */ + int currScanline() const { return fCurrScanline; } + + virtual int onOutputScanline(int inputScanline) const; + + /** + * Return whether we can convert to dst. + * + * Will be called for the appropriate frame, prior to initializing the colorXform. + */ + virtual bool conversionSupported(const SkImageInfo& dst, bool srcIsOpaque, + bool needsColorXform); + + // Some classes never need a colorXform e.g. + // - ICO uses its embedded codec's colorXform + // - WBMP is just Black/White + virtual bool usesColorXform() const { return true; } + void applyColorXform(void* dst, const void* src, int count) const; + + bool colorXform() const { return fXformTime != kNo_XformTime; } + bool xformOnDecode() const { return fXformTime == kDecodeRow_XformTime; } + + virtual int onGetFrameCount() { + return 1; + } + + virtual bool onGetFrameInfo(int, FrameInfo*) const { + return false; + } + + virtual int onGetRepetitionCount() { + return 0; + } + +private: + const SkEncodedInfo fEncodedInfo; + const XformFormat fSrcXformFormat; + std::unique_ptr<SkStream> fStream; + bool fNeedsRewind; + const SkEncodedOrigin fOrigin; + + SkImageInfo fDstInfo; + Options fOptions; + + enum XformTime { + kNo_XformTime, + kPalette_XformTime, + kDecodeRow_XformTime, + }; + XformTime fXformTime; + XformFormat fDstXformFormat; // Based on fDstInfo. + skcms_ICCProfile fDstProfile; + skcms_AlphaFormat fDstXformAlphaFormat; + + // Only meaningful during scanline decodes. + int fCurrScanline; + + bool fStartedIncrementalDecode; + + // Allows SkAndroidCodec to call handleFrameIndex (potentially decoding a prior frame and + // clearing to transparent) without SkCodec calling it, too. + bool fAndroidCodecHandlesFrameIndex; + + bool initializeColorXform(const SkImageInfo& dstInfo, SkEncodedInfo::Alpha, bool srcIsOpaque); + + /** + * Return whether these dimensions are supported as a scale. + * + * The codec may choose to cache the information about scale and subset. + * Either way, the same information will be passed to onGetPixels/onStart + * on success. + * + * This must return true for a size returned from getScaledDimensions. + */ + bool dimensionsSupported(const SkISize& dim) { + return dim == this->dimensions() || this->onDimensionsSupported(dim); + } + + /** + * For multi-framed images, return the object with information about the frames. + */ + virtual const SkFrameHolder* getFrameHolder() const { + return nullptr; + } + + /** + * Check for a valid Options.fFrameIndex, and decode prior frames if necessary. + * + * If androidCodec is not null, that means this SkCodec is owned by an SkAndroidCodec. 
In that + * case, the Options will be treated as an AndroidOptions, and SkAndroidCodec will be used to + * decode a prior frame, if a prior frame is needed. When such an owned SkCodec calls + * handleFrameIndex, it will immediately return kSuccess, since SkAndroidCodec already handled + * it. + */ + Result handleFrameIndex(const SkImageInfo&, void* pixels, size_t rowBytes, const Options&, + SkAndroidCodec* androidCodec = nullptr); + + // Methods for scanline decoding. + virtual Result onStartScanlineDecode(const SkImageInfo& /*dstInfo*/, + const Options& /*options*/) { + return kUnimplemented; + } + + virtual Result onStartIncrementalDecode(const SkImageInfo& /*dstInfo*/, void*, size_t, + const Options&) { + return kUnimplemented; + } + + virtual Result onIncrementalDecode(int*) { + return kUnimplemented; + } + + + virtual bool onSkipScanlines(int /*countLines*/) { return false; } + + virtual int onGetScanlines(void* /*dst*/, int /*countLines*/, size_t /*rowBytes*/) { return 0; } + + /** + * On an incomplete decode, getPixels() and getScanlines() will call this function + * to fill any uinitialized memory. + * + * @param dstInfo Contains the destination color type + * Contains the destination alpha type + * Contains the destination width + * The height stored in this info is unused + * @param dst Pointer to the start of destination pixel memory + * @param rowBytes Stride length in destination pixel memory + * @param zeroInit Indicates if memory is zero initialized + * @param linesRequested Number of lines that the client requested + * @param linesDecoded Number of lines that were successfully decoded + */ + void fillIncompleteImage(const SkImageInfo& dstInfo, void* dst, size_t rowBytes, + ZeroInitialized zeroInit, int linesRequested, int linesDecoded); + + /** + * Return an object which will allow forcing scanline decodes to sample in X. + * + * May create a sampler, if one is not currently being used. Otherwise, does + * not affect ownership. + * + * Only valid during scanline decoding or incremental decoding. + */ + virtual SkSampler* getSampler(bool /*createIfNecessary*/) { return nullptr; } + + friend class DM::CodecSrc; // for fillIncompleteImage + friend class SkSampledCodec; + friend class SkIcoCodec; + friend class SkAndroidCodec; // for fEncodedInfo +}; +#endif // SkCodec_DEFINED diff --git a/src/deps/skia/include/codec/SkCodecAnimation.h b/src/deps/skia/include/codec/SkCodecAnimation.h new file mode 100644 index 000000000..c5883e2af --- /dev/null +++ b/src/deps/skia/include/codec/SkCodecAnimation.h @@ -0,0 +1,61 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCodecAnimation_DEFINED +#define SkCodecAnimation_DEFINED + +namespace SkCodecAnimation { + /** + * This specifies how the next frame is based on this frame. + * + * Names are based on the GIF 89a spec. + * + * The numbers correspond to values in a GIF. + */ + enum class DisposalMethod { + /** + * The next frame should be drawn on top of this one. + * + * In a GIF, a value of 0 (not specified) is also treated as Keep. + */ + kKeep = 1, + + /** + * Similar to Keep, except the area inside this frame's rectangle + * should be cleared to the BackGround color (transparent) before + * drawing the next frame. + */ + kRestoreBGColor = 2, + + /** + * The next frame should be drawn on top of the previous frame - i.e. + * disregarding this one. + * + * In a GIF, a value of 4 is also treated as RestorePrevious. 
+ */ + kRestorePrevious = 3, + }; + + /** + * How to blend the current frame. + */ + enum class Blend { + /** + * Blend with the prior frame as if using SkBlendMode::kSrcOver. + */ + kSrcOver, + + /** + * Blend with the prior frame as if using SkBlendMode::kSrc. + * + * This frame's pixels replace the destination pixels. + */ + kSrc, + }; + +} // namespace SkCodecAnimation +#endif // SkCodecAnimation_DEFINED diff --git a/src/deps/skia/include/codec/SkEncodedOrigin.h b/src/deps/skia/include/codec/SkEncodedOrigin.h new file mode 100644 index 000000000..19d083672 --- /dev/null +++ b/src/deps/skia/include/codec/SkEncodedOrigin.h @@ -0,0 +1,54 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkEncodedOrigin_DEFINED +#define SkEncodedOrigin_DEFINED + +#include "include/core/SkMatrix.h" + +// These values match the orientation www.exif.org/Exif2-2.PDF. +enum SkEncodedOrigin { + kTopLeft_SkEncodedOrigin = 1, // Default + kTopRight_SkEncodedOrigin = 2, // Reflected across y-axis + kBottomRight_SkEncodedOrigin = 3, // Rotated 180 + kBottomLeft_SkEncodedOrigin = 4, // Reflected across x-axis + kLeftTop_SkEncodedOrigin = 5, // Reflected across x-axis, Rotated 90 CCW + kRightTop_SkEncodedOrigin = 6, // Rotated 90 CW + kRightBottom_SkEncodedOrigin = 7, // Reflected across x-axis, Rotated 90 CW + kLeftBottom_SkEncodedOrigin = 8, // Rotated 90 CCW + kDefault_SkEncodedOrigin = kTopLeft_SkEncodedOrigin, + kLast_SkEncodedOrigin = kLeftBottom_SkEncodedOrigin, +}; + +/** + * Given an encoded origin and the width and height of the source data, returns a matrix + * that transforms the source rectangle with upper left corner at [0, 0] and origin to a correctly + * oriented destination rectangle of [0, 0, w, h]. + */ +static inline SkMatrix SkEncodedOriginToMatrix(SkEncodedOrigin origin, int w, int h) { + switch (origin) { + case kTopLeft_SkEncodedOrigin: return SkMatrix::I(); + case kTopRight_SkEncodedOrigin: return SkMatrix::MakeAll(-1, 0, w, 0, 1, 0, 0, 0, 1); + case kBottomRight_SkEncodedOrigin: return SkMatrix::MakeAll(-1, 0, w, 0, -1, h, 0, 0, 1); + case kBottomLeft_SkEncodedOrigin: return SkMatrix::MakeAll( 1, 0, 0, 0, -1, h, 0, 0, 1); + case kLeftTop_SkEncodedOrigin: return SkMatrix::MakeAll( 0, 1, 0, 1, 0, 0, 0, 0, 1); + case kRightTop_SkEncodedOrigin: return SkMatrix::MakeAll( 0, -1, w, 1, 0, 0, 0, 0, 1); + case kRightBottom_SkEncodedOrigin: return SkMatrix::MakeAll( 0, -1, w, -1, 0, h, 0, 0, 1); + case kLeftBottom_SkEncodedOrigin: return SkMatrix::MakeAll( 0, 1, 0, -1, 0, h, 0, 0, 1); + } + SK_ABORT("Unexpected origin"); +} + +/** + * Return true if the encoded origin includes a 90 degree rotation, in which case the width + * and height of the source data are swapped relative to a correctly oriented destination. 
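+ *
+ * Illustrative sketch (not part of the upstream header): drawing decoded pixels
+ * with the orientation applied. `image`, `origin` (from SkCodec::getOrigin())
+ * and `canvas` are assumptions of this sketch.
+ *
+ *   bool swap = SkEncodedOriginSwapsWidthHeight(origin);
+ *   int outW = swap ? image->height() : image->width();
+ *   int outH = swap ? image->width()  : image->height();
+ *   canvas->save();
+ *   canvas->concat(SkEncodedOriginToMatrix(origin, outW, outH));
+ *   canvas->drawImage(image, 0, 0);
+ *   canvas->restore();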
+ */ +static inline bool SkEncodedOriginSwapsWidthHeight(SkEncodedOrigin origin) { + return origin >= kLeftTop_SkEncodedOrigin; +} + +#endif // SkEncodedOrigin_DEFINED diff --git a/src/deps/skia/include/config/BUILD.bazel b/src/deps/skia/include/config/BUILD.bazel new file mode 100644 index 000000000..ad2d64d7b --- /dev/null +++ b/src/deps/skia/include/config/BUILD.bazel @@ -0,0 +1,7 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkUserConfig_hdr", + hdrs = ["SkUserConfig.h"], + visibility = ["//:__subpackages__"], +) diff --git a/src/deps/skia/include/config/SkUserConfig.h b/src/deps/skia/include/config/SkUserConfig.h new file mode 100644 index 000000000..313d324e4 --- /dev/null +++ b/src/deps/skia/include/config/SkUserConfig.h @@ -0,0 +1,89 @@ + +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + + +#ifndef SkUserConfig_DEFINED +#define SkUserConfig_DEFINED + +/* SkTypes.h, the root of the public header files, includes this file + SkUserConfig.h after first initializing certain Skia defines, letting + this file change or augment those flags. + + Below are optional defines that add, subtract, or change default behavior + in Skia. Your port can locally edit this file to enable/disable flags as + you choose, or these can be delared on your command line (i.e. -Dfoo). + + By default, this include file will always default to having all of the flags + commented out, so including it will have no effect. +*/ + +/////////////////////////////////////////////////////////////////////////////// + +/* Skia has lots of debug-only code. Often this is just null checks or other + parameter checking, but sometimes it can be quite intrusive (e.g. check that + each 32bit pixel is in premultiplied form). This code can be very useful + during development, but will slow things down in a shipping product. + + By default, these mutually exclusive flags are defined in SkTypes.h, + based on the presence or absence of NDEBUG, but that decision can be changed + here. + */ +//#define SK_DEBUG +//#define SK_RELEASE + +/* To write debug messages to a console, skia will call SkDebugf(...) following + printf conventions (e.g. const char* format, ...). If you want to redirect + this to something other than printf, define yours here + */ +//#define SkDebugf(...) MyFunction(__VA_ARGS__) + +/* + * To specify a different default font cache limit, define this. If this is + * undefined, skia will use a built-in value. + */ +//#define SK_DEFAULT_FONT_CACHE_LIMIT (1024 * 1024) + +/* + * To specify the default size of the image cache, undefine this and set it to + * the desired value (in bytes). SkGraphics.h as a runtime API to set this + * value as well. If this is undefined, a built-in value will be used. + */ +//#define SK_DEFAULT_IMAGE_CACHE_LIMIT (1024 * 1024) + +/* Define this to set the upper limit for text to support LCD. Values that + are very large increase the cost in the font cache and draw slower, without + improving readability. If this is undefined, Skia will use its default + value (e.g. 48) + */ +//#define SK_MAX_SIZE_FOR_LCDTEXT 48 + +/* Change the kN32_SkColorType ordering to BGRA to work in X windows. + */ +//#define SK_R32_SHIFT 16 + + +/* Determines whether to build code that supports the GPU backend. Some classes + that are not GPU-specific, such as SkShader subclasses, have optional code + that is used allows them to interact with the GPU backend. 
If you'd like to + omit this code set SK_SUPPORT_GPU to 0. This also allows you to omit the gpu + directories from your include search path when you're not building the GPU + backend. Defaults to 1 (build the GPU code). + */ +//#define SK_SUPPORT_GPU 1 + +/* Skia makes use of histogram logging macros to trace the frequency of + * events. By default, Skia provides no-op versions of these macros. + * Skia consumers can provide their own definitions of these macros to + * integrate with their histogram collection backend. + */ +//#define SK_HISTOGRAM_BOOLEAN(name, sample) +//#define SK_HISTOGRAM_ENUMERATION(name, sample, enum_size) +//#define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) +//#define SK_HISTOGRAM_MEMORY_KB(name, sample) + +#endif diff --git a/src/deps/skia/include/core/BUILD.bazel b/src/deps/skia/include/core/BUILD.bazel new file mode 100644 index 000000000..9b32d5a6e --- /dev/null +++ b/src/deps/skia/include/core/BUILD.bazel @@ -0,0 +1,951 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkAnnotation_hdr", + hdrs = ["SkAnnotation.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkBBHFactory_hdr", + hdrs = ["SkBBHFactory.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRect_hdr", + ":SkRefCnt_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkBitmap_hdr", + hdrs = ["SkBitmap.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkColor_hdr", + ":SkImageInfo_hdr", + ":SkMatrix_hdr", + ":SkPixmap_hdr", + ":SkPoint_hdr", + ":SkRefCnt_hdr", + ":SkShader_hdr", + ":SkTileMode_hdr", + ], +) + +generated_cc_atom( + name = "SkBlendMode_hdr", + hdrs = ["SkBlendMode.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkBlender_hdr", + hdrs = ["SkBlender.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBlendMode_hdr", + ":SkFlattenable_hdr", + ], +) + +generated_cc_atom( + name = "SkBlurTypes_hdr", + hdrs = ["SkBlurTypes.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkCanvasVirtualEnforcer_hdr", + hdrs = ["SkCanvasVirtualEnforcer.h"], + visibility = ["//:__subpackages__"], + deps = [":SkCanvas_hdr"], +) + +generated_cc_atom( + name = "SkCanvas_hdr", + hdrs = ["SkCanvas.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBlendMode_hdr", + ":SkClipOp_hdr", + ":SkColor_hdr", + ":SkFontTypes_hdr", + ":SkImageInfo_hdr", + ":SkM44_hdr", + ":SkMatrix_hdr", + ":SkPaint_hdr", + ":SkPoint_hdr", + ":SkRasterHandleAllocator_hdr", + ":SkRect_hdr", + ":SkRefCnt_hdr", + ":SkSamplingOptions_hdr", + ":SkScalar_hdr", + ":SkSize_hdr", + ":SkString_hdr", + ":SkSurfaceProps_hdr", + ":SkTypes_hdr", + "//include/private:SkDeque_hdr", + "//include/private:SkMacros_hdr", + ], +) + +generated_cc_atom( + name = "SkClipOp_hdr", + hdrs = ["SkClipOp.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkColorFilter_hdr", + hdrs = ["SkColorFilter.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBlendMode_hdr", + ":SkColor_hdr", + ":SkFlattenable_hdr", + ], +) + +generated_cc_atom( + name = "SkColorPriv_hdr", + hdrs = ["SkColorPriv.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkColor_hdr", + ":SkMath_hdr", + "//include/private:SkTPin_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkColorSpace_hdr", + hdrs = ["SkColorSpace.h"], + visibility = 
["//:__subpackages__"], + deps = [ + ":SkRefCnt_hdr", + "//include/private:SkFixed_hdr", + "//include/private:SkOnce_hdr", + "//include/third_party/skcms:skcms_hdr", + ], +) + +generated_cc_atom( + name = "SkColor_hdr", + hdrs = ["SkColor.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkImageInfo_hdr", + ":SkScalar_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkContourMeasure_hdr", + hdrs = ["SkContourMeasure.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkPath_hdr", + ":SkRefCnt_hdr", + "//include/private:SkTDArray_hdr", + ], +) + +generated_cc_atom( + name = "SkCoverageMode_hdr", + hdrs = ["SkCoverageMode.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkCubicMap_hdr", + hdrs = ["SkCubicMap.h"], + visibility = ["//:__subpackages__"], + deps = [":SkPoint_hdr"], +) + +generated_cc_atom( + name = "SkDataTable_hdr", + hdrs = ["SkDataTable.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkData_hdr", + "//include/private:SkTDArray_hdr", + ], +) + +generated_cc_atom( + name = "SkData_hdr", + hdrs = ["SkData.h"], + visibility = ["//:__subpackages__"], + deps = [":SkRefCnt_hdr"], +) + +generated_cc_atom( + name = "SkDeferredDisplayListRecorder_hdr", + hdrs = ["SkDeferredDisplayListRecorder.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkDeferredDisplayList_hdr", + ":SkImageInfo_hdr", + ":SkImage_hdr", + ":SkRefCnt_hdr", + ":SkSurfaceCharacterization_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkDeferredDisplayList_hdr", + hdrs = ["SkDeferredDisplayList.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRefCnt_hdr", + ":SkSurfaceCharacterization_hdr", + ":SkTypes_hdr", + "//include/gpu:GrRecordingContext_hdr", + "//include/private:SkTArray_hdr", + ], +) + +generated_cc_atom( + name = "SkDocument_hdr", + hdrs = ["SkDocument.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRefCnt_hdr", + ":SkScalar_hdr", + ], +) + +generated_cc_atom( + name = "SkDrawLooper_hdr", + hdrs = ["SkDrawLooper.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBlurTypes_hdr", + ":SkColor_hdr", + ":SkFlattenable_hdr", + ":SkPoint_hdr", + ], +) + +generated_cc_atom( + name = "SkDrawable_hdr", + hdrs = ["SkDrawable.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFlattenable_hdr", + ":SkImageInfo_hdr", + ":SkScalar_hdr", + ], +) + +generated_cc_atom( + name = "SkEncodedImageFormat_hdr", + hdrs = ["SkEncodedImageFormat.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkExecutor_hdr", + hdrs = ["SkExecutor.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkFlattenable_hdr", + hdrs = ["SkFlattenable.h"], + visibility = ["//:__subpackages__"], + deps = [":SkRefCnt_hdr"], +) + +generated_cc_atom( + name = "SkFontArguments_hdr", + hdrs = ["SkFontArguments.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkScalar_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFontMetrics_hdr", + hdrs = ["SkFontMetrics.h"], + visibility = ["//:__subpackages__"], + deps = [":SkScalar_hdr"], +) + +generated_cc_atom( + name = "SkFontMgr_hdr", + hdrs = ["SkFontMgr.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFontArguments_hdr", + ":SkFontStyle_hdr", + ":SkRefCnt_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFontParameters_hdr", + hdrs = ["SkFontParameters.h"], + visibility = ["//:__subpackages__"], + deps = [ + 
":SkScalar_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFontStyle_hdr", + hdrs = ["SkFontStyle.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkTypes_hdr", + "//include/private:SkTPin_hdr", + ], +) + +generated_cc_atom( + name = "SkFontTypes_hdr", + hdrs = ["SkFontTypes.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkFont_hdr", + hdrs = ["SkFont.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFontTypes_hdr", + ":SkScalar_hdr", + ":SkTypeface_hdr", + ], +) + +generated_cc_atom( + name = "SkGraphics_hdr", + hdrs = ["SkGraphics.h"], + visibility = ["//:__subpackages__"], + deps = [":SkRefCnt_hdr"], +) + +generated_cc_atom( + name = "SkICC_hdr", + hdrs = ["SkICC.h"], + visibility = ["//:__subpackages__"], + deps = [":SkData_hdr"], +) + +generated_cc_atom( + name = "SkImageEncoder_hdr", + hdrs = ["SkImageEncoder.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBitmap_hdr", + ":SkData_hdr", + ":SkEncodedImageFormat_hdr", + ":SkPixmap_hdr", + ":SkStream_hdr", + ], +) + +generated_cc_atom( + name = "SkImageFilter_hdr", + hdrs = ["SkImageFilter.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFlattenable_hdr", + ":SkMatrix_hdr", + ":SkRect_hdr", + ], +) + +generated_cc_atom( + name = "SkImageGenerator_hdr", + hdrs = ["SkImageGenerator.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBitmap_hdr", + ":SkColor_hdr", + ":SkImageInfo_hdr", + ":SkImage_hdr", + ":SkYUVAPixmaps_hdr", + "//include/private:SkTOptional_hdr", + ], +) + +generated_cc_atom( + name = "SkImageInfo_hdr", + hdrs = ["SkImageInfo.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkColorSpace_hdr", + ":SkMath_hdr", + ":SkRect_hdr", + ":SkSize_hdr", + "//include/private:SkTFitsIn_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkImage_hdr", + hdrs = ["SkImage.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkImageEncoder_hdr", + ":SkImageInfo_hdr", + ":SkRefCnt_hdr", + ":SkSamplingOptions_hdr", + ":SkScalar_hdr", + ":SkShader_hdr", + ":SkTileMode_hdr", + "//include/gpu:GrTypes_hdr", + "//include/private:SkTOptional_hdr", + ], +) + +generated_cc_atom( + name = "SkM44_hdr", + hdrs = ["SkM44.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMatrix_hdr", + ":SkRect_hdr", + ":SkScalar_hdr", + ], +) + +generated_cc_atom( + name = "SkMallocPixelRef_hdr", + hdrs = ["SkMallocPixelRef.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkPixelRef_hdr", + ":SkRefCnt_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkMaskFilter_hdr", + hdrs = ["SkMaskFilter.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBlurTypes_hdr", + ":SkCoverageMode_hdr", + ":SkFlattenable_hdr", + ":SkScalar_hdr", + ], +) + +generated_cc_atom( + name = "SkMath_hdr", + hdrs = ["SkMath.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkMatrix_hdr", + hdrs = ["SkMatrix.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRect_hdr", + "//include/private:SkMacros_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkMilestone_hdr", + hdrs = ["SkMilestone.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkOverdrawCanvas_hdr", + hdrs = ["SkOverdrawCanvas.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkCanvasVirtualEnforcer_hdr", + "//include/utils:SkNWayCanvas_hdr", + ], +) + +generated_cc_atom( + name = "SkPaint_hdr", + hdrs = ["SkPaint.h"], + 
visibility = ["//:__subpackages__"], + deps = [ + ":SkBlendMode_hdr", + ":SkColor_hdr", + ":SkRefCnt_hdr", + "//include/private:SkTOptional_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkPathBuilder_hdr", + hdrs = ["SkPathBuilder.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMatrix_hdr", + ":SkPathTypes_hdr", + ":SkPath_hdr", + "//include/private:SkTDArray_hdr", + ], +) + +generated_cc_atom( + name = "SkPathEffect_hdr", + hdrs = ["SkPathEffect.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFlattenable_hdr", + ":SkPath_hdr", + ":SkScalar_hdr", + ], +) + +generated_cc_atom( + name = "SkPathMeasure_hdr", + hdrs = ["SkPathMeasure.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkContourMeasure_hdr", + ":SkPath_hdr", + "//include/private:SkTDArray_hdr", + ], +) + +generated_cc_atom( + name = "SkPathTypes_hdr", + hdrs = ["SkPathTypes.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkPath_hdr", + hdrs = ["SkPath.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMatrix_hdr", + ":SkPathTypes_hdr", + "//include/private:SkPathRef_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkPictureRecorder_hdr", + hdrs = ["SkPictureRecorder.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBBHFactory_hdr", + ":SkPicture_hdr", + ":SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "SkPicture_hdr", + hdrs = ["SkPicture.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRect_hdr", + ":SkRefCnt_hdr", + ":SkSamplingOptions_hdr", + ":SkShader_hdr", + ":SkTileMode_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkPixelRef_hdr", + hdrs = ["SkPixelRef.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBitmap_hdr", + ":SkImageInfo_hdr", + ":SkPixmap_hdr", + ":SkRefCnt_hdr", + ":SkSize_hdr", + "//include/private:SkIDChangeListener_hdr", + "//include/private:SkMutex_hdr", + "//include/private:SkTDArray_hdr", + ], +) + +generated_cc_atom( + name = "SkPixmap_hdr", + hdrs = ["SkPixmap.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkColor_hdr", + ":SkImageInfo_hdr", + ":SkSamplingOptions_hdr", + ], +) + +generated_cc_atom( + name = "SkPngChunkReader_hdr", + hdrs = ["SkPngChunkReader.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRefCnt_hdr", + ":SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkPoint3_hdr", + hdrs = ["SkPoint3.h"], + visibility = ["//:__subpackages__"], + deps = [":SkPoint_hdr"], +) + +generated_cc_atom( + name = "SkPoint_hdr", + hdrs = ["SkPoint.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMath_hdr", + ":SkScalar_hdr", + "//include/private:SkSafe32_hdr", + ], +) + +generated_cc_atom( + name = "SkPromiseImageTexture_hdr", + hdrs = ["SkPromiseImageTexture.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRefCnt_hdr", + ":SkTypes_hdr", + "//include/gpu:GrBackendSurface_hdr", + ], +) + +generated_cc_atom( + name = "SkRRect_hdr", + hdrs = ["SkRRect.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkPoint_hdr", + ":SkRect_hdr", + ], +) + +generated_cc_atom( + name = "SkRSXform_hdr", + hdrs = ["SkRSXform.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkPoint_hdr", + ":SkSize_hdr", + ], +) + +generated_cc_atom( + name = "SkRasterHandleAllocator_hdr", + hdrs = ["SkRasterHandleAllocator.h"], + visibility = ["//:__subpackages__"], + deps = [":SkImageInfo_hdr"], +) + +generated_cc_atom( + name = "SkRect_hdr", + hdrs = 
["SkRect.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkPoint_hdr", + ":SkSize_hdr", + "//include/private:SkSafe32_hdr", + "//include/private:SkTFitsIn_hdr", + ], +) + +generated_cc_atom( + name = "SkRefCnt_hdr", + hdrs = ["SkRefCnt.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkRegion_hdr", + hdrs = ["SkRegion.h"], + visibility = ["//:__subpackages__"], + deps = [":SkRect_hdr"], +) + +generated_cc_atom( + name = "SkSamplingOptions_hdr", + hdrs = ["SkSamplingOptions.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkScalar_hdr", + hdrs = ["SkScalar.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/private:SkFloatingPoint_hdr"], +) + +generated_cc_atom( + name = "SkSerialProcs_hdr", + hdrs = ["SkSerialProcs.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkImage_hdr", + ":SkPicture_hdr", + ":SkTypeface_hdr", + ], +) + +generated_cc_atom( + name = "SkShader_hdr", + hdrs = ["SkShader.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkBlendMode_hdr", + ":SkColor_hdr", + ":SkFlattenable_hdr", + ":SkImageInfo_hdr", + ":SkMatrix_hdr", + ":SkTileMode_hdr", + ], +) + +generated_cc_atom( + name = "SkSize_hdr", + hdrs = ["SkSize.h"], + visibility = ["//:__subpackages__"], + deps = [":SkScalar_hdr"], +) + +generated_cc_atom( + name = "SkSpan_hdr", + hdrs = ["SkSpan.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/private:SkTLogic_hdr"], +) + +generated_cc_atom( + name = "SkStream_hdr", + hdrs = ["SkStream.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkData_hdr", + ":SkRefCnt_hdr", + ":SkScalar_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkStringView_hdr", + hdrs = ["SkStringView.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkString_hdr", + hdrs = ["SkString.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRefCnt_hdr", + ":SkScalar_hdr", + ":SkTypes_hdr", + "//include/private:SkMalloc_hdr", + "//include/private:SkTArray_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkStrokeRec_hdr", + hdrs = ["SkStrokeRec.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkPaint_hdr", + "//include/private:SkMacros_hdr", + ], +) + +generated_cc_atom( + name = "SkSurfaceCharacterization_hdr", + hdrs = ["SkSurfaceCharacterization.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkColorSpace_hdr", + ":SkImageInfo_hdr", + ":SkRefCnt_hdr", + ":SkSurfaceProps_hdr", + "//include/gpu:GrBackendSurface_hdr", + "//include/gpu:GrContextThreadSafeProxy_hdr", + "//include/gpu:GrTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkSurfaceProps_hdr", + hdrs = ["SkSurfaceProps.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkSurface_hdr", + hdrs = ["SkSurface.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkImage_hdr", + ":SkPixmap_hdr", + ":SkRefCnt_hdr", + ":SkSurfaceProps_hdr", + "//include/gpu:GrTypes_hdr", + "//include/gpu/mtl:GrMtlTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkSwizzle_hdr", + hdrs = ["SkSwizzle.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkTextBlob_hdr", + hdrs = ["SkTextBlob.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFont_hdr", + ":SkPaint_hdr", + ":SkRefCnt_hdr", + ":SkString_hdr", + "//include/private:SkTemplates_hdr", + ], +) + 
+generated_cc_atom( + name = "SkTileMode_hdr", + hdrs = ["SkTileMode.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkTime_hdr", + hdrs = ["SkTime.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkTypes_hdr", + "//include/private:SkMacros_hdr", + ], +) + +generated_cc_atom( + name = "SkTraceMemoryDump_hdr", + hdrs = ["SkTraceMemoryDump.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkTypeface_hdr", + hdrs = ["SkTypeface.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFontArguments_hdr", + ":SkFontParameters_hdr", + ":SkFontStyle_hdr", + ":SkFontTypes_hdr", + ":SkRect_hdr", + ":SkString_hdr", + "//include/private:SkOnce_hdr", + "//include/private:SkWeakRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "SkTypes_hdr", + hdrs = ["SkTypes.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/config:SkUserConfig_hdr"], +) + +generated_cc_atom( + name = "SkUnPreMultiply_hdr", + hdrs = ["SkUnPreMultiply.h"], + visibility = ["//:__subpackages__"], + deps = [":SkColor_hdr"], +) + +generated_cc_atom( + name = "SkVertices_hdr", + hdrs = ["SkVertices.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkColor_hdr", + ":SkRect_hdr", + ":SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "SkYUVAInfo_hdr", + hdrs = ["SkYUVAInfo.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkImageInfo_hdr", + ":SkSize_hdr", + "//include/codec:SkEncodedOrigin_hdr", + ], +) + +generated_cc_atom( + name = "SkYUVAPixmaps_hdr", + hdrs = ["SkYUVAPixmaps.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkData_hdr", + ":SkImageInfo_hdr", + ":SkPixmap_hdr", + ":SkYUVAInfo_hdr", + "//include/private:SkTo_hdr", + ], +) + +generated_cc_atom( + name = "SkCustomMesh_hdr", + hdrs = ["SkCustomMesh.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkColorSpace_hdr", + ":SkImageInfo_hdr", + ":SkRect_hdr", + ":SkRefCnt_hdr", + ":SkSpan_hdr", + ":SkString_hdr", + ":SkTypes_hdr", + ], +) diff --git a/src/deps/skia/include/core/SkAnnotation.h b/src/deps/skia/include/core/SkAnnotation.h new file mode 100644 index 000000000..9048bb6b6 --- /dev/null +++ b/src/deps/skia/include/core/SkAnnotation.h @@ -0,0 +1,50 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkAnnotation_DEFINED +#define SkAnnotation_DEFINED + +#include "include/core/SkTypes.h" + +class SkData; +struct SkPoint; +struct SkRect; +class SkCanvas; + +/** + * Annotate the canvas by associating the specified URL with the + * specified rectangle (in local coordinates, just like drawRect). + * + * If the backend of this canvas does not support annotations, this call is + * safely ignored. + * + * The caller is responsible for managing its ownership of the SkData. + */ +SK_API void SkAnnotateRectWithURL(SkCanvas*, const SkRect&, SkData*); + +/** + * Annotate the canvas by associating a name with the specified point. + * + * If the backend of this canvas does not support annotations, this call is + * safely ignored. + * + * The caller is responsible for managing its ownership of the SkData. + */ +SK_API void SkAnnotateNamedDestination(SkCanvas*, const SkPoint&, SkData*); + +/** + * Annotate the canvas by making the specified rectangle link to a named + * destination. + * + * If the backend of this canvas does not support annotations, this call is + * safely ignored. 
+ * + * The caller is responsible for managing its ownership of the SkData. + */ +SK_API void SkAnnotateLinkToDestination(SkCanvas*, const SkRect&, SkData*); + +#endif diff --git a/src/deps/skia/include/core/SkBBHFactory.h b/src/deps/skia/include/core/SkBBHFactory.h new file mode 100644 index 000000000..2507d0f15 --- /dev/null +++ b/src/deps/skia/include/core/SkBBHFactory.h @@ -0,0 +1,63 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkBBHFactory_DEFINED +#define SkBBHFactory_DEFINED + +#include "include/core/SkRect.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" +#include <vector> + +class SkBBoxHierarchy : public SkRefCnt { +public: + struct Metadata { + bool isDraw; // The corresponding SkRect bounds a draw command, not a pure state change. + }; + + /** + * Insert N bounding boxes into the hierarchy. + */ + virtual void insert(const SkRect[], int N) = 0; + virtual void insert(const SkRect[], const Metadata[], int N); + + /** + * Populate results with the indices of bounding boxes intersecting that query. + */ + virtual void search(const SkRect& query, std::vector<int>* results) const = 0; + + /** + * Return approximate size in memory of *this. + */ + virtual size_t bytesUsed() const = 0; + +protected: + SkBBoxHierarchy() = default; + SkBBoxHierarchy(const SkBBoxHierarchy&) = delete; + SkBBoxHierarchy& operator=(const SkBBoxHierarchy&) = delete; +}; + +class SK_API SkBBHFactory { +public: + /** + * Allocate a new SkBBoxHierarchy. Return NULL on failure. + */ + virtual sk_sp<SkBBoxHierarchy> operator()() const = 0; + virtual ~SkBBHFactory() {} + +protected: + SkBBHFactory() = default; + SkBBHFactory(const SkBBHFactory&) = delete; + SkBBHFactory& operator=(const SkBBHFactory&) = delete; +}; + +class SK_API SkRTreeFactory : public SkBBHFactory { +public: + sk_sp<SkBBoxHierarchy> operator()() const override; +}; + +#endif diff --git a/src/deps/skia/include/core/SkBitmap.h b/src/deps/skia/include/core/SkBitmap.h new file mode 100644 index 000000000..088280fbe --- /dev/null +++ b/src/deps/skia/include/core/SkBitmap.h @@ -0,0 +1,1212 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkBitmap_DEFINED +#define SkBitmap_DEFINED + +#include "include/core/SkColor.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkMatrix.h" +#include "include/core/SkPixmap.h" +#include "include/core/SkPoint.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkShader.h" +#include "include/core/SkTileMode.h" + +class SkBitmap; +struct SkMask; +class SkMipmap; +struct SkIRect; +struct SkRect; +class SkPaint; +class SkPixelRef; +class SkShader; + +/** \class SkBitmap + SkBitmap describes a two-dimensional raster pixel array. SkBitmap is built on + SkImageInfo, containing integer width and height, SkColorType and SkAlphaType + describing the pixel format, and SkColorSpace describing the range of colors. + SkBitmap points to SkPixelRef, which describes the physical array of pixels. + SkImageInfo bounds may be located anywhere fully inside SkPixelRef bounds. + + SkBitmap can be drawn using SkCanvas. SkBitmap can be a drawing destination for SkCanvas + draw member functions. SkBitmap flexibility as a pixel container limits some + optimizations available to the target platform. 
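To ground the SkBitmap class overview above, here is a minimal sketch of the pattern it describes: SkBitmap as a CPU drawing destination for SkCanvas. The helper name and dimensions are invented for illustration, and the includes assume the Skia source root is on the compiler's include path.

    #include "include/core/SkBitmap.h"
    #include "include/core/SkCanvas.h"
    #include "include/core/SkPaint.h"

    // Allocate a 256x256 bitmap in the native 32-bit format and draw into it on the CPU.
    static SkBitmap drawRedCircle() {
        SkBitmap bitmap;
        bitmap.allocN32Pixels(256, 256);        // premultiplied alpha; aborts if allocation fails
        SkCanvas canvas(bitmap);                // the canvas writes directly into bitmap's SkPixelRef
        canvas.clear(SK_ColorWHITE);
        SkPaint paint;
        paint.setAntiAlias(true);
        paint.setColor(SK_ColorRED);
        canvas.drawCircle(128, 128, 64, paint);
        return bitmap;                          // copies share the pixel memory; it is not duplicated
    }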
+ + If pixel array is primarily read-only, use SkImage for better performance. + If pixel array is primarily written to, use SkSurface for better performance. + + Declaring SkBitmap const prevents altering SkImageInfo: the SkBitmap height, width, + and so on cannot change. It does not affect SkPixelRef: a caller may write its + pixels. Declaring SkBitmap const affects SkBitmap configuration, not its contents. + + SkBitmap is not thread safe. Each thread must have its own copy of SkBitmap fields, + although threads may share the underlying pixel array. +*/ +class SK_API SkBitmap { +public: + class SK_API Allocator; + + /** Creates an empty SkBitmap without pixels, with kUnknown_SkColorType, + kUnknown_SkAlphaType, and with a width and height of zero. SkPixelRef origin is + set to (0, 0). + + Use setInfo() to associate SkColorType, SkAlphaType, width, and height + after SkBitmap has been created. + + @return empty SkBitmap + + example: https://fiddle.skia.org/c/@Bitmap_empty_constructor + */ + SkBitmap(); + + /** Copies settings from src to returned SkBitmap. Shares pixels if src has pixels + allocated, so both bitmaps reference the same pixels. + + @param src SkBitmap to copy SkImageInfo, and share SkPixelRef + @return copy of src + + example: https://fiddle.skia.org/c/@Bitmap_copy_const_SkBitmap + */ + SkBitmap(const SkBitmap& src); + + /** Copies settings from src to returned SkBitmap. Moves ownership of src pixels to + SkBitmap. + + @param src SkBitmap to copy SkImageInfo, and reassign SkPixelRef + @return copy of src + + example: https://fiddle.skia.org/c/@Bitmap_move_SkBitmap + */ + SkBitmap(SkBitmap&& src); + + /** Decrements SkPixelRef reference count, if SkPixelRef is not nullptr. + */ + ~SkBitmap(); + + /** Copies settings from src to returned SkBitmap. Shares pixels if src has pixels + allocated, so both bitmaps reference the same pixels. + + @param src SkBitmap to copy SkImageInfo, and share SkPixelRef + @return copy of src + + example: https://fiddle.skia.org/c/@Bitmap_copy_operator + */ + SkBitmap& operator=(const SkBitmap& src); + + /** Copies settings from src to returned SkBitmap. Moves ownership of src pixels to + SkBitmap. + + @param src SkBitmap to copy SkImageInfo, and reassign SkPixelRef + @return copy of src + + example: https://fiddle.skia.org/c/@Bitmap_move_operator + */ + SkBitmap& operator=(SkBitmap&& src); + + /** Swaps the fields of the two bitmaps. + + @param other SkBitmap exchanged with original + + example: https://fiddle.skia.org/c/@Bitmap_swap + */ + void swap(SkBitmap& other); + + /** Returns a constant reference to the SkPixmap holding the SkBitmap pixel + address, row bytes, and SkImageInfo. + + @return reference to SkPixmap describing this SkBitmap + */ + const SkPixmap& pixmap() const { return fPixmap; } + + /** Returns width, height, SkAlphaType, SkColorType, and SkColorSpace. + + @return reference to SkImageInfo + */ + const SkImageInfo& info() const { return fPixmap.info(); } + + /** Returns pixel count in each row. Should be equal or less than + rowBytes() / info().bytesPerPixel(). + + May be less than pixelRef().width(). Will not exceed pixelRef().width() less + pixelRefOrigin().fX. + + @return pixel width in SkImageInfo + */ + int width() const { return fPixmap.width(); } + + /** Returns pixel row count. + + Maybe be less than pixelRef().height(). Will not exceed pixelRef().height() less + pixelRefOrigin().fY. 
+ + @return pixel height in SkImageInfo + */ + int height() const { return fPixmap.height(); } + + SkColorType colorType() const { return fPixmap.colorType(); } + + SkAlphaType alphaType() const { return fPixmap.alphaType(); } + + /** Returns SkColorSpace, the range of colors, associated with SkImageInfo. The + reference count of SkColorSpace is unchanged. The returned SkColorSpace is + immutable. + + @return SkColorSpace in SkImageInfo, or nullptr + */ + SkColorSpace* colorSpace() const { return fPixmap.colorSpace(); } + + /** Returns smart pointer to SkColorSpace, the range of colors, associated with + SkImageInfo. The smart pointer tracks the number of objects sharing this + SkColorSpace reference so the memory is released when the owners destruct. + + The returned SkColorSpace is immutable. + + @return SkColorSpace in SkImageInfo wrapped in a smart pointer + */ + sk_sp<SkColorSpace> refColorSpace() const { return fPixmap.info().refColorSpace(); } + + /** Returns number of bytes per pixel required by SkColorType. + Returns zero if colorType( is kUnknown_SkColorType. + + @return bytes in pixel + */ + int bytesPerPixel() const { return fPixmap.info().bytesPerPixel(); } + + /** Returns number of pixels that fit on row. Should be greater than or equal to + width(). + + @return maximum pixels per row + */ + int rowBytesAsPixels() const { return fPixmap.rowBytesAsPixels(); } + + /** Returns bit shift converting row bytes to row pixels. + Returns zero for kUnknown_SkColorType. + + @return one of: 0, 1, 2, 3; left shift to convert pixels to bytes + */ + int shiftPerPixel() const { return fPixmap.shiftPerPixel(); } + + /** Returns true if either width() or height() are zero. + + Does not check if SkPixelRef is nullptr; call drawsNothing() to check width(), + height(), and SkPixelRef. + + @return true if dimensions do not enclose area + */ + bool empty() const { return fPixmap.info().isEmpty(); } + + /** Returns true if SkPixelRef is nullptr. + + Does not check if width() or height() are zero; call drawsNothing() to check + width(), height(), and SkPixelRef. + + @return true if no SkPixelRef is associated + */ + bool isNull() const { return nullptr == fPixelRef; } + + /** Returns true if width() or height() are zero, or if SkPixelRef is nullptr. + If true, SkBitmap has no effect when drawn or drawn into. + + @return true if drawing has no effect + */ + bool drawsNothing() const { + return this->empty() || this->isNull(); + } + + /** Returns row bytes, the interval from one pixel row to the next. Row bytes + is at least as large as: width() * info().bytesPerPixel(). + + Returns zero if colorType() is kUnknown_SkColorType, or if row bytes supplied to + setInfo() is not large enough to hold a row of pixels. + + @return byte length of pixel row + */ + size_t rowBytes() const { return fPixmap.rowBytes(); } + + /** Sets SkAlphaType, if alphaType is compatible with SkColorType. + Returns true unless alphaType is kUnknown_SkAlphaType and current SkAlphaType + is not kUnknown_SkAlphaType. + + Returns true if SkColorType is kUnknown_SkColorType. alphaType is ignored, and + SkAlphaType remains kUnknown_SkAlphaType. + + Returns true if SkColorType is kRGB_565_SkColorType or kGray_8_SkColorType. + alphaType is ignored, and SkAlphaType remains kOpaque_SkAlphaType. + + If SkColorType is kARGB_4444_SkColorType, kRGBA_8888_SkColorType, + kBGRA_8888_SkColorType, or kRGBA_F16_SkColorType: returns true unless + alphaType is kUnknown_SkAlphaType and SkAlphaType is not kUnknown_SkAlphaType. 
+ If SkAlphaType is kUnknown_SkAlphaType, alphaType is ignored. + + If SkColorType is kAlpha_8_SkColorType, returns true unless + alphaType is kUnknown_SkAlphaType and SkAlphaType is not kUnknown_SkAlphaType. + If SkAlphaType is kUnknown_SkAlphaType, alphaType is ignored. If alphaType is + kUnpremul_SkAlphaType, it is treated as kPremul_SkAlphaType. + + This changes SkAlphaType in SkPixelRef; all bitmaps sharing SkPixelRef + are affected. + + @return true if SkAlphaType is set + + example: https://fiddle.skia.org/c/@Bitmap_setAlphaType + */ + bool setAlphaType(SkAlphaType alphaType); + + /** Returns pixel address, the base address corresponding to the pixel origin. + + @return pixel address + */ + void* getPixels() const { return fPixmap.writable_addr(); } + + /** Returns minimum memory required for pixel storage. + Does not include unused memory on last row when rowBytesAsPixels() exceeds width(). + Returns SIZE_MAX if result does not fit in size_t. + Returns zero if height() or width() is 0. + Returns height() times rowBytes() if colorType() is kUnknown_SkColorType. + + @return size in bytes of image buffer + */ + size_t computeByteSize() const { return fPixmap.computeByteSize(); } + + /** Returns true if pixels can not change. + + Most immutable SkBitmap checks trigger an assert only on debug builds. + + @return true if pixels are immutable + + example: https://fiddle.skia.org/c/@Bitmap_isImmutable + */ + bool isImmutable() const; + + /** Sets internal flag to mark SkBitmap as immutable. Once set, pixels can not change. + Any other bitmap sharing the same SkPixelRef are also marked as immutable. + Once SkPixelRef is marked immutable, the setting cannot be cleared. + + Writing to immutable SkBitmap pixels triggers an assert on debug builds. + + example: https://fiddle.skia.org/c/@Bitmap_setImmutable + */ + void setImmutable(); + + /** Returns true if SkAlphaType is set to hint that all pixels are opaque; their + alpha value is implicitly or explicitly 1.0. If true, and all pixels are + not opaque, Skia may draw incorrectly. + + Does not check if SkColorType allows alpha, or if any pixel value has + transparency. + + @return true if SkImageInfo SkAlphaType is kOpaque_SkAlphaType + */ + bool isOpaque() const { + return SkAlphaTypeIsOpaque(this->alphaType()); + } + + /** Resets to its initial state; all fields are set to zero, as if SkBitmap had + been initialized by SkBitmap(). + + Sets width, height, row bytes to zero; pixel address to nullptr; SkColorType to + kUnknown_SkColorType; and SkAlphaType to kUnknown_SkAlphaType. + + If SkPixelRef is allocated, its reference count is decreased by one, releasing + its memory if SkBitmap is the sole owner. + + example: https://fiddle.skia.org/c/@Bitmap_reset + */ + void reset(); + + /** Returns true if all pixels are opaque. SkColorType determines how pixels + are encoded, and whether pixel describes alpha. Returns true for SkColorType + without alpha in each pixel; for other SkColorType, returns true if all + pixels have alpha values equivalent to 1.0 or greater. + + For SkColorType kRGB_565_SkColorType or kGray_8_SkColorType: always + returns true. For SkColorType kAlpha_8_SkColorType, kBGRA_8888_SkColorType, + kRGBA_8888_SkColorType: returns true if all pixel alpha values are 255. + For SkColorType kARGB_4444_SkColorType: returns true if all pixel alpha values are 15. + For kRGBA_F16_SkColorType: returns true if all pixel alpha values are 1.0 or + greater. + + Returns false for kUnknown_SkColorType. 
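A hedged sketch of the raw-access calls documented above (getPixels(), computeByteSize(), setImmutable()); the helper name and fill value are illustrative only.

    #include <cstring>
    #include "include/core/SkBitmap.h"
    #include "include/core/SkImageInfo.h"

    // Allocate an opaque RGBA bitmap, fill its memory directly, then freeze it.
    static SkBitmap makeWhiteOpaque() {
        SkBitmap bm;
        bm.allocPixels(SkImageInfo::Make(64, 64, kRGBA_8888_SkColorType, kOpaque_SkAlphaType));
        // computeByteSize() spans every row (it only omits padding after the last row),
        // so one memset covers the whole buffer reachable through getPixels().
        memset(bm.getPixels(), 0xFF, bm.computeByteSize());
        bm.setImmutable();                      // later writes would assert in debug builds
        return bm;
    }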
+ + @param bm SkBitmap to check + @return true if all pixels have opaque values or SkColorType is opaque + */ + static bool ComputeIsOpaque(const SkBitmap& bm) { + return bm.pixmap().computeIsOpaque(); + } + + /** Returns SkRect { 0, 0, width(), height() }. + + @param bounds container for floating point rectangle + + example: https://fiddle.skia.org/c/@Bitmap_getBounds + */ + void getBounds(SkRect* bounds) const; + + /** Returns SkIRect { 0, 0, width(), height() }. + + @param bounds container for integral rectangle + + example: https://fiddle.skia.org/c/@Bitmap_getBounds_2 + */ + void getBounds(SkIRect* bounds) const; + + /** Returns SkIRect { 0, 0, width(), height() }. + + @return integral rectangle from origin to width() and height() + */ + SkIRect bounds() const { return fPixmap.info().bounds(); } + + /** Returns SkISize { width(), height() }. + + @return integral size of width() and height() + */ + SkISize dimensions() const { return fPixmap.info().dimensions(); } + + /** Returns the bounds of this bitmap, offset by its SkPixelRef origin. + + @return bounds within SkPixelRef bounds + */ + SkIRect getSubset() const { + SkIPoint origin = this->pixelRefOrigin(); + return SkIRect::MakeXYWH(origin.x(), origin.y(), this->width(), this->height()); + } + + /** Sets width, height, SkAlphaType, SkColorType, SkColorSpace, and optional + rowBytes. Frees pixels, and returns true if successful. + + imageInfo.alphaType() may be altered to a value permitted by imageInfo.colorSpace(). + If imageInfo.colorType() is kUnknown_SkColorType, imageInfo.alphaType() is + set to kUnknown_SkAlphaType. + If imageInfo.colorType() is kAlpha_8_SkColorType and imageInfo.alphaType() is + kUnpremul_SkAlphaType, imageInfo.alphaType() is replaced by kPremul_SkAlphaType. + If imageInfo.colorType() is kRGB_565_SkColorType or kGray_8_SkColorType, + imageInfo.alphaType() is set to kOpaque_SkAlphaType. + If imageInfo.colorType() is kARGB_4444_SkColorType, kRGBA_8888_SkColorType, + kBGRA_8888_SkColorType, or kRGBA_F16_SkColorType: imageInfo.alphaType() remains + unchanged. + + rowBytes must equal or exceed imageInfo.minRowBytes(). If imageInfo.colorSpace() is + kUnknown_SkColorType, rowBytes is ignored and treated as zero; for all other + SkColorSpace values, rowBytes of zero is treated as imageInfo.minRowBytes(). + + Calls reset() and returns false if: + - rowBytes exceeds 31 bits + - imageInfo.width() is negative + - imageInfo.height() is negative + - rowBytes is positive and less than imageInfo.width() times imageInfo.bytesPerPixel() + + @param imageInfo contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param rowBytes imageInfo.minRowBytes() or larger; or zero + @return true if SkImageInfo set successfully + + example: https://fiddle.skia.org/c/@Bitmap_setInfo + */ + bool setInfo(const SkImageInfo& imageInfo, size_t rowBytes = 0); + + /** \enum SkBitmap::AllocFlags + AllocFlags is obsolete. We always zero pixel memory when allocated. + */ + enum AllocFlags { + kZeroPixels_AllocFlag = 1 << 0, //!< zero pixel memory. No effect. This is the default. + }; + + /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel + memory. Memory is zeroed. + + Returns false and calls reset() if SkImageInfo could not be set, or memory could + not be allocated, or memory could not optionally be zeroed. + + On most platforms, allocating pixel memory may succeed even though there is + not sufficient memory to hold pixels; allocation does not take place + until the pixels are written to. 
The actual behavior depends on the platform + implementation of calloc(). + + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param flags kZeroPixels_AllocFlag, or zero + @return true if pixels allocation is successful + */ + bool SK_WARN_UNUSED_RESULT tryAllocPixelsFlags(const SkImageInfo& info, uint32_t flags); + + /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel + memory. Memory is zeroed. + + Aborts execution if SkImageInfo could not be set, or memory could + not be allocated, or memory could not optionally + be zeroed. Abort steps may be provided by the user at compile time by defining + SK_ABORT. + + On most platforms, allocating pixel memory may succeed even though there is + not sufficient memory to hold pixels; allocation does not take place + until the pixels are written to. The actual behavior depends on the platform + implementation of calloc(). + + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param flags kZeroPixels_AllocFlag, or zero + + example: https://fiddle.skia.org/c/@Bitmap_allocPixelsFlags + */ + void allocPixelsFlags(const SkImageInfo& info, uint32_t flags); + + /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel + memory. rowBytes must equal or exceed info.width() times info.bytesPerPixel(), + or equal zero. Pass in zero for rowBytes to compute the minimum valid value. + + Returns false and calls reset() if SkImageInfo could not be set, or memory could + not be allocated. + + On most platforms, allocating pixel memory may succeed even though there is + not sufficient memory to hold pixels; allocation does not take place + until the pixels are written to. The actual behavior depends on the platform + implementation of malloc(). + + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param rowBytes size of pixel row or larger; may be zero + @return true if pixel storage is allocated + */ + bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info, size_t rowBytes); + + /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel + memory. rowBytes must equal or exceed info.width() times info.bytesPerPixel(), + or equal zero. Pass in zero for rowBytes to compute the minimum valid value. + + Aborts execution if SkImageInfo could not be set, or memory could + not be allocated. Abort steps may be provided by + the user at compile time by defining SK_ABORT. + + On most platforms, allocating pixel memory may succeed even though there is + not sufficient memory to hold pixels; allocation does not take place + until the pixels are written to. The actual behavior depends on the platform + implementation of malloc(). + + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param rowBytes size of pixel row or larger; may be zero + + example: https://fiddle.skia.org/c/@Bitmap_allocPixels + */ + void allocPixels(const SkImageInfo& info, size_t rowBytes); + + /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel + memory. + + Returns false and calls reset() if SkImageInfo could not be set, or memory could + not be allocated. + + On most platforms, allocating pixel memory may succeed even though there is + not sufficient memory to hold pixels; allocation does not take place + until the pixels are written to. The actual behavior depends on the platform + implementation of malloc(). 
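As a small sketch of the fallible allocation path with an explicit rowBytes, per the rules above; rounding rows up to 64 bytes is an arbitrary choice made for illustration.

    #include "include/core/SkBitmap.h"
    #include "include/core/SkImageInfo.h"

    // Try to allocate with padded rows; returns false (leaving *bm reset) on failure.
    static bool allocPadded(SkBitmap* bm, int w, int h) {
        SkImageInfo info = SkImageInfo::Make(w, h, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
        size_t rowBytes = ((info.minRowBytes() + 63) / 64) * 64;   // >= width * bytesPerPixel
        return bm->tryAllocPixels(info, rowBytes);                  // does not abort, unlike allocPixels()
    }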
+ + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @return true if pixel storage is allocated + */ + bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info) { + return this->tryAllocPixels(info, info.minRowBytes()); + } + + /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel + memory. + + Aborts execution if SkImageInfo could not be set, or memory could + not be allocated. Abort steps may be provided by + the user at compile time by defining SK_ABORT. + + On most platforms, allocating pixel memory may succeed even though there is + not sufficient memory to hold pixels; allocation does not take place + until the pixels are written to. The actual behavior depends on the platform + implementation of malloc(). + + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + + example: https://fiddle.skia.org/c/@Bitmap_allocPixels_2 + */ + void allocPixels(const SkImageInfo& info); + + /** Sets SkImageInfo to width, height, and native color type; and allocates + pixel memory. If isOpaque is true, sets SkImageInfo to kOpaque_SkAlphaType; + otherwise, sets to kPremul_SkAlphaType. + + Calls reset() and returns false if width exceeds 29 bits or is negative, + or height is negative. + + Returns false if allocation fails. + + Use to create SkBitmap that matches SkPMColor, the native pixel arrangement on + the platform. SkBitmap drawn to output device skips converting its pixel format. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @param isOpaque true if pixels do not have transparency + @return true if pixel storage is allocated + */ + bool SK_WARN_UNUSED_RESULT tryAllocN32Pixels(int width, int height, bool isOpaque = false); + + /** Sets SkImageInfo to width, height, and the native color type; and allocates + pixel memory. If isOpaque is true, sets SkImageInfo to kOpaque_SkAlphaType; + otherwise, sets to kPremul_SkAlphaType. + + Aborts if width exceeds 29 bits or is negative, or height is negative, or + allocation fails. Abort steps may be provided by the user at compile time by + defining SK_ABORT. + + Use to create SkBitmap that matches SkPMColor, the native pixel arrangement on + the platform. SkBitmap drawn to output device skips converting its pixel format. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @param isOpaque true if pixels do not have transparency + + example: https://fiddle.skia.org/c/@Bitmap_allocN32Pixels + */ + void allocN32Pixels(int width, int height, bool isOpaque = false); + + /** Sets SkImageInfo to info following the rules in setInfo(), and creates SkPixelRef + containing pixels and rowBytes. releaseProc, if not nullptr, is called + immediately on failure or when pixels are no longer referenced. context may be + nullptr. + + If SkImageInfo could not be set, or rowBytes is less than info.minRowBytes(): + calls releaseProc if present, calls reset(), and returns false. + + Otherwise, if pixels equals nullptr: sets SkImageInfo, calls releaseProc if + present, returns true. + + If SkImageInfo is set, pixels is not nullptr, and releaseProc is not nullptr: + when pixels are no longer referenced, calls releaseProc with pixels and context + as parameters. 
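To illustrate the releaseProc contract described above, a hedged sketch that wraps caller-owned memory; the helper name is invented.

    #include <cstdlib>
    #include "include/core/SkBitmap.h"
    #include "include/core/SkImageInfo.h"

    // Wrap a heap buffer in an SkBitmap; free it once nothing references the pixels.
    static bool wrapOwnedPixels(SkBitmap* bm, int w, int h) {
        SkImageInfo info = SkImageInfo::Make(w, h, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
        void* pixels = calloc(info.height(), info.minRowBytes());
        if (!pixels) {
            return false;
        }
        auto releaseProc = [](void* addr, void* /*context*/) { free(addr); };
        // On failure installPixels() invokes releaseProc immediately, so the buffer is not leaked.
        return bm->installPixels(info, pixels, info.minRowBytes(), releaseProc, nullptr);
    }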
+ + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param pixels address or pixel storage; may be nullptr + @param rowBytes size of pixel row or larger + @param releaseProc function called when pixels can be deleted; may be nullptr + @param context caller state passed to releaseProc; may be nullptr + @return true if SkImageInfo is set to info + */ + bool installPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, + void (*releaseProc)(void* addr, void* context), void* context); + + /** Sets SkImageInfo to info following the rules in setInfo(), and creates SkPixelRef + containing pixels and rowBytes. + + If SkImageInfo could not be set, or rowBytes is less than info.minRowBytes(): + calls reset(), and returns false. + + Otherwise, if pixels equals nullptr: sets SkImageInfo, returns true. + + Caller must ensure that pixels are valid for the lifetime of SkBitmap and SkPixelRef. + + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param pixels address or pixel storage; may be nullptr + @param rowBytes size of pixel row or larger + @return true if SkImageInfo is set to info + */ + bool installPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) { + return this->installPixels(info, pixels, rowBytes, nullptr, nullptr); + } + + /** Sets SkImageInfo to pixmap.info() following the rules in setInfo(), and creates + SkPixelRef containing pixmap.addr() and pixmap.rowBytes(). + + If SkImageInfo could not be set, or pixmap.rowBytes() is less than + SkImageInfo::minRowBytes(): calls reset(), and returns false. + + Otherwise, if pixmap.addr() equals nullptr: sets SkImageInfo, returns true. + + Caller must ensure that pixmap is valid for the lifetime of SkBitmap and SkPixelRef. + + @param pixmap SkImageInfo, pixel address, and rowBytes() + @return true if SkImageInfo was set to pixmap.info() + + example: https://fiddle.skia.org/c/@Bitmap_installPixels_3 + */ + bool installPixels(const SkPixmap& pixmap); + + /** Deprecated. + */ + bool installMaskPixels(const SkMask& mask); + + /** Replaces SkPixelRef with pixels, preserving SkImageInfo and rowBytes(). + Sets SkPixelRef origin to (0, 0). + + If pixels is nullptr, or if info().colorType() equals kUnknown_SkColorType; + release reference to SkPixelRef, and set SkPixelRef to nullptr. + + Caller is responsible for handling ownership pixel memory for the lifetime + of SkBitmap and SkPixelRef. + + @param pixels address of pixel storage, managed by caller + + example: https://fiddle.skia.org/c/@Bitmap_setPixels + */ + void setPixels(void* pixels); + + /** Allocates pixel memory with HeapAllocator, and replaces existing SkPixelRef. + The allocation size is determined by SkImageInfo width, height, and SkColorType. + + Returns false if info().colorType() is kUnknown_SkColorType, or allocation fails. + + @return true if the allocation succeeds + */ + bool SK_WARN_UNUSED_RESULT tryAllocPixels() { + return this->tryAllocPixels((Allocator*)nullptr); + } + + /** Allocates pixel memory with HeapAllocator, and replaces existing SkPixelRef. + The allocation size is determined by SkImageInfo width, height, and SkColorType. + + Aborts if info().colorType() is kUnknown_SkColorType, or allocation fails. + Abort steps may be provided by the user at compile + time by defining SK_ABORT. + + example: https://fiddle.skia.org/c/@Bitmap_allocPixels_3 + */ + void allocPixels(); + + /** Allocates pixel memory with allocator, and replaces existing SkPixelRef. 
+ The allocation size is determined by SkImageInfo width, height, and SkColorType. + If allocator is nullptr, use HeapAllocator instead. + + Returns false if Allocator::allocPixelRef return false. + + @param allocator instance of SkBitmap::Allocator instantiation + @return true if custom allocator reports success + */ + bool SK_WARN_UNUSED_RESULT tryAllocPixels(Allocator* allocator); + + /** Allocates pixel memory with allocator, and replaces existing SkPixelRef. + The allocation size is determined by SkImageInfo width, height, and SkColorType. + If allocator is nullptr, use HeapAllocator instead. + + Aborts if Allocator::allocPixelRef return false. Abort steps may be provided by + the user at compile time by defining SK_ABORT. + + @param allocator instance of SkBitmap::Allocator instantiation + + example: https://fiddle.skia.org/c/@Bitmap_allocPixels_4 + */ + void allocPixels(Allocator* allocator); + + /** Returns SkPixelRef, which contains: pixel base address; its dimensions; and + rowBytes(), the interval from one row to the next. Does not change SkPixelRef + reference count. SkPixelRef may be shared by multiple bitmaps. + If SkPixelRef has not been set, returns nullptr. + + @return SkPixelRef, or nullptr + */ + SkPixelRef* pixelRef() const { return fPixelRef.get(); } + + /** Returns origin of pixels within SkPixelRef. SkBitmap bounds is always contained + by SkPixelRef bounds, which may be the same size or larger. Multiple SkBitmap + can share the same SkPixelRef, where each SkBitmap has different bounds. + + The returned origin added to SkBitmap dimensions equals or is smaller than the + SkPixelRef dimensions. + + Returns (0, 0) if SkPixelRef is nullptr. + + @return pixel origin within SkPixelRef + + example: https://fiddle.skia.org/c/@Bitmap_pixelRefOrigin + */ + SkIPoint pixelRefOrigin() const; + + /** Replaces pixelRef and origin in SkBitmap. dx and dy specify the offset + within the SkPixelRef pixels for the top-left corner of the bitmap. + + Asserts in debug builds if dx or dy are out of range. Pins dx and dy + to legal range in release builds. + + The caller is responsible for ensuring that the pixels match the + SkColorType and SkAlphaType in SkImageInfo. + + @param pixelRef SkPixelRef describing pixel address and rowBytes() + @param dx column offset in SkPixelRef for bitmap origin + @param dy row offset in SkPixelRef for bitmap origin + + example: https://fiddle.skia.org/c/@Bitmap_setPixelRef + */ + void setPixelRef(sk_sp<SkPixelRef> pixelRef, int dx, int dy); + + /** Returns true if SkBitmap is can be drawn. + + @return true if getPixels() is not nullptr + */ + bool readyToDraw() const { + return this->getPixels() != nullptr; + } + + /** Returns a unique value corresponding to the pixels in SkPixelRef. + Returns a different value after notifyPixelsChanged() has been called. + Returns zero if SkPixelRef is nullptr. + + Determines if pixels have changed since last examined. + + @return unique value for pixels in SkPixelRef + + example: https://fiddle.skia.org/c/@Bitmap_getGenerationID + */ + uint32_t getGenerationID() const; + + /** Marks that pixels in SkPixelRef have changed. Subsequent calls to + getGenerationID() return a different value. + + example: https://fiddle.skia.org/c/@Bitmap_notifyPixelsChanged + */ + void notifyPixelsChanged() const; + + /** Replaces pixel values with c, interpreted as being in the sRGB SkColorSpace. + All pixels contained by bounds() are affected. 
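One way the generation ID documented above is typically used, sketched under the assumption of a caller-maintained cache key; the helper is illustrative.

    #include <cstdint>
    #include "include/core/SkBitmap.h"

    // Cheap "did the pixels change?" check for caches keyed on a bitmap's contents.
    static bool pixelsChangedSince(const SkBitmap& bm, uint32_t* lastGenID) {
        uint32_t gen = bm.getGenerationID();    // changes after notifyPixelsChanged()
        bool changed = (gen != *lastGenID);
        *lastGenID = gen;
        return changed;
    }

Code that writes through the raw pixel pointers is expected to call notifyPixelsChanged() itself so that dependent caches observe a new ID.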
If the colorType() is + kGray_8_SkColorType or kRGB_565_SkColorType, then alpha is ignored; RGB is + treated as opaque. If colorType() is kAlpha_8_SkColorType, then RGB is ignored. + + @param c unpremultiplied color + + example: https://fiddle.skia.org/c/@Bitmap_eraseColor + */ + void eraseColor(SkColor c) const; + + /** Replaces pixel values with unpremultiplied color built from a, r, g, and b, + interpreted as being in the sRGB SkColorSpace. All pixels contained by + bounds() are affected. If the colorType() is kGray_8_SkColorType or + kRGB_565_SkColorType, then a is ignored; r, g, and b are treated as opaque. + If colorType() is kAlpha_8_SkColorType, then r, g, and b are ignored. + + @param a amount of alpha, from fully transparent (0) to fully opaque (255) + @param r amount of red, from no red (0) to full red (255) + @param g amount of green, from no green (0) to full green (255) + @param b amount of blue, from no blue (0) to full blue (255) + */ + void eraseARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) const { + this->eraseColor(SkColorSetARGB(a, r, g, b)); + } + + /** Replaces pixel values inside area with c. interpreted as being in the sRGB + SkColorSpace. If area does not intersect bounds(), call has no effect. + + If the colorType() is kGray_8_SkColorType or kRGB_565_SkColorType, then alpha + is ignored; RGB is treated as opaque. If colorType() is kAlpha_8_SkColorType, + then RGB is ignored. + + @param c unpremultiplied color + @param area rectangle to fill + + example: https://fiddle.skia.org/c/@Bitmap_erase + */ + void erase(SkColor c, const SkIRect& area) const; + + /** Deprecated. + */ + void eraseArea(const SkIRect& area, SkColor c) const { + this->erase(c, area); + } + + /** Returns pixel at (x, y) as unpremultiplied color. + Returns black with alpha if SkColorType is kAlpha_8_SkColorType. + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined; and returns undefined values or may crash if + SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or + pixel address is nullptr. + + SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the + conversion to unpremultiplied color; original pixel data may have additional + precision. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return pixel converted to unpremultiplied color + */ + SkColor getColor(int x, int y) const { + return this->pixmap().getColor(x, y); + } + + /** Look up the pixel at (x,y) and return its alpha component, normalized to [0..1]. + This is roughly equivalent to SkGetColorA(getColor()), but can be more efficent + (and more precise if the pixels store more than 8 bits per component). + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return alpha converted to normalized float + */ + float getAlphaf(int x, int y) const { + return this->pixmap().getAlphaf(x, y); + } + + /** Returns pixel address at (x, y). + + Input is not validated: out of bounds values of x or y, or kUnknown_SkColorType, + trigger an assert() if built with SK_DEBUG defined. Returns nullptr if + SkColorType is kUnknown_SkColorType, or SkPixelRef is nullptr. + + Performs a lookup of pixel size; for better performance, call + one of: getAddr8(), getAddr16(), or getAddr32(). 
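A brief sketch tying together eraseColor(), getColor(), getAlphaf(), and the raw address accessors described above; it assumes the bitmap already has pixels allocated, and the coordinates and fill color are arbitrary.

    #include <cstdint>
    #include "include/core/SkBitmap.h"
    #include "include/core/SkColor.h"

    // Fill with half-transparent red, then read one pixel back three different ways.
    static void inspectPixel(SkBitmap& bm) {
        bm.eraseColor(SkColorSetARGB(0x80, 0xFF, 0x00, 0x00));   // unpremultiplied sRGB input
        SkColor c   = bm.getColor(10, 10);     // converted back to an unpremultiplied SkColor
        float alpha = bm.getAlphaf(10, 10);    // alpha normalized to [0, 1]
        uint32_t* p = bm.getAddr32(10, 10);    // raw word; only valid for 4-byte color types
        (void)c; (void)alpha; (void)p;
    }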
+ + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return generic pointer to pixel + + example: https://fiddle.skia.org/c/@Bitmap_getAddr + */ + void* getAddr(int x, int y) const; + + /** Returns address at (x, y). + + Input is not validated. Triggers an assert() if built with SK_DEBUG defined and: + - SkPixelRef is nullptr + - bytesPerPixel() is not four + - x is negative, or not less than width() + - y is negative, or not less than height() + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return unsigned 32-bit pointer to pixel at (x, y) + */ + inline uint32_t* getAddr32(int x, int y) const; + + /** Returns address at (x, y). + + Input is not validated. Triggers an assert() if built with SK_DEBUG defined and: + - SkPixelRef is nullptr + - bytesPerPixel() is not two + - x is negative, or not less than width() + - y is negative, or not less than height() + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return unsigned 16-bit pointer to pixel at (x, y) + */ + inline uint16_t* getAddr16(int x, int y) const; + + /** Returns address at (x, y). + + Input is not validated. Triggers an assert() if built with SK_DEBUG defined and: + - SkPixelRef is nullptr + - bytesPerPixel() is not one + - x is negative, or not less than width() + - y is negative, or not less than height() + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return unsigned 8-bit pointer to pixel at (x, y) + */ + inline uint8_t* getAddr8(int x, int y) const; + + /** Shares SkPixelRef with dst. Pixels are not copied; SkBitmap and dst point + to the same pixels; dst bounds() are set to the intersection of subset + and the original bounds(). + + subset may be larger than bounds(). Any area outside of bounds() is ignored. + + Any contents of dst are discarded. + + Return false if: + - dst is nullptr + - SkPixelRef is nullptr + - subset does not intersect bounds() + + @param dst SkBitmap set to subset + @param subset rectangle of pixels to reference + @return true if dst is replaced by subset + + example: https://fiddle.skia.org/c/@Bitmap_extractSubset + */ + bool extractSubset(SkBitmap* dst, const SkIRect& subset) const; + + /** Copies a SkRect of pixels from SkBitmap to dstPixels. Copy starts at (srcX, srcY), + and does not exceed SkBitmap (width(), height()). + + dstInfo specifies width, height, SkColorType, SkAlphaType, and SkColorSpace of + destination. dstRowBytes specifics the gap from one destination row to the next. + Returns true if pixels are copied. Returns false if: + - dstInfo has no address + - dstRowBytes is less than dstInfo.minRowBytes() + - SkPixelRef is nullptr + + Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match. + If SkBitmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match. + If SkBitmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must + match. If SkBitmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns + false if pixel conversion is not possible. + + srcX and srcY may be negative to copy only top or left of source. Returns + false if width() or height() is zero or negative. 
+ Returns false if abs(srcX) >= Bitmap width(), or if abs(srcY) >= Bitmap height(). + + @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace + @param dstPixels destination pixel storage + @param dstRowBytes destination row length + @param srcX column index whose absolute value is less than width() + @param srcY row index whose absolute value is less than height() + @return true if pixels are copied to dstPixels + */ + bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, + int srcX, int srcY) const; + + /** Copies a SkRect of pixels from SkBitmap to dst. Copy starts at (srcX, srcY), and + does not exceed SkBitmap (width(), height()). + + dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage, + and row bytes of destination. dst.rowBytes() specifics the gap from one destination + row to the next. Returns true if pixels are copied. Returns false if: + - dst pixel storage equals nullptr + - dst.rowBytes is less than SkImageInfo::minRowBytes() + - SkPixelRef is nullptr + + Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match. + If SkBitmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match. + If SkBitmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must + match. If SkBitmap colorSpace() is nullptr, dst SkColorSpace must match. Returns + false if pixel conversion is not possible. + + srcX and srcY may be negative to copy only top or left of source. Returns + false if width() or height() is zero or negative. + Returns false if abs(srcX) >= Bitmap width(), or if abs(srcY) >= Bitmap height(). + + @param dst destination SkPixmap: SkImageInfo, pixels, row bytes + @param srcX column index whose absolute value is less than width() + @param srcY row index whose absolute value is less than height() + @return true if pixels are copied to dst + + example: https://fiddle.skia.org/c/@Bitmap_readPixels_2 + */ + bool readPixels(const SkPixmap& dst, int srcX, int srcY) const; + + /** Copies a SkRect of pixels from SkBitmap to dst. Copy starts at (0, 0), and + does not exceed SkBitmap (width(), height()). + + dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage, + and row bytes of destination. dst.rowBytes() specifics the gap from one destination + row to the next. Returns true if pixels are copied. Returns false if: + - dst pixel storage equals nullptr + - dst.rowBytes is less than SkImageInfo::minRowBytes() + - SkPixelRef is nullptr + + Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match. + If SkBitmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match. + If SkBitmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must + match. If SkBitmap colorSpace() is nullptr, dst SkColorSpace must match. Returns + false if pixel conversion is not possible. + + @param dst destination SkPixmap: SkImageInfo, pixels, row bytes + @return true if pixels are copied to dst + */ + bool readPixels(const SkPixmap& dst) const { + return this->readPixels(dst, 0, 0); + } + + /** Copies a SkRect of pixels from src. Copy starts at (dstX, dstY), and does not exceed + (src.width(), src.height()). + + src specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage, + and row bytes of source. src.rowBytes() specifics the gap from one source + row to the next. 
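As a sketch of the readPixels() conversion rules above: copy a bitmap into a caller-provided, tightly packed unpremultiplied RGBA8888 buffer. The helper name and buffer type are illustrative.

    #include <cstdint>
    #include <vector>
    #include "include/core/SkBitmap.h"
    #include "include/core/SkImageInfo.h"

    // Copy-convert src into unpremultiplied RGBA8888 bytes.
    static bool readAsRGBA(const SkBitmap& src, std::vector<uint8_t>* out) {
        SkImageInfo dstInfo = SkImageInfo::Make(src.width(), src.height(),
                                                kRGBA_8888_SkColorType, kUnpremul_SkAlphaType);
        out->resize(dstInfo.computeMinByteSize());
        // Returns false if the conversion is impossible or src has no pixels.
        return src.readPixels(dstInfo, out->data(), dstInfo.minRowBytes(), 0, 0);
    }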
Returns true if pixels are copied. Returns false if: + - src pixel storage equals nullptr + - src.rowBytes is less than SkImageInfo::minRowBytes() + - SkPixelRef is nullptr + + Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; src SkColorType must match. + If SkBitmap colorType() is kGray_8_SkColorType, src SkColorSpace must match. + If SkBitmap alphaType() is kOpaque_SkAlphaType, src SkAlphaType must + match. If SkBitmap colorSpace() is nullptr, src SkColorSpace must match. Returns + false if pixel conversion is not possible. + + dstX and dstY may be negative to copy only top or left of source. Returns + false if width() or height() is zero or negative. + Returns false if abs(dstX) >= Bitmap width(), or if abs(dstY) >= Bitmap height(). + + @param src source SkPixmap: SkImageInfo, pixels, row bytes + @param dstX column index whose absolute value is less than width() + @param dstY row index whose absolute value is less than height() + @return true if src pixels are copied to SkBitmap + + example: https://fiddle.skia.org/c/@Bitmap_writePixels + */ + bool writePixels(const SkPixmap& src, int dstX, int dstY); + + /** Copies a SkRect of pixels from src. Copy starts at (0, 0), and does not exceed + (src.width(), src.height()). + + src specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage, + and row bytes of source. src.rowBytes() specifics the gap from one source + row to the next. Returns true if pixels are copied. Returns false if: + - src pixel storage equals nullptr + - src.rowBytes is less than SkImageInfo::minRowBytes() + - SkPixelRef is nullptr + + Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; src SkColorType must match. + If SkBitmap colorType() is kGray_8_SkColorType, src SkColorSpace must match. + If SkBitmap alphaType() is kOpaque_SkAlphaType, src SkAlphaType must + match. If SkBitmap colorSpace() is nullptr, src SkColorSpace must match. Returns + false if pixel conversion is not possible. + + @param src source SkPixmap: SkImageInfo, pixels, row bytes + @return true if src pixels are copied to SkBitmap + */ + bool writePixels(const SkPixmap& src) { + return this->writePixels(src, 0, 0); + } + + /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to + or dst pixels cannot be allocated. + + Uses HeapAllocator to reserve memory for dst SkPixelRef. + + @param dst holds SkPixelRef to fill with alpha layer + @return true if alpha layer was constructed in dst SkPixelRef + */ + bool extractAlpha(SkBitmap* dst) const { + return this->extractAlpha(dst, nullptr, nullptr, nullptr); + } + + /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to + or dst pixels cannot be allocated. + + If paint is not nullptr and contains SkMaskFilter, SkMaskFilter + generates mask alpha from SkBitmap. Uses HeapAllocator to reserve memory for dst + SkPixelRef. Sets offset to top-left position for dst for alignment with SkBitmap; + (0, 0) unless SkMaskFilter generates mask. 
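A hedged sketch of the extractAlpha() overload documented above, pairing it with a blur SkMaskFilter; the sigma value and helper name are arbitrary.

    #include "include/core/SkBitmap.h"
    #include "include/core/SkMaskFilter.h"
    #include "include/core/SkPaint.h"
    #include "include/core/SkPoint.h"

    // Build a blurred alpha mask from a bitmap's coverage.
    static bool blurredAlphaMask(const SkBitmap& src, SkBitmap* mask) {
        SkPaint paint;
        paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, 4.0f));
        SkIPoint offset;   // where the (possibly larger) mask sits relative to src
        return src.extractAlpha(mask, &paint, &offset);
    }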
+ + @param dst holds SkPixelRef to fill with alpha layer + @param paint holds optional SkMaskFilter; may be nullptr + @param offset top-left position for dst; may be nullptr + @return true if alpha layer was constructed in dst SkPixelRef + */ + bool extractAlpha(SkBitmap* dst, const SkPaint* paint, + SkIPoint* offset) const { + return this->extractAlpha(dst, paint, nullptr, offset); + } + + /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to + or dst pixels cannot be allocated. + + If paint is not nullptr and contains SkMaskFilter, SkMaskFilter + generates mask alpha from SkBitmap. allocator may reference a custom allocation + class or be set to nullptr to use HeapAllocator. Sets offset to top-left + position for dst for alignment with SkBitmap; (0, 0) unless SkMaskFilter generates + mask. + + @param dst holds SkPixelRef to fill with alpha layer + @param paint holds optional SkMaskFilter; may be nullptr + @param allocator function to reserve memory for SkPixelRef; may be nullptr + @param offset top-left position for dst; may be nullptr + @return true if alpha layer was constructed in dst SkPixelRef + */ + bool extractAlpha(SkBitmap* dst, const SkPaint* paint, Allocator* allocator, + SkIPoint* offset) const; + + /** Copies SkBitmap pixel address, row bytes, and SkImageInfo to pixmap, if address + is available, and returns true. If pixel address is not available, return + false and leave pixmap unchanged. + + pixmap contents become invalid on any future change to SkBitmap. + + @param pixmap storage for pixel state if pixels are readable; otherwise, ignored + @return true if SkBitmap has direct access to pixels + + example: https://fiddle.skia.org/c/@Bitmap_peekPixels + */ + bool peekPixels(SkPixmap* pixmap) const; + sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions&, + const SkMatrix* = nullptr) const; + + sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions& sampling, + const SkMatrix& localMatrix) const { + return this->makeShader(tmx, tmy, sampling, &localMatrix); + } + + sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling, + const SkMatrix* localMatrix = nullptr) const { + return this->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, sampling, localMatrix); + } + + sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling, + const SkMatrix& localMatrix) const { + return this->makeShader(sampling, &localMatrix); + } + + /** + * Returns a new image from the bitmap. If the bitmap is marked immutable, this will + * share the pixel buffer. If not, it will make a copy of the pixels for the image. + */ + sk_sp<SkImage> asImage() const; + + /** Asserts if internal values are illegal or inconsistent. Only available if + SK_DEBUG is defined at compile time. + */ + SkDEBUGCODE(void validate() const;) + + /** \class SkBitmap::Allocator + Abstract subclass of HeapAllocator. + */ + class Allocator : public SkRefCnt { + public: + + /** Allocates the pixel memory for the bitmap, given its dimensions and + SkColorType. Returns true on success, where success means either setPixels() + or setPixelRef() was called. + + @param bitmap SkBitmap containing SkImageInfo as input, and SkPixelRef as output + @return true if SkPixelRef was allocated + */ + virtual bool allocPixelRef(SkBitmap* bitmap) = 0; + private: + using INHERITED = SkRefCnt; + }; + + /** \class SkBitmap::HeapAllocator + Subclass of SkBitmap::Allocator that returns a SkPixelRef that allocates its pixel + memory from the heap. 
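A short sketch of makeShader() and asImage() as declared above; marking the bitmap immutable first is what lets asImage() share rather than copy the pixels.

    #include "include/core/SkBitmap.h"
    #include "include/core/SkImage.h"
    #include "include/core/SkPaint.h"
    #include "include/core/SkSamplingOptions.h"
    #include "include/core/SkShader.h"
    #include "include/core/SkTileMode.h"

    // Reuse a bitmap's pixels as a repeating shader and as an SkImage.
    static void useAsShaderAndImage(SkBitmap& bm, SkPaint* paint) {
        paint->setShader(bm.makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat,
                                       SkSamplingOptions(SkFilterMode::kLinear, SkMipmapMode::kNone)));
        bm.setImmutable();                    // immutable pixels can be shared with the image
        sk_sp<SkImage> image = bm.asImage();  // otherwise asImage() copies the pixels
        (void)image;
    }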
This is the default SkBitmap::Allocator invoked by + allocPixels(). + */ + class HeapAllocator : public Allocator { + public: + + /** Allocates the pixel memory for the bitmap, given its dimensions and + SkColorType. Returns true on success, where success means either setPixels() + or setPixelRef() was called. + + @param bitmap SkBitmap containing SkImageInfo as input, and SkPixelRef as output + @return true if pixels are allocated + + example: https://fiddle.skia.org/c/@Bitmap_HeapAllocator_allocPixelRef + */ + bool allocPixelRef(SkBitmap* bitmap) override; + }; + +private: + sk_sp<SkPixelRef> fPixelRef; + SkPixmap fPixmap; + sk_sp<SkMipmap> fMips; + + friend class SkImage_Raster; + friend class SkReadBuffer; // unflatten +}; + +/////////////////////////////////////////////////////////////////////////////// + +inline uint32_t* SkBitmap::getAddr32(int x, int y) const { + SkASSERT(fPixmap.addr()); + return fPixmap.writable_addr32(x, y); +} + +inline uint16_t* SkBitmap::getAddr16(int x, int y) const { + SkASSERT(fPixmap.addr()); + return fPixmap.writable_addr16(x, y); +} + +inline uint8_t* SkBitmap::getAddr8(int x, int y) const { + SkASSERT(fPixmap.addr()); + return fPixmap.writable_addr8(x, y); +} + +#endif diff --git a/src/deps/skia/include/core/SkBlendMode.h b/src/deps/skia/include/core/SkBlendMode.h new file mode 100644 index 000000000..07640cf55 --- /dev/null +++ b/src/deps/skia/include/core/SkBlendMode.h @@ -0,0 +1,110 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkBlendMode_DEFINED +#define SkBlendMode_DEFINED + +#include "include/core/SkTypes.h" + +/** + * Blends are operators that take in two colors (source, destination) and return a new color. + * Many of these operate the same on all 4 components: red, green, blue, alpha. For these, + * we just document what happens to one component, rather than naming each one separately. + * + * Different SkColorTypes have different representations for color components: + * 8-bit: 0..255 + * 6-bit: 0..63 + * 5-bit: 0..31 + * 4-bit: 0..15 + * floats: 0...1 + * + * The documentation is expressed as if the component values are always 0..1 (floats). 
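To make the 0..1 documentation convention concrete, here is a tiny illustrative helper (not part of Skia) that evaluates the kSrcOver rule r = s + (1-sa)*d on normalized components:

#include <cstdint>

// Illustrative only (not Skia code): the documented kSrcOver rule
// "r = s + (1-sa)*d" evaluated on one normalized (0..1) component.
static float srcOverComponent(float s, float sa, float d) {
    return s + (1.0f - sa) * d;
}

// An 8-bit component maps into the documented 0..1 range by dividing by 255.
static float normalize8(uint8_t v) {
    return v / 255.0f;
}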
+ * + * For brevity, the documentation uses the following abbreviations + * s : source + * d : destination + * sa : source alpha + * da : destination alpha + * + * Results are abbreviated + * r : if all 4 components are computed in the same manner + * ra : result alpha component + * rc : result "color": red, green, blue components + */ +enum class SkBlendMode { + kClear, //!< r = 0 + kSrc, //!< r = s + kDst, //!< r = d + kSrcOver, //!< r = s + (1-sa)*d + kDstOver, //!< r = d + (1-da)*s + kSrcIn, //!< r = s * da + kDstIn, //!< r = d * sa + kSrcOut, //!< r = s * (1-da) + kDstOut, //!< r = d * (1-sa) + kSrcATop, //!< r = s*da + d*(1-sa) + kDstATop, //!< r = d*sa + s*(1-da) + kXor, //!< r = s*(1-da) + d*(1-sa) + kPlus, //!< r = min(s + d, 1) + kModulate, //!< r = s*d + kScreen, //!< r = s + d - s*d + + kOverlay, //!< multiply or screen, depending on destination + kDarken, //!< rc = s + d - max(s*da, d*sa), ra = kSrcOver + kLighten, //!< rc = s + d - min(s*da, d*sa), ra = kSrcOver + kColorDodge, //!< brighten destination to reflect source + kColorBurn, //!< darken destination to reflect source + kHardLight, //!< multiply or screen, depending on source + kSoftLight, //!< lighten or darken, depending on source + kDifference, //!< rc = s + d - 2*(min(s*da, d*sa)), ra = kSrcOver + kExclusion, //!< rc = s + d - two(s*d), ra = kSrcOver + kMultiply, //!< r = s*(1-da) + d*(1-sa) + s*d + + kHue, //!< hue of source with saturation and luminosity of destination + kSaturation, //!< saturation of source with hue and luminosity of destination + kColor, //!< hue and saturation of source with luminosity of destination + kLuminosity, //!< luminosity of source with hue and saturation of destination + + kLastCoeffMode = kScreen, //!< last porter duff blend mode + kLastSeparableMode = kMultiply, //!< last blend mode operating separately on components + kLastMode = kLuminosity, //!< last valid value +}; + +/** + * For Porter-Duff SkBlendModes (those <= kLastCoeffMode), these coefficients describe the blend + * equation used. Coefficient-based blend modes specify an equation: + * ('dstCoeff' * dst + 'srcCoeff' * src), where the coefficient values are constants, functions of + * the src or dst alpha, or functions of the src or dst color. + */ +enum class SkBlendModeCoeff { + kZero, /** 0 */ + kOne, /** 1 */ + kSC, /** src color */ + kISC, /** inverse src color (i.e. 1 - sc) */ + kDC, /** dst color */ + kIDC, /** inverse dst color (i.e. 1 - dc) */ + kSA, /** src alpha */ + kISA, /** inverse src alpha (i.e. 1 - sa) */ + kDA, /** dst alpha */ + kIDA, /** inverse dst alpha (i.e. 1 - da) */ + + kCoeffCount +}; + +/** + * Returns true if 'mode' is a coefficient-based blend mode (<= kLastCoeffMode). If true is + * returned, the mode's src and dst coefficient functions are set in 'src' and 'dst'. + */ +SK_API bool SkBlendMode_AsCoeff(SkBlendMode mode, SkBlendModeCoeff* src, SkBlendModeCoeff* dst); + + +/** Returns name of blendMode as null-terminated C string. + + @return C string +*/ +SK_API const char* SkBlendMode_Name(SkBlendMode blendMode); + +#endif diff --git a/src/deps/skia/include/core/SkBlender.h b/src/deps/skia/include/core/SkBlender.h new file mode 100644 index 000000000..7acba87f5 --- /dev/null +++ b/src/deps/skia/include/core/SkBlender.h @@ -0,0 +1,33 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
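A short sketch of the two free functions declared above; the printf reporting is only for illustration:

#include "include/core/SkBlendMode.h"
#include <cstdio>

static void describeMode(SkBlendMode mode) {
    SkBlendModeCoeff srcCoeff, dstCoeff;
    if (SkBlendMode_AsCoeff(mode, &srcCoeff, &dstCoeff)) {
        std::printf("%s is a coefficient-based (Porter-Duff) mode\n", SkBlendMode_Name(mode));
    } else {
        std::printf("%s requires the full blend equation\n", SkBlendMode_Name(mode));
    }
}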
+ */ + +#ifndef SkBlender_DEFINED +#define SkBlender_DEFINED + +#include "include/core/SkBlendMode.h" +#include "include/core/SkFlattenable.h" + +/** + * SkBlender represents a custom blend function in the Skia pipeline. When an SkBlender is + * present in a paint, the SkBlendMode is ignored. A blender combines a source color (the + * result of our paint) and destination color (from the canvas) into a final color. + */ +class SK_API SkBlender : public SkFlattenable { +public: + /** + * Create a blender that implements the specified BlendMode. + */ + static sk_sp<SkBlender> Mode(SkBlendMode mode); + +private: + SkBlender() = default; + friend class SkBlenderBase; + + using INHERITED = SkFlattenable; +}; + +#endif diff --git a/src/deps/skia/include/core/SkBlurTypes.h b/src/deps/skia/include/core/SkBlurTypes.h new file mode 100644 index 000000000..aec37b6e6 --- /dev/null +++ b/src/deps/skia/include/core/SkBlurTypes.h @@ -0,0 +1,22 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkBlurTypes_DEFINED +#define SkBlurTypes_DEFINED + +#include "include/core/SkTypes.h" + +enum SkBlurStyle : int { + kNormal_SkBlurStyle, //!< fuzzy inside and outside + kSolid_SkBlurStyle, //!< solid inside, fuzzy outside + kOuter_SkBlurStyle, //!< nothing inside, fuzzy outside + kInner_SkBlurStyle, //!< fuzzy inside, nothing outside + + kLastEnum_SkBlurStyle = kInner_SkBlurStyle, +}; + +#endif diff --git a/src/deps/skia/include/core/SkCanvas.h b/src/deps/skia/include/core/SkCanvas.h new file mode 100644 index 000000000..a2867f9bb --- /dev/null +++ b/src/deps/skia/include/core/SkCanvas.h @@ -0,0 +1,2583 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
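A hedged sketch combining SkBlender::Mode() with a blur mask filter from SkBlurTypes.h; it assumes SkPaint::setBlender() and SkMaskFilter::MakeBlur() are available in this vendored tree, and the kMultiply mode and 3.0f sigma are arbitrary:

#include "include/core/SkBlender.h"
#include "include/core/SkBlurTypes.h"
#include "include/core/SkMaskFilter.h"
#include "include/core/SkPaint.h"

static SkPaint makeSoftMultiplyPaint() {
    SkPaint paint;
    // A blender built from a fixed SkBlendMode; once a blender is set,
    // the paint's plain blend-mode setting is ignored.
    paint.setBlender(SkBlender::Mode(SkBlendMode::kMultiply));
    // kNormal_SkBlurStyle blurs both inside and outside the shape edge.
    paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, 3.0f));
    return paint;
}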
+ */ + +#ifndef SkCanvas_DEFINED +#define SkCanvas_DEFINED + +#include "include/core/SkBlendMode.h" +#include "include/core/SkClipOp.h" +#include "include/core/SkColor.h" +#include "include/core/SkFontTypes.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkM44.h" +#include "include/core/SkMatrix.h" +#include "include/core/SkPaint.h" +#include "include/core/SkPoint.h" +#include "include/core/SkRasterHandleAllocator.h" +#include "include/core/SkRect.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkSamplingOptions.h" +#include "include/core/SkScalar.h" +#include "include/core/SkSize.h" +#include "include/core/SkString.h" +#include "include/core/SkSurfaceProps.h" +#include "include/core/SkTypes.h" +#include "include/private/SkDeque.h" +#include "include/private/SkMacros.h" + +#include <cstring> +#include <memory> +#include <vector> + +#ifndef SK_SUPPORT_LEGACY_GETTOTALMATRIX +#define SK_SUPPORT_LEGACY_GETTOTALMATRIX +#endif + +class AutoLayerForImageFilter; +class GrBackendRenderTarget; +class GrRecordingContext; +class GrSlug; +class SkBaseDevice; +class SkBitmap; +class SkData; +class SkDrawable; +struct SkDrawShadowRec; +class SkFont; +class SkGlyphRunBuilder; +class SkGlyphRunList; +class SkImage; +class SkImageFilter; +class SkPaintFilterCanvas; +class SkPath; +class SkPicture; +class SkPixmap; +class SkRegion; +class SkRRect; +struct SkRSXform; +struct SkCustomMesh; +class SkSpecialImage; +class SkSurface; +class SkSurface_Base; +class SkTextBlob; +class SkVertices; + +namespace skstd { + template<typename T> class optional; +} + +/** \class SkCanvas + SkCanvas provides an interface for drawing, and how the drawing is clipped and transformed. + SkCanvas contains a stack of SkMatrix and clip values. + + SkCanvas and SkPaint together provide the state to draw into SkSurface or SkBaseDevice. + Each SkCanvas draw call transforms the geometry of the object by the concatenation of all + SkMatrix values in the stack. The transformed geometry is clipped by the intersection + of all of clip values in the stack. The SkCanvas draw calls use SkPaint to supply drawing + state such as color, SkTypeface, text size, stroke width, SkShader and so on. + + To draw to a pixel-based destination, create raster surface or GPU surface. + Request SkCanvas from SkSurface to obtain the interface to draw. + SkCanvas generated by raster surface draws to memory visible to the CPU. + SkCanvas generated by GPU surface uses Vulkan or OpenGL to draw to the GPU. + + To draw to a document, obtain SkCanvas from SVG canvas, document PDF, or SkPictureRecorder. + SkDocument based SkCanvas and other SkCanvas subclasses reference SkBaseDevice describing the + destination. + + SkCanvas can be constructed to draw to SkBitmap without first creating raster surface. + This approach may be deprecated in the future. +*/ +class SK_API SkCanvas { +public: + + /** Allocates raster SkCanvas that will draw directly into pixels. + + SkCanvas is returned if all parameters are valid. + Valid parameters include: + info dimensions are zero or positive; + info contains SkColorType and SkAlphaType supported by raster surface; + pixels is not nullptr; + rowBytes is zero or large enough to contain info width pixels of SkColorType. + + Pass zero for rowBytes to compute rowBytes from info width and size of pixel. + If rowBytes is greater than zero, it must be equal to or greater than + info width times bytes required for SkColorType. + + Pixel buffer size should be info height times computed rowBytes. 
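As a sketch of the surface-based flow described in the class comment, assuming the vendored SkSurface.h is available; the 128x128 size is arbitrary:

#include "include/core/SkCanvas.h"
#include "include/core/SkImage.h"
#include "include/core/SkSurface.h"

static sk_sp<SkImage> renderViaSurface() {
    // A CPU-backed surface; the canvas it returns draws into CPU-visible memory.
    sk_sp<SkSurface> surface = SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(128, 128));
    if (!surface) {
        return nullptr;
    }
    SkCanvas* canvas = surface->getCanvas();  // owned by the surface
    canvas->clear(SK_ColorWHITE);
    return surface->makeImageSnapshot();      // capture the drawing as an SkImage
}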
+ Pixels are not initialized. + To access pixels after drawing, call flush() or peekPixels(). + + @param info width, height, SkColorType, SkAlphaType, SkColorSpace, of raster surface; + width, or height, or both, may be zero + @param pixels pointer to destination pixels buffer + @param rowBytes interval from one SkSurface row to the next, or zero + @param props LCD striping orientation and setting for device independent fonts; + may be nullptr + @return SkCanvas if all parameters are valid; otherwise, nullptr + */ + static std::unique_ptr<SkCanvas> MakeRasterDirect(const SkImageInfo& info, void* pixels, + size_t rowBytes, + const SkSurfaceProps* props = nullptr); + + /** Allocates raster SkCanvas specified by inline image specification. Subsequent SkCanvas + calls draw into pixels. + SkColorType is set to kN32_SkColorType. + SkAlphaType is set to kPremul_SkAlphaType. + To access pixels after drawing, call flush() or peekPixels(). + + SkCanvas is returned if all parameters are valid. + Valid parameters include: + width and height are zero or positive; + pixels is not nullptr; + rowBytes is zero or large enough to contain width pixels of kN32_SkColorType. + + Pass zero for rowBytes to compute rowBytes from width and size of pixel. + If rowBytes is greater than zero, it must be equal to or greater than + width times bytes required for SkColorType. + + Pixel buffer size should be height times rowBytes. + + @param width pixel column count on raster surface created; must be zero or greater + @param height pixel row count on raster surface created; must be zero or greater + @param pixels pointer to destination pixels buffer; buffer size should be height + times rowBytes + @param rowBytes interval from one SkSurface row to the next, or zero + @return SkCanvas if all parameters are valid; otherwise, nullptr + */ + static std::unique_ptr<SkCanvas> MakeRasterDirectN32(int width, int height, SkPMColor* pixels, + size_t rowBytes) { + return MakeRasterDirect(SkImageInfo::MakeN32Premul(width, height), pixels, rowBytes); + } + + /** Creates an empty SkCanvas with no backing device or pixels, with + a width and height of zero. + + @return empty SkCanvas + + example: https://fiddle.skia.org/c/@Canvas_empty_constructor + */ + SkCanvas(); + + /** Creates SkCanvas of the specified dimensions without a SkSurface. + Used by subclasses with custom implementations for draw member functions. + + If props equals nullptr, SkSurfaceProps are created with + SkSurfaceProps::InitType settings, which choose the pixel striping + direction and order. Since a platform may dynamically change its direction when + the device is rotated, and since a platform may have multiple monitors with + different characteristics, it is best not to rely on this legacy behavior. + + @param width zero or greater + @param height zero or greater + @param props LCD striping orientation and setting for device independent fonts; + may be nullptr + @return SkCanvas placeholder with dimensions + + example: https://fiddle.skia.org/c/@Canvas_int_int_const_SkSurfaceProps_star + */ + SkCanvas(int width, int height, const SkSurfaceProps* props = nullptr); + + /** Private. For internal use only. + */ + explicit SkCanvas(sk_sp<SkBaseDevice> device); + + /** Constructs a canvas that draws into bitmap. + Sets kUnknown_SkPixelGeometry in constructed SkSurface. + + SkBitmap is copied so that subsequently editing bitmap will not affect + constructed SkCanvas. + + May be deprecated in the future. 
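A minimal sketch of MakeRasterDirect() drawing into caller-owned memory; the buffer sizing via computeMinByteSize() and the white clear are illustrative only:

#include "include/core/SkCanvas.h"
#include "include/core/SkImageInfo.h"
#include <cstdint>
#include <memory>
#include <vector>

static std::vector<uint32_t> renderToCallerOwnedPixels(int w, int h) {
    SkImageInfo info = SkImageInfo::MakeN32Premul(w, h);
    std::vector<uint32_t> pixels(info.computeMinByteSize() / sizeof(uint32_t));
    // The canvas draws straight into `pixels`; no SkSurface is involved.
    std::unique_ptr<SkCanvas> canvas =
            SkCanvas::MakeRasterDirect(info, pixels.data(), info.minRowBytes());
    if (canvas) {
        canvas->clear(SK_ColorWHITE);
    }
    return pixels;
}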
+ + @param bitmap width, height, SkColorType, SkAlphaType, and pixel + storage of raster surface + @return SkCanvas that can be used to draw into bitmap + + example: https://fiddle.skia.org/c/@Canvas_copy_const_SkBitmap + */ + explicit SkCanvas(const SkBitmap& bitmap); + +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + /** Private. + */ + enum class ColorBehavior { + kLegacy, //!< placeholder + }; + + /** Private. For use by Android framework only. + + @param bitmap specifies a bitmap for the canvas to draw into + @param behavior specializes this constructor; value is unused + @return SkCanvas that can be used to draw into bitmap + */ + SkCanvas(const SkBitmap& bitmap, ColorBehavior behavior); +#endif + + /** Constructs a canvas that draws into bitmap. + Use props to match the device characteristics, like LCD striping. + + bitmap is copied so that subsequently editing bitmap will not affect + constructed SkCanvas. + + @param bitmap width, height, SkColorType, SkAlphaType, + and pixel storage of raster surface + @param props order and orientation of RGB striping; and whether to use + device independent fonts + @return SkCanvas that can be used to draw into bitmap + + example: https://fiddle.skia.org/c/@Canvas_const_SkBitmap_const_SkSurfaceProps + */ + SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props); + + /** Draws saved layers, if any. + Frees up resources used by SkCanvas. + + example: https://fiddle.skia.org/c/@Canvas_destructor + */ + virtual ~SkCanvas(); + + /** Returns SkImageInfo for SkCanvas. If SkCanvas is not associated with raster surface or + GPU surface, returned SkColorType is set to kUnknown_SkColorType. + + @return dimensions and SkColorType of SkCanvas + + example: https://fiddle.skia.org/c/@Canvas_imageInfo + */ + SkImageInfo imageInfo() const; + + /** Copies SkSurfaceProps, if SkCanvas is associated with raster surface or + GPU surface, and returns true. Otherwise, returns false and leave props unchanged. + + @param props storage for writable SkSurfaceProps + @return true if SkSurfaceProps was copied + + example: https://fiddle.skia.org/c/@Canvas_getProps + */ + bool getProps(SkSurfaceProps* props) const; + + /** Triggers the immediate execution of all pending draw operations. + If SkCanvas is associated with GPU surface, resolves all pending GPU operations. + If SkCanvas is associated with raster surface, has no effect; raster draw + operations are never deferred. + + DEPRECATED: Replace usage with GrDirectContext::flush() + */ + void flush(); + + /** Gets the size of the base or root layer in global canvas coordinates. The + origin of the base layer is always (0,0). The area available for drawing may be + smaller (due to clipping or saveLayer). + + @return integral width and height of base layer + + example: https://fiddle.skia.org/c/@Canvas_getBaseLayerSize + */ + virtual SkISize getBaseLayerSize() const; + + /** Creates SkSurface matching info and props, and associates it with SkCanvas. + Returns nullptr if no match found. + + If props is nullptr, matches SkSurfaceProps in SkCanvas. If props is nullptr and SkCanvas + does not have SkSurfaceProps, creates SkSurface with default SkSurfaceProps. 
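A small sketch of the bitmap-backed constructor above; allocN32Pixels(), the 64x64 size, and the red circle are illustrative:

#include "include/core/SkBitmap.h"
#include "include/core/SkCanvas.h"

static SkBitmap drawIntoBitmap() {
    SkBitmap bitmap;
    bitmap.allocN32Pixels(64, 64);        // the bitmap owns its pixel storage
    SkCanvas canvas(bitmap);              // the canvas draws into that same storage
    canvas.clear(SK_ColorTRANSPARENT);
    SkPaint paint;
    paint.setColor(SK_ColorRED);
    paint.setAntiAlias(true);
    canvas.drawCircle(32, 32, 20, paint); // visible through `bitmap` afterwards
    return bitmap;
}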
+ + @param info width, height, SkColorType, SkAlphaType, and SkColorSpace + @param props SkSurfaceProps to match; may be nullptr to match SkCanvas + @return SkSurface matching info and props, or nullptr if no match is available + + example: https://fiddle.skia.org/c/@Canvas_makeSurface + */ + sk_sp<SkSurface> makeSurface(const SkImageInfo& info, const SkSurfaceProps* props = nullptr); + + /** Returns GPU context of the GPU surface associated with SkCanvas. + + @return GPU context, if available; nullptr otherwise + + example: https://fiddle.skia.org/c/@Canvas_recordingContext + */ + virtual GrRecordingContext* recordingContext(); + + /** Sometimes a canvas is owned by a surface. If it is, getSurface() will return a bare + * pointer to that surface, else this will return nullptr. + */ + SkSurface* getSurface() const; + + /** Returns the pixel base address, SkImageInfo, rowBytes, and origin if the pixels + can be read directly. The returned address is only valid + while SkCanvas is in scope and unchanged. Any SkCanvas call or SkSurface call + may invalidate the returned address and other returned values. + + If pixels are inaccessible, info, rowBytes, and origin are unchanged. + + @param info storage for writable pixels' SkImageInfo; may be nullptr + @param rowBytes storage for writable pixels' row bytes; may be nullptr + @param origin storage for SkCanvas top layer origin, its top-left corner; + may be nullptr + @return address of pixels, or nullptr if inaccessible + + example: https://fiddle.skia.org/c/@Canvas_accessTopLayerPixels_a + example: https://fiddle.skia.org/c/@Canvas_accessTopLayerPixels_b + */ + void* accessTopLayerPixels(SkImageInfo* info, size_t* rowBytes, SkIPoint* origin = nullptr); + + /** Returns custom context that tracks the SkMatrix and clip. + + Use SkRasterHandleAllocator to blend Skia drawing with custom drawing, typically performed + by the host platform user interface. The custom context returned is generated by + SkRasterHandleAllocator::MakeCanvas, which creates a custom canvas with raster storage for + the drawing destination. + + @return context of custom allocation + + example: https://fiddle.skia.org/c/@Canvas_accessTopRasterHandle + */ + SkRasterHandleAllocator::Handle accessTopRasterHandle() const; + + /** Returns true if SkCanvas has direct access to its pixels. + + Pixels are readable when SkBaseDevice is raster. Pixels are not readable when SkCanvas + is returned from GPU surface, returned by SkDocument::beginPage, returned by + SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility class + like DebugCanvas. + + pixmap is valid only while SkCanvas is in scope and unchanged. Any + SkCanvas or SkSurface call may invalidate the pixmap values. + + @param pixmap storage for pixel state if pixels are readable; otherwise, ignored + @return true if SkCanvas has direct access to pixels + + example: https://fiddle.skia.org/c/@Canvas_peekPixels + */ + bool peekPixels(SkPixmap* pixmap); + + /** Copies SkRect of pixels from SkCanvas into dstPixels. SkMatrix and clip are + ignored. + + Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()). + Destination SkRect corners are (0, 0) and (dstInfo.width(), dstInfo.height()). + Copies each readable pixel intersecting both rectangles, without scaling, + converting to dstInfo.colorType() and dstInfo.alphaType() if required. + + Pixels are readable when SkBaseDevice is raster, or backed by a GPU. 
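A sketch of peekPixels() on a canvas, assuming a raster-backed canvas; the opacity check is just one example of reading the returned pixmap:

#include "include/core/SkCanvas.h"
#include "include/core/SkPixmap.h"

static bool topLeftIsOpaque(SkCanvas* canvas) {
    SkPixmap pixmap;
    // Succeeds only for canvases with directly addressable pixels (raster-backed);
    // GPU-, recording-, and document-backed canvases return false.
    if (!canvas->peekPixels(&pixmap)) {
        return false;
    }
    return SkColorGetA(pixmap.getColor(0, 0)) == 0xFF;
}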
+ Pixels are not readable when SkCanvas is returned by SkDocument::beginPage, + returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility + class like DebugCanvas. + + The destination pixel storage must be allocated by the caller. + + Pixel values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination rectangles + are copied. dstPixels contents outside SkRect intersection are unchanged. + + Pass negative values for srcX or srcY to offset pixels across or down destination. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - SkCanvas pixels could not be converted to dstInfo.colorType() or dstInfo.alphaType(). + - SkCanvas pixels are not readable; for instance, SkCanvas is document-based. + - dstRowBytes is too small to contain one row of pixels. + + @param dstInfo width, height, SkColorType, and SkAlphaType of dstPixels + @param dstPixels storage for pixels; dstInfo.height() times dstRowBytes, or larger + @param dstRowBytes size of one destination row; dstInfo.width() times pixel size, or larger + @param srcX offset into readable pixels on x-axis; may be negative + @param srcY offset into readable pixels on y-axis; may be negative + @return true if pixels were copied + */ + bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, + int srcX, int srcY); + + /** Copies SkRect of pixels from SkCanvas into pixmap. SkMatrix and clip are + ignored. + + Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()). + Destination SkRect corners are (0, 0) and (pixmap.width(), pixmap.height()). + Copies each readable pixel intersecting both rectangles, without scaling, + converting to pixmap.colorType() and pixmap.alphaType() if required. + + Pixels are readable when SkBaseDevice is raster, or backed by a GPU. + Pixels are not readable when SkCanvas is returned by SkDocument::beginPage, + returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility + class like DebugCanvas. + + Caller must allocate pixel storage in pixmap if needed. + + Pixel values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination SkRect + are copied. pixmap pixels contents outside SkRect intersection are unchanged. + + Pass negative values for srcX or srcY to offset pixels across or down pixmap. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - SkCanvas pixels could not be converted to pixmap.colorType() or pixmap.alphaType(). + - SkCanvas pixels are not readable; for instance, SkCanvas is document-based. + - SkPixmap pixels could not be allocated. + - pixmap.rowBytes() is too small to contain one row of pixels. + + @param pixmap storage for pixels copied from SkCanvas + @param srcX offset into readable pixels on x-axis; may be negative + @param srcY offset into readable pixels on y-axis; may be negative + @return true if pixels were copied + + example: https://fiddle.skia.org/c/@Canvas_readPixels_2 + */ + bool readPixels(const SkPixmap& pixmap, int srcX, int srcY); + + /** Copies SkRect of pixels from SkCanvas into bitmap. SkMatrix and clip are + ignored. + + Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()). + Destination SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()). 
+ Copies each readable pixel intersecting both rectangles, without scaling, + converting to bitmap.colorType() and bitmap.alphaType() if required. + + Pixels are readable when SkBaseDevice is raster, or backed by a GPU. + Pixels are not readable when SkCanvas is returned by SkDocument::beginPage, + returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility + class like DebugCanvas. + + Caller must allocate pixel storage in bitmap if needed. + + SkBitmap values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination rectangles + are copied. SkBitmap pixels outside SkRect intersection are unchanged. + + Pass negative values for srcX or srcY to offset pixels across or down bitmap. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - SkCanvas pixels could not be converted to bitmap.colorType() or bitmap.alphaType(). + - SkCanvas pixels are not readable; for instance, SkCanvas is document-based. + - bitmap pixels could not be allocated. + - bitmap.rowBytes() is too small to contain one row of pixels. + + @param bitmap storage for pixels copied from SkCanvas + @param srcX offset into readable pixels on x-axis; may be negative + @param srcY offset into readable pixels on y-axis; may be negative + @return true if pixels were copied + + example: https://fiddle.skia.org/c/@Canvas_readPixels_3 + */ + bool readPixels(const SkBitmap& bitmap, int srcX, int srcY); + + /** Copies SkRect from pixels to SkCanvas. SkMatrix and clip are ignored. + Source SkRect corners are (0, 0) and (info.width(), info.height()). + Destination SkRect corners are (x, y) and + (imageInfo().width(), imageInfo().height()). + + Copies each readable pixel intersecting both rectangles, without scaling, + converting to imageInfo().colorType() and imageInfo().alphaType() if required. + + Pixels are writable when SkBaseDevice is raster, or backed by a GPU. + Pixels are not writable when SkCanvas is returned by SkDocument::beginPage, + returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility + class like DebugCanvas. + + Pixel values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination rectangles + are copied. SkCanvas pixels outside SkRect intersection are unchanged. + + Pass negative values for x or y to offset pixels to the left or + above SkCanvas pixels. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - pixels could not be converted to SkCanvas imageInfo().colorType() or + imageInfo().alphaType(). + - SkCanvas pixels are not writable; for instance, SkCanvas is document-based. + - rowBytes is too small to contain one row of pixels. + + @param info width, height, SkColorType, and SkAlphaType of pixels + @param pixels pixels to copy, of size info.height() times rowBytes, or larger + @param rowBytes size of one row of pixels; info.width() times pixel size, or larger + @param x offset into SkCanvas writable pixels on x-axis; may be negative + @param y offset into SkCanvas writable pixels on y-axis; may be negative + @return true if pixels were written to SkCanvas + + example: https://fiddle.skia.org/c/@Canvas_writePixels + */ + bool writePixels(const SkImageInfo& info, const void* pixels, size_t rowBytes, int x, int y); + + /** Copies SkRect from pixels to SkCanvas. SkMatrix and clip are ignored. + Source SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()). 
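A combined sketch of the readPixels()/writePixels() overloads above; the 16x16 scratch bitmap and the (32, 32) destination are arbitrary:

#include "include/core/SkBitmap.h"
#include "include/core/SkCanvas.h"
#include "include/core/SkImageInfo.h"

static bool copyRegionWithinCanvas(SkCanvas* canvas) {
    // Read a 16x16 region starting at (0, 0) into a caller-allocated bitmap...
    SkBitmap scratch;
    scratch.allocPixels(SkImageInfo::MakeN32Premul(16, 16));
    if (!canvas->readPixels(scratch, 0, 0)) {
        return false;  // e.g. a recording- or document-backed canvas
    }
    // ...then write the same pixels back with their top-left at (32, 32).
    return canvas->writePixels(scratch, 32, 32);
}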
+ + Destination SkRect corners are (x, y) and + (imageInfo().width(), imageInfo().height()). + + Copies each readable pixel intersecting both rectangles, without scaling, + converting to imageInfo().colorType() and imageInfo().alphaType() if required. + + Pixels are writable when SkBaseDevice is raster, or backed by a GPU. + Pixels are not writable when SkCanvas is returned by SkDocument::beginPage, + returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility + class like DebugCanvas. + + Pixel values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination rectangles + are copied. SkCanvas pixels outside SkRect intersection are unchanged. + + Pass negative values for x or y to offset pixels to the left or + above SkCanvas pixels. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - bitmap does not have allocated pixels. + - bitmap pixels could not be converted to SkCanvas imageInfo().colorType() or + imageInfo().alphaType(). + - SkCanvas pixels are not writable; for instance, SkCanvas is document based. + - bitmap pixels are inaccessible; for instance, bitmap wraps a texture. + + @param bitmap contains pixels copied to SkCanvas + @param x offset into SkCanvas writable pixels on x-axis; may be negative + @param y offset into SkCanvas writable pixels on y-axis; may be negative + @return true if pixels were written to SkCanvas + + example: https://fiddle.skia.org/c/@Canvas_writePixels_2 + example: https://fiddle.skia.org/c/@State_Stack_a + example: https://fiddle.skia.org/c/@State_Stack_b + */ + bool writePixels(const SkBitmap& bitmap, int x, int y); + + /** Saves SkMatrix and clip. + Calling restore() discards changes to SkMatrix and clip, + restoring the SkMatrix and clip to their state when save() was called. + + SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(), setMatrix(), + and resetMatrix(). Clip may be changed by clipRect(), clipRRect(), clipPath(), clipRegion(). + + Saved SkCanvas state is put on a stack; multiple calls to save() should be balance + by an equal number of calls to restore(). + + Call restoreToCount() with result to restore this and subsequent saves. + + @return depth of saved stack + + example: https://fiddle.skia.org/c/@Canvas_save + */ + int save(); + + /** Saves SkMatrix and clip, and allocates a SkBitmap for subsequent drawing. + Calling restore() discards changes to SkMatrix and clip, and draws the SkBitmap. + + SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(), + setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(), + clipPath(), clipRegion(). + + SkRect bounds suggests but does not define the SkBitmap size. To clip drawing to + a specific rectangle, use clipRect(). + + Optional SkPaint paint applies alpha, SkColorFilter, SkImageFilter, and + SkBlendMode when restore() is called. + + Call restoreToCount() with returned value to restore this and subsequent saves. + + @param bounds hint to limit the size of the layer; may be nullptr + @param paint graphics state for layer; may be nullptr + @return depth of saved stack + + example: https://fiddle.skia.org/c/@Canvas_saveLayer + example: https://fiddle.skia.org/c/@Canvas_saveLayer_4 + */ + int saveLayer(const SkRect* bounds, const SkPaint* paint); + + /** Saves SkMatrix and clip, and allocates a SkBitmap for subsequent drawing. + Calling restore() discards changes to SkMatrix and clip, and draws the SkBitmap. 
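A minimal sketch of balancing save() with restoreToCount(), as described above; the 45-degree rotation and the rectangle are illustrative:

#include "include/core/SkCanvas.h"

static void drawRotatedSquare(SkCanvas* canvas, const SkPaint& paint) {
    int saveCount = canvas->save();     // push the current matrix and clip
    canvas->rotate(45, 50, 50);         // affects draws only until the restore
    canvas->drawRect(SkRect::MakeXYWH(25, 25, 50, 50), paint);
    canvas->restoreToCount(saveCount);  // pop back to the saved state
}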
+ + SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(), + setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(), + clipPath(), clipRegion(). + + SkRect bounds suggests but does not define the layer size. To clip drawing to + a specific rectangle, use clipRect(). + + Optional SkPaint paint applies alpha, SkColorFilter, SkImageFilter, and + SkBlendMode when restore() is called. + + Call restoreToCount() with returned value to restore this and subsequent saves. + + @param bounds hint to limit the size of layer; may be nullptr + @param paint graphics state for layer; may be nullptr + @return depth of saved stack + */ + int saveLayer(const SkRect& bounds, const SkPaint* paint) { + return this->saveLayer(&bounds, paint); + } + + /** Saves SkMatrix and clip, and allocates SkBitmap for subsequent drawing. + + Calling restore() discards changes to SkMatrix and clip, + and blends layer with alpha opacity onto prior layer. + + SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(), + setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(), + clipPath(), clipRegion(). + + SkRect bounds suggests but does not define layer size. To clip drawing to + a specific rectangle, use clipRect(). + + alpha of zero is fully transparent, 255 is fully opaque. + + Call restoreToCount() with returned value to restore this and subsequent saves. + + @param bounds hint to limit the size of layer; may be nullptr + @param alpha opacity of layer + @return depth of saved stack + + example: https://fiddle.skia.org/c/@Canvas_saveLayerAlpha + */ + int saveLayerAlpha(const SkRect* bounds, U8CPU alpha); + + /** \enum SkCanvas::SaveLayerFlagsSet + SaveLayerFlags provides options that may be used in any combination in SaveLayerRec, + defining how layer allocated by saveLayer() operates. It may be set to zero, + kPreserveLCDText_SaveLayerFlag, kInitWithPrevious_SaveLayerFlag, or both flags. + */ + enum SaveLayerFlagsSet { + kPreserveLCDText_SaveLayerFlag = 1 << 1, + kInitWithPrevious_SaveLayerFlag = 1 << 2, //!< initializes with previous contents + // instead of matching previous layer's colortype, use F16 + kF16ColorType = 1 << 4, + }; + + typedef uint32_t SaveLayerFlags; + + /** \struct SkCanvas::SaveLayerRec + SaveLayerRec contains the state used to create the layer. + */ + struct SaveLayerRec { + /** Sets fBounds, fPaint, and fBackdrop to nullptr. Clears fSaveLayerFlags. + + @return empty SaveLayerRec + */ + SaveLayerRec() {} + + /** Sets fBounds, fPaint, and fSaveLayerFlags; sets fBackdrop to nullptr. + + @param bounds layer dimensions; may be nullptr + @param paint applied to layer when overlaying prior layer; may be nullptr + @param saveLayerFlags SaveLayerRec options to modify layer + @return SaveLayerRec with empty fBackdrop + */ + SaveLayerRec(const SkRect* bounds, const SkPaint* paint, SaveLayerFlags saveLayerFlags = 0) + : SaveLayerRec(bounds, paint, nullptr, 1.f, saveLayerFlags) {} + + /** Sets fBounds, fPaint, fBackdrop, and fSaveLayerFlags. + + @param bounds layer dimensions; may be nullptr + @param paint applied to layer when overlaying prior layer; + may be nullptr + @param backdrop If not null, this causes the current layer to be filtered by + backdrop, and then drawn into the new layer + (respecting the current clip). + If null, the new layer is initialized with transparent-black. 
+ @param saveLayerFlags SaveLayerRec options to modify layer + @return SaveLayerRec fully specified + */ + SaveLayerRec(const SkRect* bounds, const SkPaint* paint, const SkImageFilter* backdrop, + SaveLayerFlags saveLayerFlags) + : SaveLayerRec(bounds, paint, backdrop, 1.f, saveLayerFlags) {} + + /** hints at layer size limit */ + const SkRect* fBounds = nullptr; + + /** modifies overlay */ + const SkPaint* fPaint = nullptr; + + /** + * If not null, this triggers the same initialization behavior as setting + * kInitWithPrevious_SaveLayerFlag on fSaveLayerFlags: the current layer is copied into + * the new layer, rather than initializing the new layer with transparent-black. + * This is then filtered by fBackdrop (respecting the current clip). + */ + const SkImageFilter* fBackdrop = nullptr; + + /** preserves LCD text, creates with prior layer contents */ + SaveLayerFlags fSaveLayerFlags = 0; + + private: + friend class SkCanvas; + friend class SkCanvasPriv; + + SaveLayerRec(const SkRect* bounds, const SkPaint* paint, const SkImageFilter* backdrop, + SkScalar backdropScale, SaveLayerFlags saveLayerFlags) + : fBounds(bounds) + , fPaint(paint) + , fBackdrop(backdrop) + , fSaveLayerFlags(saveLayerFlags) + , fExperimentalBackdropScale(backdropScale) {} + + // Relative scale factor that the image content used to initialize the layer when the + // kInitFromPrevious flag or a backdrop filter is used. + SkScalar fExperimentalBackdropScale = 1.f; + }; + + /** Saves SkMatrix and clip, and allocates SkBitmap for subsequent drawing. + + Calling restore() discards changes to SkMatrix and clip, + and blends SkBitmap with alpha opacity onto the prior layer. + + SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(), + setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(), + clipPath(), clipRegion(). + + SaveLayerRec contains the state used to create the layer. + + Call restoreToCount() with returned value to restore this and subsequent saves. + + @param layerRec layer state + @return depth of save state stack before this call was made. + + example: https://fiddle.skia.org/c/@Canvas_saveLayer_3 + */ + int saveLayer(const SaveLayerRec& layerRec); + + /** Removes changes to SkMatrix and clip since SkCanvas state was + last saved. The state is removed from the stack. + + Does nothing if the stack is empty. + + example: https://fiddle.skia.org/c/@AutoCanvasRestore_restore + + example: https://fiddle.skia.org/c/@Canvas_restore + */ + void restore(); + + /** Returns the number of saved states, each containing: SkMatrix and clip. + Equals the number of save() calls less the number of restore() calls plus one. + The save count of a new canvas is one. + + @return depth of save state stack + + example: https://fiddle.skia.org/c/@Canvas_getSaveCount + */ + int getSaveCount() const; + + /** Restores state to SkMatrix and clip values when save(), saveLayer(), + saveLayerPreserveLCDTextRequests(), or saveLayerAlpha() returned saveCount. + + Does nothing if saveCount is greater than state stack count. + Restores state to initial values if saveCount is less than or equal to one. + + @param saveCount depth of state stack to restore + + example: https://fiddle.skia.org/c/@Canvas_restoreToCount + */ + void restoreToCount(int saveCount); + + /** Translates SkMatrix by dx along the x-axis and dy along the y-axis. + + Mathematically, replaces SkMatrix with a translation matrix + premultiplied with SkMatrix. 
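A hedged sketch of saveLayer() with a backdrop filter via SaveLayerRec; it assumes SkImageFilters::Blur() from include/effects/SkImageFilters.h is part of the vendored tree, and the 8-pixel sigma is arbitrary:

#include "include/core/SkCanvas.h"
#include "include/effects/SkImageFilters.h"

static void blurBackdropInRect(SkCanvas* canvas, const SkRect& area) {
    // Blur whatever is already on the canvas inside `area`, then let the layer's
    // contents draw on top of the blurred result.
    sk_sp<SkImageFilter> blur = SkImageFilters::Blur(8.0f, 8.0f, nullptr);
    SkCanvas::SaveLayerRec rec(&area, /*paint=*/nullptr, blur.get(), /*saveLayerFlags=*/0);
    canvas->saveLayer(rec);
    // ... draw the layer contents here ...
    canvas->restore();
}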
+ + This has the effect of moving the drawing by (dx, dy) before transforming + the result with SkMatrix. + + @param dx distance to translate on x-axis + @param dy distance to translate on y-axis + + example: https://fiddle.skia.org/c/@Canvas_translate + */ + void translate(SkScalar dx, SkScalar dy); + + /** Scales SkMatrix by sx on the x-axis and sy on the y-axis. + + Mathematically, replaces SkMatrix with a scale matrix + premultiplied with SkMatrix. + + This has the effect of scaling the drawing by (sx, sy) before transforming + the result with SkMatrix. + + @param sx amount to scale on x-axis + @param sy amount to scale on y-axis + + example: https://fiddle.skia.org/c/@Canvas_scale + */ + void scale(SkScalar sx, SkScalar sy); + + /** Rotates SkMatrix by degrees. Positive degrees rotates clockwise. + + Mathematically, replaces SkMatrix with a rotation matrix + premultiplied with SkMatrix. + + This has the effect of rotating the drawing by degrees before transforming + the result with SkMatrix. + + @param degrees amount to rotate, in degrees + + example: https://fiddle.skia.org/c/@Canvas_rotate + */ + void rotate(SkScalar degrees); + + /** Rotates SkMatrix by degrees about a point at (px, py). Positive degrees rotates + clockwise. + + Mathematically, constructs a rotation matrix; premultiplies the rotation matrix by + a translation matrix; then replaces SkMatrix with the resulting matrix + premultiplied with SkMatrix. + + This has the effect of rotating the drawing about a given point before + transforming the result with SkMatrix. + + @param degrees amount to rotate, in degrees + @param px x-axis value of the point to rotate about + @param py y-axis value of the point to rotate about + + example: https://fiddle.skia.org/c/@Canvas_rotate_2 + */ + void rotate(SkScalar degrees, SkScalar px, SkScalar py); + + /** Skews SkMatrix by sx on the x-axis and sy on the y-axis. A positive value of sx + skews the drawing right as y-axis values increase; a positive value of sy skews + the drawing down as x-axis values increase. + + Mathematically, replaces SkMatrix with a skew matrix premultiplied with SkMatrix. + + This has the effect of skewing the drawing by (sx, sy) before transforming + the result with SkMatrix. + + @param sx amount to skew on x-axis + @param sy amount to skew on y-axis + + example: https://fiddle.skia.org/c/@Canvas_skew + */ + void skew(SkScalar sx, SkScalar sy); + + /** Replaces SkMatrix with matrix premultiplied with existing SkMatrix. + + This has the effect of transforming the drawn geometry by matrix, before + transforming the result with existing SkMatrix. + + @param matrix matrix to premultiply with existing SkMatrix + + example: https://fiddle.skia.org/c/@Canvas_concat + */ + void concat(const SkMatrix& matrix); + void concat(const SkM44&); + + /** Replaces SkMatrix with matrix. + Unlike concat(), any prior matrix state is overwritten. + + @param matrix matrix to copy, replacing existing SkMatrix + + example: https://fiddle.skia.org/c/@Canvas_setMatrix + */ + void setMatrix(const SkM44& matrix); + + // DEPRECATED -- use SkM44 version + void setMatrix(const SkMatrix& matrix); + + /** Sets SkMatrix to the identity matrix. + Any prior matrix state is overwritten. + + example: https://fiddle.skia.org/c/@Canvas_resetMatrix + */ + void resetMatrix(); + + /** Replaces clip with the intersection or difference of clip and rect, + with an aliased or anti-aliased clip edge. rect is transformed by SkMatrix + before it is combined with clip. 
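A short sketch of how translate() and scale() premultiply into the current matrix; the specific offsets and the circle are illustrative:

#include "include/core/SkCanvas.h"

static void drawScaledAndTranslated(SkCanvas* canvas, const SkPaint& paint) {
    canvas->save();
    canvas->translate(100, 50);           // premultiplied into the current matrix...
    canvas->scale(2, 2);                  // ...so geometry is scaled, then translated
    canvas->drawCircle(0, 0, 10, paint);  // lands at (100, 50) with radius 20
    canvas->restore();
}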
+ + @param rect SkRect to combine with clip + @param op SkClipOp to apply to clip + @param doAntiAlias true if clip is to be anti-aliased + + example: https://fiddle.skia.org/c/@Canvas_clipRect + */ + void clipRect(const SkRect& rect, SkClipOp op, bool doAntiAlias); + + /** Replaces clip with the intersection or difference of clip and rect. + Resulting clip is aliased; pixels are fully contained by the clip. + rect is transformed by SkMatrix before it is combined with clip. + + @param rect SkRect to combine with clip + @param op SkClipOp to apply to clip + */ + void clipRect(const SkRect& rect, SkClipOp op) { + this->clipRect(rect, op, false); + } + + /** Replaces clip with the intersection of clip and rect. + Resulting clip is aliased; pixels are fully contained by the clip. + rect is transformed by SkMatrix + before it is combined with clip. + + @param rect SkRect to combine with clip + @param doAntiAlias true if clip is to be anti-aliased + */ + void clipRect(const SkRect& rect, bool doAntiAlias = false) { + this->clipRect(rect, SkClipOp::kIntersect, doAntiAlias); + } + + void clipIRect(const SkIRect& irect, SkClipOp op = SkClipOp::kIntersect) { + this->clipRect(SkRect::Make(irect), op, false); + } + + /** Sets the maximum clip rectangle, which can be set by clipRect(), clipRRect() and + clipPath() and intersect the current clip with the specified rect. + The maximum clip affects only future clipping operations; it is not retroactive. + The clip restriction is not recorded in pictures. + + Pass an empty rect to disable maximum clip. + This private API is for use by Android framework only. + + DEPRECATED: Replace usage with SkAndroidFrameworkUtils::replaceClip() + + @param rect maximum allowed clip in device coordinates + */ + void androidFramework_setDeviceClipRestriction(const SkIRect& rect); + + /** Replaces clip with the intersection or difference of clip and rrect, + with an aliased or anti-aliased clip edge. + rrect is transformed by SkMatrix + before it is combined with clip. + + @param rrect SkRRect to combine with clip + @param op SkClipOp to apply to clip + @param doAntiAlias true if clip is to be anti-aliased + + example: https://fiddle.skia.org/c/@Canvas_clipRRect + */ + void clipRRect(const SkRRect& rrect, SkClipOp op, bool doAntiAlias); + + /** Replaces clip with the intersection or difference of clip and rrect. + Resulting clip is aliased; pixels are fully contained by the clip. + rrect is transformed by SkMatrix before it is combined with clip. + + @param rrect SkRRect to combine with clip + @param op SkClipOp to apply to clip + */ + void clipRRect(const SkRRect& rrect, SkClipOp op) { + this->clipRRect(rrect, op, false); + } + + /** Replaces clip with the intersection of clip and rrect, + with an aliased or anti-aliased clip edge. + rrect is transformed by SkMatrix before it is combined with clip. + + @param rrect SkRRect to combine with clip + @param doAntiAlias true if clip is to be anti-aliased + */ + void clipRRect(const SkRRect& rrect, bool doAntiAlias = false) { + this->clipRRect(rrect, SkClipOp::kIntersect, doAntiAlias); + } + + /** Replaces clip with the intersection or difference of clip and path, + with an aliased or anti-aliased clip edge. SkPath::FillType determines if path + describes the area inside or outside its contours; and if path contour overlaps + itself or another path contour, whether the overlaps form part of the area. + path is transformed by SkMatrix before it is combined with clip. 
+ + @param path SkPath to combine with clip + @param op SkClipOp to apply to clip + @param doAntiAlias true if clip is to be anti-aliased + + example: https://fiddle.skia.org/c/@Canvas_clipPath + */ + void clipPath(const SkPath& path, SkClipOp op, bool doAntiAlias); + + /** Replaces clip with the intersection or difference of clip and path. + Resulting clip is aliased; pixels are fully contained by the clip. + SkPath::FillType determines if path + describes the area inside or outside its contours; and if path contour overlaps + itself or another path contour, whether the overlaps form part of the area. + path is transformed by SkMatrix + before it is combined with clip. + + @param path SkPath to combine with clip + @param op SkClipOp to apply to clip + */ + void clipPath(const SkPath& path, SkClipOp op) { + this->clipPath(path, op, false); + } + + /** Replaces clip with the intersection of clip and path. + Resulting clip is aliased; pixels are fully contained by the clip. + SkPath::FillType determines if path + describes the area inside or outside its contours; and if path contour overlaps + itself or another path contour, whether the overlaps form part of the area. + path is transformed by SkMatrix before it is combined with clip. + + @param path SkPath to combine with clip + @param doAntiAlias true if clip is to be anti-aliased + */ + void clipPath(const SkPath& path, bool doAntiAlias = false) { + this->clipPath(path, SkClipOp::kIntersect, doAntiAlias); + } + + void clipShader(sk_sp<SkShader>, SkClipOp = SkClipOp::kIntersect); + + /** Replaces clip with the intersection or difference of clip and SkRegion deviceRgn. + Resulting clip is aliased; pixels are fully contained by the clip. + deviceRgn is unaffected by SkMatrix. + + @param deviceRgn SkRegion to combine with clip + @param op SkClipOp to apply to clip + + example: https://fiddle.skia.org/c/@Canvas_clipRegion + */ + void clipRegion(const SkRegion& deviceRgn, SkClipOp op = SkClipOp::kIntersect); + + /** Returns true if SkRect rect, transformed by SkMatrix, can be quickly determined to be + outside of clip. May return false even though rect is outside of clip. + + Use to check if an area to be drawn is clipped out, to skip subsequent draw calls. + + @param rect SkRect to compare with clip + @return true if rect, transformed by SkMatrix, does not intersect clip + + example: https://fiddle.skia.org/c/@Canvas_quickReject + */ + bool quickReject(const SkRect& rect) const; + + /** Returns true if path, transformed by SkMatrix, can be quickly determined to be + outside of clip. May return false even though path is outside of clip. + + Use to check if an area to be drawn is clipped out, to skip subsequent draw calls. + + @param path SkPath to compare with clip + @return true if path, transformed by SkMatrix, does not intersect clip + + example: https://fiddle.skia.org/c/@Canvas_quickReject_2 + */ + bool quickReject(const SkPath& path) const; + + /** Returns bounds of clip, transformed by inverse of SkMatrix. If clip is empty, + return SkRect::MakeEmpty, where all SkRect sides equal zero. + + SkRect returned is outset by one to account for partial pixel coverage if clip + is anti-aliased. + + @return bounds of clip in local coordinates + + example: https://fiddle.skia.org/c/@Canvas_getLocalClipBounds + */ + SkRect getLocalClipBounds() const; + + /** Returns bounds of clip, transformed by inverse of SkMatrix. If clip is empty, + return false, and set bounds to SkRect::MakeEmpty, where all SkRect sides equal zero. 
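A sketch of clipping with clipRRect() and then using quickReject() to skip fully clipped draws; the rounded rectangle and the candidate rect are made up:

#include "include/core/SkCanvas.h"
#include "include/core/SkRRect.h"

static void drawInsideRoundedClip(SkCanvas* canvas, const SkPaint& paint) {
    canvas->save();
    // Intersect the clip with an anti-aliased rounded rectangle.
    SkRRect rrect = SkRRect::MakeRectXY(SkRect::MakeWH(200, 120), 16, 16);
    canvas->clipRRect(rrect, /*doAntiAlias=*/true);

    SkRect candidate = SkRect::MakeXYWH(500, 500, 10, 10);
    if (!canvas->quickReject(candidate)) {  // skip work that is fully clipped out
        canvas->drawRect(candidate, paint);
    }
    canvas->restore();
}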
+ + bounds is outset by one to account for partial pixel coverage if clip + is anti-aliased. + + @param bounds SkRect of clip in local coordinates + @return true if clip bounds is not empty + */ + bool getLocalClipBounds(SkRect* bounds) const { + *bounds = this->getLocalClipBounds(); + return !bounds->isEmpty(); + } + + /** Returns SkIRect bounds of clip, unaffected by SkMatrix. If clip is empty, + return SkRect::MakeEmpty, where all SkRect sides equal zero. + + Unlike getLocalClipBounds(), returned SkIRect is not outset. + + @return bounds of clip in SkBaseDevice coordinates + + example: https://fiddle.skia.org/c/@Canvas_getDeviceClipBounds + */ + SkIRect getDeviceClipBounds() const; + + /** Returns SkIRect bounds of clip, unaffected by SkMatrix. If clip is empty, + return false, and set bounds to SkRect::MakeEmpty, where all SkRect sides equal zero. + + Unlike getLocalClipBounds(), bounds is not outset. + + @param bounds SkRect of clip in device coordinates + @return true if clip bounds is not empty + */ + bool getDeviceClipBounds(SkIRect* bounds) const { + *bounds = this->getDeviceClipBounds(); + return !bounds->isEmpty(); + } + + /** Fills clip with color color. + mode determines how ARGB is combined with destination. + + @param color unpremultiplied ARGB + @param mode SkBlendMode used to combine source color and destination + + example: https://fiddle.skia.org/c/@Canvas_drawColor + */ + void drawColor(SkColor color, SkBlendMode mode = SkBlendMode::kSrcOver) { + this->drawColor(SkColor4f::FromColor(color), mode); + } + + /** Fills clip with color color. + mode determines how ARGB is combined with destination. + + @param color SkColor4f representing unpremultiplied color. + @param mode SkBlendMode used to combine source color and destination + */ + void drawColor(const SkColor4f& color, SkBlendMode mode = SkBlendMode::kSrcOver); + + /** Fills clip with color color using SkBlendMode::kSrc. + This has the effect of replacing all pixels contained by clip with color. + + @param color unpremultiplied ARGB + */ + void clear(SkColor color) { + this->clear(SkColor4f::FromColor(color)); + } + + /** Fills clip with color color using SkBlendMode::kSrc. + This has the effect of replacing all pixels contained by clip with color. + + @param color SkColor4f representing unpremultiplied color. + */ + void clear(const SkColor4f& color) { + this->drawColor(color, SkBlendMode::kSrc); + } + + /** Makes SkCanvas contents undefined. Subsequent calls that read SkCanvas pixels, + such as drawing with SkBlendMode, return undefined results. discard() does + not change clip or SkMatrix. + + discard() may do nothing, depending on the implementation of SkSurface or SkBaseDevice + that created SkCanvas. + + discard() allows optimized performance on subsequent draws by removing + cached data associated with SkSurface or SkBaseDevice. + It is not necessary to call discard() once done with SkCanvas; + any cached data is deleted when owning SkSurface or SkBaseDevice is deleted. + */ + void discard() { this->onDiscard(); } + + /** Fills clip with SkPaint paint. SkPaint components, SkShader, + SkColorFilter, SkImageFilter, and SkBlendMode affect drawing; + SkMaskFilter and SkPathEffect in paint are ignored. + + @param paint graphics state used to fill SkCanvas + + example: https://fiddle.skia.org/c/@Canvas_drawPaint + */ + void drawPaint(const SkPaint& paint); + + /** \enum SkCanvas::PointMode + Selects if an array of points are drawn as discrete points, as lines, or as + an open polygon. 
+ */ + enum PointMode { + kPoints_PointMode, //!< draw each point separately + kLines_PointMode, //!< draw each pair of points as a line segment + kPolygon_PointMode, //!< draw the array of points as a open polygon + }; + + /** Draws pts using clip, SkMatrix and SkPaint paint. + count is the number of points; if count is less than one, has no effect. + mode may be one of: kPoints_PointMode, kLines_PointMode, or kPolygon_PointMode. + + If mode is kPoints_PointMode, the shape of point drawn depends on paint + SkPaint::Cap. If paint is set to SkPaint::kRound_Cap, each point draws a + circle of diameter SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap + or SkPaint::kButt_Cap, each point draws a square of width and height + SkPaint stroke width. + + If mode is kLines_PointMode, each pair of points draws a line segment. + One line is drawn for every two points; each point is used once. If count is odd, + the final point is ignored. + + If mode is kPolygon_PointMode, each adjacent pair of points draws a line segment. + count minus one lines are drawn; the first and last point are used once. + + Each line segment respects paint SkPaint::Cap and SkPaint stroke width. + SkPaint::Style is ignored, as if were set to SkPaint::kStroke_Style. + + Always draws each element one at a time; is not affected by + SkPaint::Join, and unlike drawPath(), does not create a mask from all points + and lines before drawing. + + @param mode whether pts draws points or lines + @param count number of points in the array + @param pts array of points to draw + @param paint stroke, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawPoints + */ + void drawPoints(PointMode mode, size_t count, const SkPoint pts[], const SkPaint& paint); + + /** Draws point at (x, y) using clip, SkMatrix and SkPaint paint. + + The shape of point drawn depends on paint SkPaint::Cap. + If paint is set to SkPaint::kRound_Cap, draw a circle of diameter + SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap or SkPaint::kButt_Cap, + draw a square of width and height SkPaint stroke width. + SkPaint::Style is ignored, as if were set to SkPaint::kStroke_Style. + + @param x left edge of circle or square + @param y top edge of circle or square + @param paint stroke, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawPoint + */ + void drawPoint(SkScalar x, SkScalar y, const SkPaint& paint); + + /** Draws point p using clip, SkMatrix and SkPaint paint. + + The shape of point drawn depends on paint SkPaint::Cap. + If paint is set to SkPaint::kRound_Cap, draw a circle of diameter + SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap or SkPaint::kButt_Cap, + draw a square of width and height SkPaint stroke width. + SkPaint::Style is ignored, as if were set to SkPaint::kStroke_Style. + + @param p top-left edge of circle or square + @param paint stroke, blend, color, and so on, used to draw + */ + void drawPoint(SkPoint p, const SkPaint& paint) { + this->drawPoint(p.x(), p.y(), paint); + } + + /** Draws line segment from (x0, y0) to (x1, y1) using clip, SkMatrix, and SkPaint paint. + In paint: SkPaint stroke width describes the line thickness; + SkPaint::Cap draws the end rounded or square; + SkPaint::Style is ignored, as if were set to SkPaint::kStroke_Style. 
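A small sketch of drawColor() and drawPoints() with kPolygon_PointMode; the three points and the stroke settings are illustrative:

#include "include/core/SkCanvas.h"

static void drawOpenPolyline(SkCanvas* canvas) {
    canvas->drawColor(SK_ColorWHITE);  // fills the clip with opaque white

    SkPaint stroke;
    stroke.setStrokeWidth(4);
    stroke.setStrokeCap(SkPaint::kRound_Cap);

    // kPolygon_PointMode connects adjacent points: three points, two segments.
    const SkPoint pts[] = {{10, 80}, {60, 20}, {110, 80}};
    canvas->drawPoints(SkCanvas::kPolygon_PointMode, 3, pts, stroke);
}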
+ + @param x0 start of line segment on x-axis + @param y0 start of line segment on y-axis + @param x1 end of line segment on x-axis + @param y1 end of line segment on y-axis + @param paint stroke, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawLine + */ + void drawLine(SkScalar x0, SkScalar y0, SkScalar x1, SkScalar y1, const SkPaint& paint); + + /** Draws line segment from p0 to p1 using clip, SkMatrix, and SkPaint paint. + In paint: SkPaint stroke width describes the line thickness; + SkPaint::Cap draws the end rounded or square; + SkPaint::Style is ignored, as if were set to SkPaint::kStroke_Style. + + @param p0 start of line segment + @param p1 end of line segment + @param paint stroke, blend, color, and so on, used to draw + */ + void drawLine(SkPoint p0, SkPoint p1, const SkPaint& paint) { + this->drawLine(p0.x(), p0.y(), p1.x(), p1.y(), paint); + } + + /** Draws SkRect rect using clip, SkMatrix, and SkPaint paint. + In paint: SkPaint::Style determines if rectangle is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness, and + SkPaint::Join draws the corners rounded or square. + + @param rect rectangle to draw + @param paint stroke or fill, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawRect + */ + void drawRect(const SkRect& rect, const SkPaint& paint); + + /** Draws SkIRect rect using clip, SkMatrix, and SkPaint paint. + In paint: SkPaint::Style determines if rectangle is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness, and + SkPaint::Join draws the corners rounded or square. + + @param rect rectangle to draw + @param paint stroke or fill, blend, color, and so on, used to draw + */ + void drawIRect(const SkIRect& rect, const SkPaint& paint) { + SkRect r; + r.set(rect); // promotes the ints to scalars + this->drawRect(r, paint); + } + + /** Draws SkRegion region using clip, SkMatrix, and SkPaint paint. + In paint: SkPaint::Style determines if rectangle is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness, and + SkPaint::Join draws the corners rounded or square. + + @param region region to draw + @param paint SkPaint stroke or fill, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawRegion + */ + void drawRegion(const SkRegion& region, const SkPaint& paint); + + /** Draws oval oval using clip, SkMatrix, and SkPaint. + In paint: SkPaint::Style determines if oval is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness. + + @param oval SkRect bounds of oval + @param paint SkPaint stroke or fill, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawOval + */ + void drawOval(const SkRect& oval, const SkPaint& paint); + + /** Draws SkRRect rrect using clip, SkMatrix, and SkPaint paint. + In paint: SkPaint::Style determines if rrect is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness. + + rrect may represent a rectangle, circle, oval, uniformly rounded rectangle, or + may have any combination of positive non-square radii for the four corners. 
+ + @param rrect SkRRect with up to eight corner radii to draw + @param paint SkPaint stroke or fill, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawRRect + */ + void drawRRect(const SkRRect& rrect, const SkPaint& paint); + + /** Draws SkRRect outer and inner + using clip, SkMatrix, and SkPaint paint. + outer must contain inner or the drawing is undefined. + In paint: SkPaint::Style determines if SkRRect is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness. + If stroked and SkRRect corner has zero length radii, SkPaint::Join can + draw corners rounded or square. + + GPU-backed platforms optimize drawing when both outer and inner are + concave and outer contains inner. These platforms may not be able to draw + SkPath built with identical data as fast. + + @param outer SkRRect outer bounds to draw + @param inner SkRRect inner bounds to draw + @param paint SkPaint stroke or fill, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawDRRect_a + example: https://fiddle.skia.org/c/@Canvas_drawDRRect_b + */ + void drawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint); + + /** Draws circle at (cx, cy) with radius using clip, SkMatrix, and SkPaint paint. + If radius is zero or less, nothing is drawn. + In paint: SkPaint::Style determines if circle is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness. + + @param cx circle center on the x-axis + @param cy circle center on the y-axis + @param radius half the diameter of circle + @param paint SkPaint stroke or fill, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawCircle + */ + void drawCircle(SkScalar cx, SkScalar cy, SkScalar radius, const SkPaint& paint); + + /** Draws circle at center with radius using clip, SkMatrix, and SkPaint paint. + If radius is zero or less, nothing is drawn. + In paint: SkPaint::Style determines if circle is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness. + + @param center circle center + @param radius half the diameter of circle + @param paint SkPaint stroke or fill, blend, color, and so on, used to draw + */ + void drawCircle(SkPoint center, SkScalar radius, const SkPaint& paint) { + this->drawCircle(center.x(), center.y(), radius, paint); + } + + /** Draws arc using clip, SkMatrix, and SkPaint paint. + + Arc is part of oval bounded by oval, sweeping from startAngle to startAngle plus + sweepAngle. startAngle and sweepAngle are in degrees. + + startAngle of zero places start point at the right middle edge of oval. + A positive sweepAngle places arc end point clockwise from start point; + a negative sweepAngle places arc end point counterclockwise from start point. + sweepAngle may exceed 360 degrees, a full circle. + If useCenter is true, draw a wedge that includes lines from oval + center to arc end points. If useCenter is false, draw arc between end points. + + If SkRect oval is empty or sweepAngle is zero, nothing is drawn. 
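+
+        A minimal sketch, assuming a valid SkCanvas* canvas; the radii and the
+        270-degree sweep are illustrative only:
+
+            SkPaint p;
+            p.setAntiAlias(true);
+            canvas->drawCircle(64, 64, 48, p);            // filled circle
+
+            p.setStyle(SkPaint::kStroke_Style);
+            p.setStrokeWidth(6);
+            SkRect oval = SkRect::MakeXYWH(144, 16, 96, 96);
+            canvas->drawArc(oval, -90, 270, false, p);    // useCenter=false: open arc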
+ + @param oval SkRect bounds of oval containing arc to draw + @param startAngle angle in degrees where arc begins + @param sweepAngle sweep angle in degrees; positive is clockwise + @param useCenter if true, include the center of the oval + @param paint SkPaint stroke or fill, blend, color, and so on, used to draw + */ + void drawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle, + bool useCenter, const SkPaint& paint); + + /** Draws SkRRect bounded by SkRect rect, with corner radii (rx, ry) using clip, + SkMatrix, and SkPaint paint. + + In paint: SkPaint::Style determines if SkRRect is stroked or filled; + if stroked, SkPaint stroke width describes the line thickness. + If rx or ry are less than zero, they are treated as if they are zero. + If rx plus ry exceeds rect width or rect height, radii are scaled down to fit. + If rx and ry are zero, SkRRect is drawn as SkRect and if stroked is affected by + SkPaint::Join. + + @param rect SkRect bounds of SkRRect to draw + @param rx axis length on x-axis of oval describing rounded corners + @param ry axis length on y-axis of oval describing rounded corners + @param paint stroke, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawRoundRect + */ + void drawRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry, const SkPaint& paint); + + /** Draws SkPath path using clip, SkMatrix, and SkPaint paint. + SkPath contains an array of path contour, each of which may be open or closed. + + In paint: SkPaint::Style determines if SkRRect is stroked or filled: + if filled, SkPath::FillType determines whether path contour describes inside or + outside of fill; if stroked, SkPaint stroke width describes the line thickness, + SkPaint::Cap describes line ends, and SkPaint::Join describes how + corners are drawn. + + @param path SkPath to draw + @param paint stroke, blend, color, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawPath + */ + void drawPath(const SkPath& path, const SkPaint& paint); + + void drawImage(const SkImage* image, SkScalar left, SkScalar top) { + this->drawImage(image, left, top, SkSamplingOptions(), nullptr); + } + void drawImage(const sk_sp<SkImage>& image, SkScalar left, SkScalar top) { + this->drawImage(image.get(), left, top, SkSamplingOptions(), nullptr); + } + + /** \enum SkCanvas::SrcRectConstraint + SrcRectConstraint controls the behavior at the edge of source SkRect, + provided to drawImageRect() when there is any filtering. If kStrict is set, + then extra code is used to ensure it nevers samples outside of the src-rect. 
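+
+        A minimal sketch of the constraint in use, assuming a valid SkCanvas*
+        canvas and an sk_sp<SkImage> image decoded elsewhere; the source and
+        destination rectangles are illustrative only:
+
+            SkRect src = SkRect::MakeXYWH(0, 0, 64, 64);      // sub-rect of image
+            SkRect dst = SkRect::MakeXYWH(16, 16, 128, 128);
+            canvas->drawImageRect(image, src, dst,
+                                  SkSamplingOptions(SkFilterMode::kLinear),
+                                  nullptr, SkCanvas::kStrict_SrcRectConstraint);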
+ */ + enum SrcRectConstraint { + kStrict_SrcRectConstraint, //!< sample only inside bounds; slower + kFast_SrcRectConstraint, //!< sample outside bounds; faster + }; + + void drawImage(const SkImage*, SkScalar x, SkScalar y, const SkSamplingOptions&, + const SkPaint* = nullptr); + void drawImage(const sk_sp<SkImage>& image, SkScalar x, SkScalar y, + const SkSamplingOptions& sampling, const SkPaint* paint = nullptr) { + this->drawImage(image.get(), x, y, sampling, paint); + } + void drawImageRect(const SkImage*, const SkRect& src, const SkRect& dst, + const SkSamplingOptions&, const SkPaint*, SrcRectConstraint); + void drawImageRect(const SkImage*, const SkRect& dst, const SkSamplingOptions&, + const SkPaint* = nullptr); + void drawImageRect(const sk_sp<SkImage>& image, const SkRect& src, const SkRect& dst, + const SkSamplingOptions& sampling, const SkPaint* paint, + SrcRectConstraint constraint) { + this->drawImageRect(image.get(), src, dst, sampling, paint, constraint); + } + void drawImageRect(const sk_sp<SkImage>& image, const SkRect& dst, + const SkSamplingOptions& sampling, const SkPaint* paint = nullptr) { + this->drawImageRect(image.get(), dst, sampling, paint); + } + + /** Draws SkImage image stretched proportionally to fit into SkRect dst. + SkIRect center divides the image into nine sections: four sides, four corners, and + the center. Corners are unmodified or scaled down proportionately if their sides + are larger than dst; center and four sides are scaled to fit remaining space, if any. + + Additionally transform draw using clip, SkMatrix, and optional SkPaint paint. + + If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter, and + SkBlendMode. If image is kAlpha_8_SkColorType, apply SkShader. + If paint contains SkMaskFilter, generate mask from image bounds. + Any SkMaskFilter on paint is ignored as is paint anti-aliasing state. + + If generated mask extends beyond image bounds, replicate image edge colors, just + as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set + replicates the image edge color when it samples outside of its bounds. + + @param image SkImage containing pixels, dimensions, and format + @param center SkIRect edge of image corners and sides + @param dst destination SkRect of image to draw to + @param filter what technique to use when sampling the image + @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter, + and so on; or nullptr + */ + void drawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst, + SkFilterMode filter, const SkPaint* paint = nullptr); + + /** \struct SkCanvas::Lattice + SkCanvas::Lattice divides SkBitmap or SkImage into a rectangular grid. + Grid entries on even columns and even rows are fixed; these entries are + always drawn at their original size if the destination is large enough. + If the destination side is too small to hold the fixed entries, all fixed + entries are proportionately scaled down to fit. + The grid entries not on even columns and rows are scaled to fit the + remaining space, if any. + */ + struct Lattice { + + /** \enum SkCanvas::Lattice::RectType + Optional setting per rectangular grid entry to make it transparent, + or to fill the grid entry with a color. 
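+
+        For the related nine-patch entry point drawImageNine() above, a minimal
+        sketch, assuming a valid SkCanvas* canvas and an sk_sp<SkImage> image
+        whose center region is safe to stretch; the rectangles are illustrative:
+
+            SkIRect center = SkIRect::MakeLTRB(8, 8, 24, 24);   // stretchable middle
+            SkRect  dst    = SkRect::MakeXYWH(16, 16, 200, 120);
+            canvas->drawImageNine(image.get(), center, dst,
+                                  SkFilterMode::kLinear, nullptr);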
+ */ + enum RectType : uint8_t { + kDefault = 0, //!< draws SkBitmap into lattice rectangle + kTransparent, //!< skips lattice rectangle by making it transparent + kFixedColor, //!< draws one of fColors into lattice rectangle + }; + + const int* fXDivs; //!< x-axis values dividing bitmap + const int* fYDivs; //!< y-axis values dividing bitmap + const RectType* fRectTypes; //!< array of fill types + int fXCount; //!< number of x-coordinates + int fYCount; //!< number of y-coordinates + const SkIRect* fBounds; //!< source bounds to draw from + const SkColor* fColors; //!< array of colors + }; + + /** Draws SkImage image stretched proportionally to fit into SkRect dst. + + SkCanvas::Lattice lattice divides image into a rectangular grid. + Each intersection of an even-numbered row and column is fixed; + fixed lattice elements never scale larger than their initial + size and shrink proportionately when all fixed elements exceed the bitmap + dimension. All other grid elements scale to fill the available space, if any. + + Additionally transform draw using clip, SkMatrix, and optional SkPaint paint. + + If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter, and + SkBlendMode. If image is kAlpha_8_SkColorType, apply SkShader. + If paint contains SkMaskFilter, generate mask from image bounds. + Any SkMaskFilter on paint is ignored as is paint anti-aliasing state. + + If generated mask extends beyond bitmap bounds, replicate bitmap edge colors, + just as SkShader made from SkShader::MakeBitmapShader with + SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples + outside of its bounds. + + @param image SkImage containing pixels, dimensions, and format + @param lattice division of bitmap into fixed and variable rectangles + @param dst destination SkRect of image to draw to + @param filter what technique to use when sampling the image + @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter, + and so on; or nullptr + */ + void drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst, + SkFilterMode filter, const SkPaint* paint = nullptr); + void drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst) { + this->drawImageLattice(image, lattice, dst, SkFilterMode::kNearest, nullptr); + } + + /** + * Experimental. Controls anti-aliasing of each edge of images in an image-set. + */ + enum QuadAAFlags : unsigned { + kLeft_QuadAAFlag = 0b0001, + kTop_QuadAAFlag = 0b0010, + kRight_QuadAAFlag = 0b0100, + kBottom_QuadAAFlag = 0b1000, + + kNone_QuadAAFlags = 0b0000, + kAll_QuadAAFlags = 0b1111, + }; + + /** This is used by the experimental API below. 
*/ + struct SK_API ImageSetEntry { + ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect, const SkRect& dstRect, + int matrixIndex, float alpha, unsigned aaFlags, bool hasClip); + + ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect, const SkRect& dstRect, + float alpha, unsigned aaFlags); + + ImageSetEntry(); + ~ImageSetEntry(); + ImageSetEntry(const ImageSetEntry&); + ImageSetEntry& operator=(const ImageSetEntry&); + + sk_sp<const SkImage> fImage; + SkRect fSrcRect; + SkRect fDstRect; + int fMatrixIndex = -1; // Index into the preViewMatrices arg, or < 0 + float fAlpha = 1.f; + unsigned fAAFlags = kNone_QuadAAFlags; // QuadAAFlags + bool fHasClip = false; // True to use next 4 points in dstClip arg as quad + }; + + /** + * This is an experimental API for the SkiaRenderer Chromium project, and its API will surely + * evolve if it is not removed outright. + * + * This behaves very similarly to drawRect() combined with a clipPath() formed by clip + * quadrilateral. 'rect' and 'clip' are in the same coordinate space. If 'clip' is null, then it + * is as if the rectangle was not clipped (or, alternatively, clipped to itself). If not null, + * then it must provide 4 points. + * + * In addition to combining the draw and clipping into one operation, this function adds the + * additional capability of controlling each of the rectangle's edges anti-aliasing + * independently. The edges of the clip will respect the per-edge AA flags. It is required that + * 'clip' be contained inside 'rect'. In terms of mapping to edge labels, the 'clip' points + * should be ordered top-left, top-right, bottom-right, bottom-left so that the edge between [0] + * and [1] is "top", [1] and [2] is "right", [2] and [3] is "bottom", and [3] and [0] is "left". + * This ordering matches SkRect::toQuad(). + * + * This API only draws solid color, filled rectangles so it does not accept a full SkPaint. + */ + void experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags, + const SkColor4f& color, SkBlendMode mode); + void experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags, + SkColor color, SkBlendMode mode) { + this->experimental_DrawEdgeAAQuad(rect, clip, aaFlags, SkColor4f::FromColor(color), mode); + } + + /** + * This is an bulk variant of experimental_DrawEdgeAAQuad() that renders 'cnt' textured quads. + * For each entry, 'fDstRect' is rendered with its clip (determined by entry's 'fHasClip' and + * the current index in 'dstClip'). The entry's fImage is applied to the destination rectangle + * by sampling from 'fSrcRect' sub-image. The corners of 'fSrcRect' map to the corners of + * 'fDstRect', just like in drawImageRect(), and they will be properly interpolated when + * applying a clip. + * + * Like experimental_DrawEdgeAAQuad(), each entry can specify edge AA flags that apply to both + * the destination rect and its clip. + * + * If provided, the 'dstClips' array must have length equal 4 * the number of entries with + * fHasClip true. If 'dstClips' is null, every entry must have 'fHasClip' set to false. The + * destination clip coordinates will be read consecutively with the image set entries, advancing + * by 4 points every time an entry with fHasClip is passed. + * + * This entry point supports per-entry manipulations to the canvas's current matrix. If an + * entry provides 'fMatrixIndex' >= 0, it will be drawn as if the canvas's CTM was + * canvas->getTotalMatrix() * preViewMatrices[fMatrixIndex]. 
If 'fMatrixIndex' is less than 0, + * the pre-view matrix transform is implicitly the identity, so it will be drawn using just the + * current canvas matrix. The pre-view matrix modifies the canvas's view matrix, it does not + * affect the local coordinates of each entry. + * + * An optional paint may be provided, which supports the same subset of features usable with + * drawImageRect (i.e. assumed to be filled and no path effects). When a paint is provided, the + * image set is drawn as if each image used the applied paint independently, so each is affected + * by the image, color, and/or mask filter. + */ + void experimental_DrawEdgeAAImageSet(const ImageSetEntry imageSet[], int cnt, + const SkPoint dstClips[], const SkMatrix preViewMatrices[], + const SkSamplingOptions&, const SkPaint* paint = nullptr, + SrcRectConstraint constraint = kStrict_SrcRectConstraint); + + /** Draws text, with origin at (x, y), using clip, SkMatrix, SkFont font, + and SkPaint paint. + + When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or + SkTextEncoding::kUTF32, this function uses the default + character-to-glyph mapping from the SkTypeface in font. It does not + perform typeface fallback for characters not found in the SkTypeface. + It does not perform kerning or other complex shaping; glyphs are + positioned based on their default advances. + + Text meaning depends on SkTextEncoding. + + Text size is affected by SkMatrix and SkFont text size. Default text + size is 12 point. + + All elements of paint: SkPathEffect, SkMaskFilter, SkShader, + SkColorFilter, and SkImageFilter; apply to text. By + default, draws filled black glyphs. + + @param text character code points or glyphs drawn + @param byteLength byte length of text array + @param encoding text encoding used in the text array + @param x start of text on x-axis + @param y start of text on y-axis + @param font typeface, text size and so, used to describe the text + @param paint blend, color, and so on, used to draw + */ + void drawSimpleText(const void* text, size_t byteLength, SkTextEncoding encoding, + SkScalar x, SkScalar y, const SkFont& font, const SkPaint& paint); + + /** Draws null terminated string, with origin at (x, y), using clip, SkMatrix, + SkFont font, and SkPaint paint. + + This function uses the default character-to-glyph mapping from the + SkTypeface in font. It does not perform typeface fallback for + characters not found in the SkTypeface. It does not perform kerning; + glyphs are positioned based on their default advances. + + String str is encoded as UTF-8. + + Text size is affected by SkMatrix and font text size. Default text + size is 12 point. + + All elements of paint: SkPathEffect, SkMaskFilter, SkShader, + SkColorFilter, and SkImageFilter; apply to text. By + default, draws filled black glyphs. + + @param str character code points drawn, + ending with a char value of zero + @param x start of string on x-axis + @param y start of string on y-axis + @param font typeface, text size and so, used to describe the text + @param paint blend, color, and so on, used to draw + */ + void drawString(const char str[], SkScalar x, SkScalar y, const SkFont& font, + const SkPaint& paint) { + this->drawSimpleText(str, strlen(str), SkTextEncoding::kUTF8, x, y, font, paint); + } + + /** Draws SkString, with origin at (x, y), using clip, SkMatrix, SkFont font, + and SkPaint paint. + + This function uses the default character-to-glyph mapping from the + SkTypeface in font. 
It does not perform typeface fallback for + characters not found in the SkTypeface. It does not perform kerning; + glyphs are positioned based on their default advances. + + SkString str is encoded as UTF-8. + + Text size is affected by SkMatrix and SkFont text size. Default text + size is 12 point. + + All elements of paint: SkPathEffect, SkMaskFilter, SkShader, + SkColorFilter, and SkImageFilter; apply to text. By + default, draws filled black glyphs. + + @param str character code points drawn, + ending with a char value of zero + @param x start of string on x-axis + @param y start of string on y-axis + @param font typeface, text size and so, used to describe the text + @param paint blend, color, and so on, used to draw + */ + void drawString(const SkString& str, SkScalar x, SkScalar y, const SkFont& font, + const SkPaint& paint) { + this->drawSimpleText(str.c_str(), str.size(), SkTextEncoding::kUTF8, x, y, font, paint); + } + + /** Draws count glyphs, at positions relative to origin styled with font and paint with + supporting utf8 and cluster information. + + This function draw glyphs at the given positions relative to the given origin. + It does not perform typeface fallback for glyphs not found in the SkTypeface in font. + + The drawing obeys the current transform matrix and clipping. + + All elements of paint: SkPathEffect, SkMaskFilter, SkShader, + SkColorFilter, and SkImageFilter; apply to text. By + default, draws filled black glyphs. + + @param count number of glyphs to draw + @param glyphs the array of glyphIDs to draw + @param positions where to draw each glyph relative to origin + @param clusters array of size count of cluster information + @param textByteCount size of the utf8text + @param utf8text utf8text supporting information for the glyphs + @param origin the origin of all the positions + @param font typeface, text size and so, used to describe the text + @param paint blend, color, and so on, used to draw + */ + void drawGlyphs(int count, const SkGlyphID glyphs[], const SkPoint positions[], + const uint32_t clusters[], int textByteCount, const char utf8text[], + SkPoint origin, const SkFont& font, const SkPaint& paint); + + /** Draws count glyphs, at positions relative to origin styled with font and paint. + + This function draw glyphs at the given positions relative to the given origin. + It does not perform typeface fallback for glyphs not found in the SkTypeface in font. + + The drawing obeys the current transform matrix and clipping. + + All elements of paint: SkPathEffect, SkMaskFilter, SkShader, + SkColorFilter, and SkImageFilter; apply to text. By + default, draws filled black glyphs. + + @param count number of glyphs to draw + @param glyphs the array of glyphIDs to draw + @param positions where to draw each glyph relative to origin + @param origin the origin of all the positions + @param font typeface, text size and so, used to describe the text + @param paint blend, color, and so on, used to draw + */ + void drawGlyphs(int count, const SkGlyphID glyphs[], const SkPoint positions[], + SkPoint origin, const SkFont& font, const SkPaint& paint); + + /** Draws count glyphs, at positions relative to origin styled with font and paint. + + This function draw glyphs using the given scaling and rotations. They are positioned + relative to the given origin. It does not perform typeface fallback for glyphs not found + in the SkTypeface in font. + + The drawing obeys the current transform matrix and clipping. 
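+
+        A minimal sketch for the positioned drawGlyphs() variant documented
+        above, assuming a valid SkCanvas* canvas; the font size, text, and the
+        fixed 20-pixel advance are illustrative only:
+
+            SkFont font;                                  // default typeface
+            font.setSize(24);
+            SkPaint paint;
+            paint.setAntiAlias(true);
+
+            SkGlyphID glyphs[2];
+            int n = font.textToGlyphs("Hi", 2, SkTextEncoding::kUTF8, glyphs, 2);
+            SkPoint pos[2] = {{0, 0}, {20, 0}};
+            canvas->drawGlyphs(n, glyphs, pos, SkPoint{32, 64}, font, paint);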
+ + All elements of paint: SkPathEffect, SkMaskFilter, SkShader, + SkColorFilter, and SkImageFilter; apply to text. By + default, draws filled black glyphs. + + @param count number of glyphs to draw + @param glyphs the array of glyphIDs to draw + @param xforms where to draw and orient each glyph + @param origin the origin of all the positions + @param font typeface, text size and so, used to describe the text + @param paint blend, color, and so on, used to draw + */ + void drawGlyphs(int count, const SkGlyphID glyphs[], const SkRSXform xforms[], + SkPoint origin, const SkFont& font, const SkPaint& paint); + + /** Draws SkTextBlob blob at (x, y), using clip, SkMatrix, and SkPaint paint. + + blob contains glyphs, their positions, and paint attributes specific to text: + SkTypeface, SkPaint text size, SkPaint text scale x, + SkPaint text skew x, SkPaint::Align, SkPaint::Hinting, anti-alias, SkPaint fake bold, + SkPaint font embedded bitmaps, SkPaint full hinting spacing, LCD text, SkPaint linear text, + and SkPaint subpixel text. + + SkTextEncoding must be set to SkTextEncoding::kGlyphID. + + Elements of paint: anti-alias, SkBlendMode, color including alpha, + SkColorFilter, SkPaint dither, SkMaskFilter, SkPathEffect, SkShader, and + SkPaint::Style; apply to blob. If SkPaint contains SkPaint::kStroke_Style: + SkPaint miter limit, SkPaint::Cap, SkPaint::Join, and SkPaint stroke width; + apply to SkPath created from blob. + + @param blob glyphs, positions, and their paints' text size, typeface, and so on + @param x horizontal offset applied to blob + @param y vertical offset applied to blob + @param paint blend, color, stroking, and so on, used to draw + + example: https://fiddle.skia.org/c/@Canvas_drawTextBlob + */ + void drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint& paint); + + /** Draws SkTextBlob blob at (x, y), using clip, SkMatrix, and SkPaint paint. + + blob contains glyphs, their positions, and paint attributes specific to text: + SkTypeface, SkPaint text size, SkPaint text scale x, + SkPaint text skew x, SkPaint::Align, SkPaint::Hinting, anti-alias, SkPaint fake bold, + SkPaint font embedded bitmaps, SkPaint full hinting spacing, LCD text, SkPaint linear text, + and SkPaint subpixel text. + + SkTextEncoding must be set to SkTextEncoding::kGlyphID. + + Elements of paint: SkPathEffect, SkMaskFilter, SkShader, SkColorFilter, + and SkImageFilter; apply to blob. + + @param blob glyphs, positions, and their paints' text size, typeface, and so on + @param x horizontal offset applied to blob + @param y vertical offset applied to blob + @param paint blend, color, stroking, and so on, used to draw + */ + void drawTextBlob(const sk_sp<SkTextBlob>& blob, SkScalar x, SkScalar y, const SkPaint& paint) { + this->drawTextBlob(blob.get(), x, y, paint); + } + + /** Draws SkPicture picture, using clip and SkMatrix. + Clip and SkMatrix are unchanged by picture contents, as if + save() was called before and restore() was called after drawPicture(). + + SkPicture records a series of draw commands for later playback. + + @param picture recorded drawing commands to play + */ + void drawPicture(const SkPicture* picture) { + this->drawPicture(picture, nullptr, nullptr); + } + + /** Draws SkPicture picture, using clip and SkMatrix. + Clip and SkMatrix are unchanged by picture contents, as if + save() was called before and restore() was called after drawPicture(). + + SkPicture records a series of draw commands for later playback. 
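+
+        A minimal record-and-replay sketch, assuming a valid SkCanvas* canvas and
+        that SkPictureRecorder (include/core/SkPictureRecorder.h) is available;
+        the recorded content is illustrative only:
+
+            SkPictureRecorder recorder;
+            SkCanvas* rec = recorder.beginRecording(256, 256);
+            SkPaint p;
+            p.setColor(SK_ColorRED);
+            rec->drawCircle(128, 128, 64, p);
+            sk_sp<SkPicture> picture = recorder.finishRecordingAsPicture();
+
+            canvas->drawPicture(picture);       // replays the recorded commands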
+ + @param picture recorded drawing commands to play + */ + void drawPicture(const sk_sp<SkPicture>& picture) { + this->drawPicture(picture.get()); + } + + /** Draws SkPicture picture, using clip and SkMatrix; transforming picture with + SkMatrix matrix, if provided; and use SkPaint paint alpha, SkColorFilter, + SkImageFilter, and SkBlendMode, if provided. + + If paint is non-null, then the picture is always drawn into a temporary layer before + actually landing on the canvas. Note that drawing into a layer can also change its + appearance if there are any non-associative blendModes inside any of the pictures elements. + + @param picture recorded drawing commands to play + @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr + @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr + + example: https://fiddle.skia.org/c/@Canvas_drawPicture_3 + */ + void drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint); + + /** Draws SkPicture picture, using clip and SkMatrix; transforming picture with + SkMatrix matrix, if provided; and use SkPaint paint alpha, SkColorFilter, + SkImageFilter, and SkBlendMode, if provided. + + If paint is non-null, then the picture is always drawn into a temporary layer before + actually landing on the canvas. Note that drawing into a layer can also change its + appearance if there are any non-associative blendModes inside any of the pictures elements. + + @param picture recorded drawing commands to play + @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr + @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr + */ + void drawPicture(const sk_sp<SkPicture>& picture, const SkMatrix* matrix, + const SkPaint* paint) { + this->drawPicture(picture.get(), matrix, paint); + } + + /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix. + If paint contains an SkShader and vertices does not contain texCoords, the shader + is mapped using the vertices' positions. + + SkBlendMode is ignored if SkVertices does not have colors. Otherwise, it combines + - the SkShader if SkPaint contains SkShader + - or the opaque SkPaint color if SkPaint does not contain SkShader + as the src of the blend and the interpolated vertex colors as the dst. + + SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored. + + @param vertices triangle mesh to draw + @param mode combines vertices' colors with SkShader if present or SkPaint opaque color + if not. Ignored if the vertices do not contain color. + @param paint specifies the SkShader, used as SkVertices texture, and SkColorFilter. + + example: https://fiddle.skia.org/c/@Canvas_drawVertices + */ + void drawVertices(const SkVertices* vertices, SkBlendMode mode, const SkPaint& paint); + + /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix. + If paint contains an SkShader and vertices does not contain texCoords, the shader + is mapped using the vertices' positions. + + SkBlendMode is ignored if SkVertices does not have colors. Otherwise, it combines + - the SkShader if SkPaint contains SkShader + - or the opaque SkPaint color if SkPaint does not contain SkShader + as the src of the blend and the interpolated vertex colors as the dst. + + SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored. + + @param vertices triangle mesh to draw + @param mode combines vertices' colors with SkShader if present or SkPaint opaque color + if not. 
Ignored if the vertices do not contain color. + @param paint specifies the SkShader, used as SkVertices texture, may be nullptr + + example: https://fiddle.skia.org/c/@Canvas_drawVertices_2 + */ + void drawVertices(const sk_sp<SkVertices>& vertices, SkBlendMode mode, const SkPaint& paint); + +#if defined(SK_ENABLE_EXPERIMENTAL_CUSTOM_MESH) && defined(SK_ENABLE_SKSL) + /** + Experimental, under active development, and subject to change without notice. + + Draws a mesh using a user-defined specification (see SkCustomMeshSpecification). + + SkBlender is ignored if SkCustomMesh's specification does not output fragment shader color. + Otherwise, it combines + - the SkShader if SkPaint contains SkShader + - or the opaque SkPaint color if SkPaint does not contain SkShader + as the src of the blend and the mesh's fragment color as the dst. + + SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored. + + @param cm the custom mesh vertices and compatible specification. + @param blender combines vertices colors with SkShader if present or SkPaint opaque color + if not. Ignored if the custom mesh does not output color. Defaults to + SkBlendMode::kModulate if nullptr. + @param paint specifies the SkShader, used as SkVertices texture, may be nullptr + */ + void drawCustomMesh(SkCustomMesh cm, sk_sp<SkBlender> blender, const SkPaint& paint); +#endif + + /** Draws a Coons patch: the interpolation of four cubics with shared corners, + associating a color, and optionally a texture SkPoint, with each corner. + + SkPoint array cubics specifies four SkPath cubic starting at the top-left corner, + in clockwise order, sharing every fourth point. The last SkPath cubic ends at the + first point. + + Color array color associates colors with corners in top-left, top-right, + bottom-right, bottom-left order. + + If paint contains SkShader, SkPoint array texCoords maps SkShader as texture to + corners in top-left, top-right, bottom-right, bottom-left order. If texCoords is + nullptr, SkShader is mapped using positions (derived from cubics). + + SkBlendMode is ignored if colors is null. Otherwise, it combines + - the SkShader if SkPaint contains SkShader + - or the opaque SkPaint color if SkPaint does not contain SkShader + as the src of the blend and the interpolated patch colors as the dst. + + SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored. + + @param cubics SkPath cubic array, sharing common points + @param colors color array, one for each corner + @param texCoords SkPoint array of texture coordinates, mapping SkShader to corners; + may be nullptr + @param mode combines patch's colors with SkShader if present or SkPaint opaque color + if not. Ignored if colors is null. + @param paint SkShader, SkColorFilter, SkBlendMode, used to draw + */ + void drawPatch(const SkPoint cubics[12], const SkColor colors[4], + const SkPoint texCoords[4], SkBlendMode mode, const SkPaint& paint); + + /** Draws a set of sprites from atlas, using clip, SkMatrix, and optional SkPaint paint. + paint uses anti-alias, alpha, SkColorFilter, SkImageFilter, and SkBlendMode + to draw, if present. For each entry in the array, SkRect tex locates sprite in + atlas, and SkRSXform xform transforms it into destination space. + + SkMaskFilter and SkPathEffect on paint are ignored. + + xform, tex, and colors if present, must contain count entries. + Optional colors are applied for each sprite using SkBlendMode mode, treating + sprite as source and colors as destination. 
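+
+        A minimal sketch, assuming a valid SkCanvas* canvas and an sk_sp<SkImage>
+        atlas holding two 32x32 sprites side by side; placements are illustrative:
+
+            SkRSXform xforms[2] = {SkRSXform::Make(1, 0,  16, 16),   // no rotation
+                                   SkRSXform::Make(1, 0, 120, 16)};
+            SkRect    tex[2]    = {SkRect::MakeXYWH( 0, 0, 32, 32),
+                                   SkRect::MakeXYWH(32, 0, 32, 32)};
+            canvas->drawAtlas(atlas.get(), xforms, tex, nullptr, 2,   // no per-sprite colors
+                              SkBlendMode::kSrcOver,
+                              SkSamplingOptions(SkFilterMode::kLinear),
+                              nullptr, nullptr);                      // no cullRect, no paint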
+ Optional cullRect is a conservative bounds of all transformed sprites. + If cullRect is outside of clip, canvas can skip drawing. + + If atlas is nullptr, this draws nothing. + + @param atlas SkImage containing sprites + @param xform SkRSXform mappings for sprites in atlas + @param tex SkRect locations of sprites in atlas + @param colors one per sprite, blended with sprite using SkBlendMode; may be nullptr + @param count number of sprites to draw + @param mode SkBlendMode combining colors and sprites + @param sampling SkSamplingOptions used when sampling from the atlas image + @param cullRect bounds of transformed sprites for efficient clipping; may be nullptr + @param paint SkColorFilter, SkImageFilter, SkBlendMode, and so on; may be nullptr + */ + void drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[], + const SkColor colors[], int count, SkBlendMode mode, + const SkSamplingOptions& sampling, const SkRect* cullRect, const SkPaint* paint); + + /** Draws SkDrawable drawable using clip and SkMatrix, concatenated with + optional matrix. + + If SkCanvas has an asynchronous implementation, as is the case + when it is recording into SkPicture, then drawable will be referenced, + so that SkDrawable::draw() can be called when the operation is finalized. To force + immediate drawing, call SkDrawable::draw() instead. + + @param drawable custom struct encapsulating drawing commands + @param matrix transformation applied to drawing; may be nullptr + + example: https://fiddle.skia.org/c/@Canvas_drawDrawable + */ + void drawDrawable(SkDrawable* drawable, const SkMatrix* matrix = nullptr); + + /** Draws SkDrawable drawable using clip and SkMatrix, offset by (x, y). + + If SkCanvas has an asynchronous implementation, as is the case + when it is recording into SkPicture, then drawable will be referenced, + so that SkDrawable::draw() can be called when the operation is finalized. To force + immediate drawing, call SkDrawable::draw() instead. + + @param drawable custom struct encapsulating drawing commands + @param x offset into SkCanvas writable pixels on x-axis + @param y offset into SkCanvas writable pixels on y-axis + + example: https://fiddle.skia.org/c/@Canvas_drawDrawable_2 + */ + void drawDrawable(SkDrawable* drawable, SkScalar x, SkScalar y); + + /** Associates SkRect on SkCanvas with an annotation; a key-value pair, where the key is + a null-terminated UTF-8 string, and optional value is stored as SkData. + + Only some canvas implementations, such as recording to SkPicture, or drawing to + document PDF, use annotations. + + @param rect SkRect extent of canvas to annotate + @param key string used for lookup + @param value data holding value stored in annotation + + example: https://fiddle.skia.org/c/@Canvas_drawAnnotation_2 + */ + void drawAnnotation(const SkRect& rect, const char key[], SkData* value); + + /** Associates SkRect on SkCanvas when an annotation; a key-value pair, where the key is + a null-terminated UTF-8 string, and optional value is stored as SkData. + + Only some canvas implementations, such as recording to SkPicture, or drawing to + document PDF, use annotations. + + @param rect SkRect extent of canvas to annotate + @param key string used for lookup + @param value data holding value stored in annotation + */ + void drawAnnotation(const SkRect& rect, const char key[], const sk_sp<SkData>& value) { + this->drawAnnotation(rect, key, value.get()); + } + + /** Returns true if clip is empty; that is, nothing will draw. 
+ + May do work when called; it should not be called + more often than needed. However, once called, subsequent calls perform no + work until clip changes. + + @return true if clip is empty + + example: https://fiddle.skia.org/c/@Canvas_isClipEmpty + */ + virtual bool isClipEmpty() const; + + /** Returns true if clip is SkRect and not empty. + Returns false if the clip is empty, or if it is not SkRect. + + @return true if clip is SkRect and not empty + + example: https://fiddle.skia.org/c/@Canvas_isClipRect + */ + virtual bool isClipRect() const; + + /** Returns the current transform from local coordinates to the 'device', which for most + * purposes means pixels. + * + * @return transformation from local coordinates to device / pixels. + */ + SkM44 getLocalToDevice() const; + + /** + * Throws away the 3rd row and column in the matrix, so be warned. + */ + SkMatrix getLocalToDeviceAs3x3() const { + return this->getLocalToDevice().asM33(); + } + +#ifdef SK_SUPPORT_LEGACY_GETTOTALMATRIX + /** DEPRECATED + * Legacy version of getLocalToDevice(), which strips away any Z information, and + * just returns a 3x3 version. + * + * @return 3x3 version of getLocalToDevice() + * + * example: https://fiddle.skia.org/c/@Canvas_getTotalMatrix + * example: https://fiddle.skia.org/c/@Clip + */ + SkMatrix getTotalMatrix() const; +#endif + + /////////////////////////////////////////////////////////////////////////// + +#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && SK_SUPPORT_GPU + // These methods exist to support WebView in Android Framework. + SkIRect topLayerBounds() const; + GrBackendRenderTarget topLayerBackendRenderTarget() const; +#endif + + /** + * Returns the global clip as a region. If the clip contains AA, then only the bounds + * of the clip may be returned. + */ + void temporary_internal_getRgnClip(SkRegion* region); + + void private_draw_shadow_rec(const SkPath&, const SkDrawShadowRec&); + + +protected: + // default impl defers to getDevice()->newSurface(info) + virtual sk_sp<SkSurface> onNewSurface(const SkImageInfo& info, const SkSurfaceProps& props); + + // default impl defers to its device + virtual bool onPeekPixels(SkPixmap* pixmap); + virtual bool onAccessTopLayerPixels(SkPixmap* pixmap); + virtual SkImageInfo onImageInfo() const; + virtual bool onGetProps(SkSurfaceProps* props) const; + virtual void onFlush(); + + // Subclass save/restore notifiers. + // Overriders should call the corresponding INHERITED method up the inheritance chain. + // getSaveLayerStrategy()'s return value may suppress full layer allocation. + enum SaveLayerStrategy { + kFullLayer_SaveLayerStrategy, + kNoLayer_SaveLayerStrategy, + }; + + virtual void willSave() {} + // Overriders should call the corresponding INHERITED method up the inheritance chain. + virtual SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& ) { + return kFullLayer_SaveLayerStrategy; + } + + // returns true if we should actually perform the saveBehind, or false if we should just save. + virtual bool onDoSaveBehind(const SkRect*) { return true; } + virtual void willRestore() {} + virtual void didRestore() {} + + virtual void didConcat44(const SkM44&) {} + virtual void didSetM44(const SkM44&) {} + virtual void didTranslate(SkScalar, SkScalar) {} + virtual void didScale(SkScalar, SkScalar) {} + +#ifndef SK_ENABLE_EXPERIMENTAL_CUSTOM_MESH + // Define this in protected so we can still access internally for testing. 
+ void drawCustomMesh(SkCustomMesh cm, sk_sp<SkBlender> blender, const SkPaint& paint); +#endif + + // NOTE: If you are adding a new onDraw virtual to SkCanvas, PLEASE add an override to + // SkCanvasVirtualEnforcer (in SkCanvasVirtualEnforcer.h). This ensures that subclasses using + // that mechanism will be required to implement the new function. + virtual void onDrawPaint(const SkPaint& paint); + virtual void onDrawBehind(const SkPaint& paint); + virtual void onDrawRect(const SkRect& rect, const SkPaint& paint); + virtual void onDrawRRect(const SkRRect& rrect, const SkPaint& paint); + virtual void onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint); + virtual void onDrawOval(const SkRect& rect, const SkPaint& paint); + virtual void onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle, + bool useCenter, const SkPaint& paint); + virtual void onDrawPath(const SkPath& path, const SkPaint& paint); + virtual void onDrawRegion(const SkRegion& region, const SkPaint& paint); + + virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, + const SkPaint& paint); + + virtual void onDrawGlyphRunList(const SkGlyphRunList& glyphRunList, const SkPaint& paint); + + virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4], + const SkPoint texCoords[4], SkBlendMode mode, const SkPaint& paint); + virtual void onDrawPoints(PointMode mode, size_t count, const SkPoint pts[], + const SkPaint& paint); + + virtual void onDrawImage2(const SkImage*, SkScalar dx, SkScalar dy, const SkSamplingOptions&, + const SkPaint*); + virtual void onDrawImageRect2(const SkImage*, const SkRect& src, const SkRect& dst, + const SkSamplingOptions&, const SkPaint*, SrcRectConstraint); + virtual void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect& dst, + SkFilterMode, const SkPaint*); + virtual void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect src[], + const SkColor[], int count, SkBlendMode, const SkSamplingOptions&, + const SkRect* cull, const SkPaint*); + virtual void onDrawEdgeAAImageSet2(const ImageSetEntry imageSet[], int count, + const SkPoint dstClips[], const SkMatrix preViewMatrices[], + const SkSamplingOptions&, const SkPaint*, + SrcRectConstraint); + + virtual void onDrawVerticesObject(const SkVertices* vertices, SkBlendMode mode, + const SkPaint& paint); +#ifdef SK_ENABLE_SKSL + virtual void onDrawCustomMesh(SkCustomMesh, sk_sp<SkBlender>, const SkPaint&); +#endif + virtual void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value); + virtual void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&); + + virtual void onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix); + virtual void onDrawPicture(const SkPicture* picture, const SkMatrix* matrix, + const SkPaint* paint); + + virtual void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags, + const SkColor4f& color, SkBlendMode mode); + + enum ClipEdgeStyle { + kHard_ClipEdgeStyle, + kSoft_ClipEdgeStyle + }; + + virtual void onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle); + virtual void onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle); + virtual void onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle); + virtual void onClipShader(sk_sp<SkShader>, SkClipOp); + virtual void onClipRegion(const SkRegion& deviceRgn, SkClipOp op); + virtual void onResetClip(); + + virtual void onDiscard(); + +#if SK_SUPPORT_GPU + /** Experimental + */ + virtual 
sk_sp<GrSlug> doConvertBlobToSlug( + const SkTextBlob& blob, SkPoint origin, const SkPaint& paint); + + /** Experimental + */ + virtual void doDrawSlug(GrSlug* slug); +#endif + +private: + + enum ShaderOverrideOpacity { + kNone_ShaderOverrideOpacity, //!< there is no overriding shader (bitmap or image) + kOpaque_ShaderOverrideOpacity, //!< the overriding shader is opaque + kNotOpaque_ShaderOverrideOpacity, //!< the overriding shader may not be opaque + }; + + // notify our surface (if we have one) that we are about to draw, so it + // can perform copy-on-write or invalidate any cached images + // returns false if the copy failed + bool SK_WARN_UNUSED_RESULT predrawNotify(bool willOverwritesEntireSurface = false); + bool SK_WARN_UNUSED_RESULT predrawNotify(const SkRect*, const SkPaint*, ShaderOverrideOpacity); + + enum class CheckForOverwrite : bool { + kNo = false, + kYes = true + }; + // call the appropriate predrawNotify and create a layer if needed. + skstd::optional<AutoLayerForImageFilter> aboutToDraw( + SkCanvas* canvas, + const SkPaint& paint, + const SkRect* rawBounds = nullptr, + CheckForOverwrite = CheckForOverwrite::kNo, + ShaderOverrideOpacity = kNone_ShaderOverrideOpacity); + + // The bottom-most device in the stack, only changed by init(). Image properties and the final + // canvas pixels are determined by this device. + SkBaseDevice* baseDevice() const { + SkASSERT(fBaseDevice); + return fBaseDevice.get(); + } + + // The top-most device in the stack, will change within saveLayer()'s. All drawing and clipping + // operations should route to this device. + SkBaseDevice* topDevice() const; + + // Canvases maintain a sparse stack of layers, where the top-most layer receives the drawing, + // clip, and matrix commands. There is a layer per call to saveLayer() using the + // kFullLayer_SaveLayerStrategy. + struct Layer { + sk_sp<SkBaseDevice> fDevice; + sk_sp<SkImageFilter> fImageFilter; // applied to layer *before* being drawn by paint + SkPaint fPaint; + bool fDiscard; + + Layer(sk_sp<SkBaseDevice> device, sk_sp<SkImageFilter> imageFilter, const SkPaint& paint); + }; + + // Encapsulate state needed to restore from saveBehind() + struct BackImage { + sk_sp<SkSpecialImage> fImage; + SkIPoint fLoc; + }; + + class MCRec { + public: + // If not null, this MCRec corresponds with the saveLayer() record that made the layer. + // The base "layer" is not stored here, since it is stored inline in SkCanvas and has no + // restoration behavior. + std::unique_ptr<Layer> fLayer; + + // This points to the device of the top-most layer (which may be lower in the stack), or + // to the canvas's fBaseDevice. The MCRec does not own the device. 
+ SkBaseDevice* fDevice; + + std::unique_ptr<BackImage> fBackImage; + SkM44 fMatrix; + int fDeferredSaveCount = 0; + + MCRec(SkBaseDevice* device); + MCRec(const MCRec* prev); + ~MCRec(); + + void newLayer(sk_sp<SkBaseDevice> layerDevice, + sk_sp<SkImageFilter> filter, + const SkPaint& restorePaint); + + void reset(SkBaseDevice* device); + }; + + SkDeque fMCStack; + // points to top of stack + MCRec* fMCRec; + + // the first N recs that can fit here mean we won't call malloc + static constexpr int kMCRecSize = 96; // most recent measurement + static constexpr int kMCRecCount = 32; // common depth for save/restores + + intptr_t fMCRecStorage[kMCRecSize * kMCRecCount / sizeof(intptr_t)]; + + // Installed via init() + sk_sp<SkBaseDevice> fBaseDevice; + const SkSurfaceProps fProps; + + int fSaveCount; // value returned by getSaveCount() + + std::unique_ptr<SkRasterHandleAllocator> fAllocator; + + SkSurface_Base* fSurfaceBase; + SkSurface_Base* getSurfaceBase() const { return fSurfaceBase; } + void setSurfaceBase(SkSurface_Base* sb) { + fSurfaceBase = sb; + } + friend class SkSurface_Base; + friend class SkSurface_Gpu; + + SkIRect fClipRestrictionRect = SkIRect::MakeEmpty(); + int fClipRestrictionSaveCount = -1; + + void doSave(); + void checkForDeferredSave(); + void internalSetMatrix(const SkM44&); + + friend class SkAndroidFrameworkUtils; + friend class SkCanvasPriv; // needs to expose android functions for testing outside android + friend class AutoLayerForImageFilter; + friend class SkSurface_Raster; // needs getDevice() + friend class SkNoDrawCanvas; // needs resetForNextPicture() + friend class SkNWayCanvas; + friend class SkPictureRecord; // predrawNotify (why does it need it? <reed>) + friend class SkOverdrawCanvas; + friend class SkRasterHandleAllocator; +protected: + // For use by SkNoDrawCanvas (via SkCanvasVirtualEnforcer, which can't be a friend) + SkCanvas(const SkIRect& bounds); +private: + SkCanvas(const SkBitmap&, std::unique_ptr<SkRasterHandleAllocator>, + SkRasterHandleAllocator::Handle); + + SkCanvas(SkCanvas&&) = delete; + SkCanvas(const SkCanvas&) = delete; + SkCanvas& operator=(SkCanvas&&) = delete; + SkCanvas& operator=(const SkCanvas&) = delete; + +#if SK_SUPPORT_GPU + friend class GrSlug; + /** Experimental + * Convert a SkTextBlob to a GrSlug using the current canvas state. + */ + sk_sp<GrSlug> convertBlobToSlug(const SkTextBlob& blob, SkPoint origin, const SkPaint& paint); + + /** Experimental + * Draw an GrSlug given the current canvas state. + */ + void drawSlug(GrSlug* slug); +#endif + + /** Experimental + * Saves the specified subset of the current pixels in the current layer, + * and then clears those pixels to transparent black. + * Restores the pixels on restore() by drawing them in SkBlendMode::kDstOver. + * + * @param subset conservative bounds of the area to be saved / restored. + * @return depth of save state stack before this call was made. + */ + int only_axis_aligned_saveBehind(const SkRect* subset); + + /** + * Like drawPaint, but magically clipped to the most recent saveBehind buffer rectangle. + * If there is no active saveBehind, then this draws nothing. + */ + void drawClippedToSaveBehind(const SkPaint&); + + void resetForNextPicture(const SkIRect& bounds); + + // needs gettotalclip() + friend class SkCanvasStateUtils; + + void init(sk_sp<SkBaseDevice>); + + // All base onDrawX() functions should call this and skip drawing if it returns true. 
+ // If 'matrix' is non-null, it maps the paint's fast bounds before checking for quick rejection + bool internalQuickReject(const SkRect& bounds, const SkPaint& paint, + const SkMatrix* matrix = nullptr); + + void internalDrawPaint(const SkPaint& paint); + void internalSaveLayer(const SaveLayerRec&, SaveLayerStrategy); + void internalSaveBehind(const SkRect*); + + void internalConcat44(const SkM44&); + + // shared by save() and saveLayer() + void internalSave(); + void internalRestore(); + + enum class DeviceCompatibleWithFilter : bool { + // Check the src device's local-to-device matrix for compatibility with the filter, and if + // it is not compatible, introduce an intermediate image and transformation that allows the + // filter to be evaluated on the modified src content. + kUnknown = false, + // Assume that the src device's local-to-device matrix is compatible with the filter. + kYes = true + }; + /** + * Filters the contents of 'src' and draws the result into 'dst'. The filter is evaluated + * relative to the current canvas matrix, and src is drawn to dst using their relative transform + * 'paint' is applied after the filter and must not have a mask or image filter of its own. + * A null 'filter' behaves as if the identity filter were used. + * + * 'scaleFactor' is an extra uniform scale transform applied to downscale the 'src' image + * before any filtering, or as part of the copy, and is then drawn with 1/scaleFactor to 'dst'. + * Must be 1.0 if 'compat' is kYes (i.e. any scale factor has already been baked into the + * relative transforms between the devices). + */ + void internalDrawDeviceWithFilter(SkBaseDevice* src, SkBaseDevice* dst, + const SkImageFilter* filter, const SkPaint& paint, + DeviceCompatibleWithFilter compat, + SkScalar scaleFactor = 1.f); + + /* + * Returns true if drawing the specified rect (or all if it is null) with the specified + * paint (or default if null) would overwrite the entire root device of the canvas + * (i.e. the canvas' surface if it had one). + */ + bool wouldOverwriteEntireSurface(const SkRect*, const SkPaint*, ShaderOverrideOpacity) const; + + /** + * Returns true if the paint's imagefilter can be invoked directly, without needed a layer. + */ + bool canDrawBitmapAsSprite(SkScalar x, SkScalar y, int w, int h, const SkSamplingOptions&, + const SkPaint&); + + /** + * Returns true if the clip (for any active layer) contains antialiasing. + * If the clip is empty, this will return false. + */ + bool androidFramework_isClipAA() const; + + /** + * Reset the clip to be wide-open (modulo any separately specified device clip restriction). + * This operate within the save/restore clip stack so it can be undone by restoring to an + * earlier save point. + */ + void internal_private_resetClip(); + + virtual SkPaintFilterCanvas* internal_private_asPaintFilterCanvas() const { return nullptr; } + + // Keep track of the device clip bounds in the canvas' global space to reject draws before + // invoking the top-level device. + SkRect fQuickRejectBounds; + + // Compute the clip's bounds based on all clipped SkDevice's reported device bounds transformed + // into the canvas' global space. + SkRect computeDeviceClipBounds(bool outsetForAA=true) const; + + class AutoUpdateQRBounds; + void validateClip() const; + + std::unique_ptr<SkGlyphRunBuilder> fScratchGlyphRunBuilder; + + using INHERITED = SkRefCnt; +}; + +/** \class SkAutoCanvasRestore + Stack helper class calls SkCanvas::restoreToCount when SkAutoCanvasRestore + goes out of scope. 
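+
+    A minimal sketch, assuming a valid SkCanvas* canvas; the clip and translate
+    are illustrative only:
+
+        {
+            SkAutoCanvasRestore acr(canvas, true);    // doSave=true saves here
+            canvas->clipRect(SkRect::MakeWH(100, 100));
+            canvas->translate(10, 10);
+            // ... draw with the temporary clip and matrix ...
+        }   // clip and matrix are restored when acr goes out of scope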
Use this to guarantee that the canvas is restored to a known + state. +*/ +class SkAutoCanvasRestore { +public: + + /** Preserves SkCanvas::save() count. Optionally saves SkCanvas clip and SkCanvas matrix. + + @param canvas SkCanvas to guard + @param doSave call SkCanvas::save() + @return utility to restore SkCanvas state on destructor + */ + SkAutoCanvasRestore(SkCanvas* canvas, bool doSave) : fCanvas(canvas), fSaveCount(0) { + if (fCanvas) { + fSaveCount = canvas->getSaveCount(); + if (doSave) { + canvas->save(); + } + } + } + + /** Restores SkCanvas to saved state. Destructor is called when container goes out of + scope. + */ + ~SkAutoCanvasRestore() { + if (fCanvas) { + fCanvas->restoreToCount(fSaveCount); + } + } + + /** Restores SkCanvas to saved state immediately. Subsequent calls and + ~SkAutoCanvasRestore() have no effect. + */ + void restore() { + if (fCanvas) { + fCanvas->restoreToCount(fSaveCount); + fCanvas = nullptr; + } + } + +private: + SkCanvas* fCanvas; + int fSaveCount; + + SkAutoCanvasRestore(SkAutoCanvasRestore&&) = delete; + SkAutoCanvasRestore(const SkAutoCanvasRestore&) = delete; + SkAutoCanvasRestore& operator=(SkAutoCanvasRestore&&) = delete; + SkAutoCanvasRestore& operator=(const SkAutoCanvasRestore&) = delete; +}; + +#endif diff --git a/src/deps/skia/include/core/SkCanvasVirtualEnforcer.h b/src/deps/skia/include/core/SkCanvasVirtualEnforcer.h new file mode 100644 index 000000000..5086b4337 --- /dev/null +++ b/src/deps/skia/include/core/SkCanvasVirtualEnforcer.h @@ -0,0 +1,61 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCanvasVirtualEnforcer_DEFINED +#define SkCanvasVirtualEnforcer_DEFINED + +#include "include/core/SkCanvas.h" + +// If you would ordinarily want to inherit from Base (eg SkCanvas, SkNWayCanvas), instead +// inherit from SkCanvasVirtualEnforcer<Base>, which will make the build fail if you forget +// to override one of SkCanvas' key virtual hooks. +template <typename Base> +class SkCanvasVirtualEnforcer : public Base { +public: + using Base::Base; + +protected: + void onDrawPaint(const SkPaint& paint) override = 0; + void onDrawBehind(const SkPaint&) override {} // make zero after android updates + void onDrawRect(const SkRect& rect, const SkPaint& paint) override = 0; + void onDrawRRect(const SkRRect& rrect, const SkPaint& paint) override = 0; + void onDrawDRRect(const SkRRect& outer, const SkRRect& inner, + const SkPaint& paint) override = 0; + void onDrawOval(const SkRect& rect, const SkPaint& paint) override = 0; + void onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle, bool useCenter, + const SkPaint& paint) override = 0; + void onDrawPath(const SkPath& path, const SkPaint& paint) override = 0; + void onDrawRegion(const SkRegion& region, const SkPaint& paint) override = 0; + + void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, + const SkPaint& paint) override = 0; + + void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4], + const SkPoint texCoords[4], SkBlendMode mode, + const SkPaint& paint) override = 0; + void onDrawPoints(SkCanvas::PointMode mode, size_t count, const SkPoint pts[], + const SkPaint& paint) override = 0; + +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + // This is under active development for Chrome and not used in Android. Hold off on adding + // implementations in Android's SkCanvas subclasses until this stabilizes. 
+ void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], + SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color, SkBlendMode mode) override {} +#else + void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], + SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color, SkBlendMode mode) override = 0; +#endif + + void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) override = 0; + void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override = 0; + + void onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) override = 0; + void onDrawPicture(const SkPicture* picture, const SkMatrix* matrix, + const SkPaint* paint) override = 0; +}; + +#endif diff --git a/src/deps/skia/include/core/SkClipOp.h b/src/deps/skia/include/core/SkClipOp.h new file mode 100644 index 000000000..3da6c6113 --- /dev/null +++ b/src/deps/skia/include/core/SkClipOp.h @@ -0,0 +1,19 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkClipOp_DEFINED +#define SkClipOp_DEFINED + +#include "include/core/SkTypes.h" + +enum class SkClipOp { + kDifference = 0, + kIntersect = 1, + kMax_EnumValue = kIntersect +}; + +#endif diff --git a/src/deps/skia/include/core/SkColor.h b/src/deps/skia/include/core/SkColor.h new file mode 100644 index 000000000..9cba771dd --- /dev/null +++ b/src/deps/skia/include/core/SkColor.h @@ -0,0 +1,438 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkColor_DEFINED +#define SkColor_DEFINED + +#include "include/core/SkImageInfo.h" +#include "include/core/SkScalar.h" +#include "include/core/SkTypes.h" + +#include <array> + +/** \file SkColor.h + + Types, consts, functions, and macros for colors. +*/ + +/** 8-bit type for an alpha value. 255 is 100% opaque, zero is 100% transparent. +*/ +typedef uint8_t SkAlpha; + +/** 32-bit ARGB color value, unpremultiplied. Color components are always in + a known order. This is different from SkPMColor, which has its bytes in a configuration + dependent order, to match the format of kBGRA_8888_SkColorType bitmaps. SkColor + is the type used to specify colors in SkPaint and in gradients. + + Color that is premultiplied has the same component values as color + that is unpremultiplied if alpha is 255, fully opaque, although may have the + component values in a different order. +*/ +typedef uint32_t SkColor; + +/** Returns color value from 8-bit component values. Asserts if SK_DEBUG is defined + if a, r, g, or b exceed 255. Since color is unpremultiplied, a may be smaller + than the largest of r, g, and b. + + @param a amount of alpha, from fully transparent (0) to fully opaque (255) + @param r amount of red, from no red (0) to full red (255) + @param g amount of green, from no green (0) to full green (255) + @param b amount of blue, from no blue (0) to full blue (255) + @return color and alpha, unpremultiplied +*/ +static constexpr inline SkColor SkColorSetARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) { + return SkASSERT(a <= 255 && r <= 255 && g <= 255 && b <= 255), + (a << 24) | (r << 16) | (g << 8) | (b << 0); +} + +/** Returns color value from 8-bit component values, with alpha set + fully opaque to 255. +*/ +#define SkColorSetRGB(r, g, b) SkColorSetARGB(0xFF, r, g, b) + +/** Returns alpha byte from color value. 
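+
+    A minimal packing/unpacking sketch using the macros in this header (the
+    component values are illustrative only):
+
+        SkColor c = SkColorSetARGB(0x80, 0x12, 0x34, 0x56);   // 50% alpha
+        SkAlpha a = SkColorGetA(c);                           // 0x80
+        U8CPU   r = SkColorGetR(c);                           // 0x12
+        SkColor opaque = SkColorSetA(c, 0xFF);                // same RGB, full alpha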
+*/ +#define SkColorGetA(color) (((color) >> 24) & 0xFF) + +/** Returns red component of color, from zero to 255. +*/ +#define SkColorGetR(color) (((color) >> 16) & 0xFF) + +/** Returns green component of color, from zero to 255. +*/ +#define SkColorGetG(color) (((color) >> 8) & 0xFF) + +/** Returns blue component of color, from zero to 255. +*/ +#define SkColorGetB(color) (((color) >> 0) & 0xFF) + +/** Returns unpremultiplied color with red, blue, and green set from c; and alpha set + from a. Alpha component of c is ignored and is replaced by a in result. + + @param c packed RGB, eight bits per component + @param a alpha: transparent at zero, fully opaque at 255 + @return color with transparency +*/ +static constexpr inline SkColor SK_WARN_UNUSED_RESULT SkColorSetA(SkColor c, U8CPU a) { + return (c & 0x00FFFFFF) | (a << 24); +} + +/** Represents fully transparent SkAlpha value. SkAlpha ranges from zero, + fully transparent; to 255, fully opaque. +*/ +constexpr SkAlpha SK_AlphaTRANSPARENT = 0x00; + +/** Represents fully opaque SkAlpha value. SkAlpha ranges from zero, + fully transparent; to 255, fully opaque. +*/ +constexpr SkAlpha SK_AlphaOPAQUE = 0xFF; + +/** Represents fully transparent SkColor. May be used to initialize a destination + containing a mask or a non-rectangular image. +*/ +constexpr SkColor SK_ColorTRANSPARENT = SkColorSetARGB(0x00, 0x00, 0x00, 0x00); + +/** Represents fully opaque black. +*/ +constexpr SkColor SK_ColorBLACK = SkColorSetARGB(0xFF, 0x00, 0x00, 0x00); + +/** Represents fully opaque dark gray. + Note that SVG dark gray is equivalent to 0xFFA9A9A9. +*/ +constexpr SkColor SK_ColorDKGRAY = SkColorSetARGB(0xFF, 0x44, 0x44, 0x44); + +/** Represents fully opaque gray. + Note that HTML gray is equivalent to 0xFF808080. +*/ +constexpr SkColor SK_ColorGRAY = SkColorSetARGB(0xFF, 0x88, 0x88, 0x88); + +/** Represents fully opaque light gray. HTML silver is equivalent to 0xFFC0C0C0. + Note that SVG light gray is equivalent to 0xFFD3D3D3. +*/ +constexpr SkColor SK_ColorLTGRAY = SkColorSetARGB(0xFF, 0xCC, 0xCC, 0xCC); + +/** Represents fully opaque white. +*/ +constexpr SkColor SK_ColorWHITE = SkColorSetARGB(0xFF, 0xFF, 0xFF, 0xFF); + +/** Represents fully opaque red. +*/ +constexpr SkColor SK_ColorRED = SkColorSetARGB(0xFF, 0xFF, 0x00, 0x00); + +/** Represents fully opaque green. HTML lime is equivalent. + Note that HTML green is equivalent to 0xFF008000. +*/ +constexpr SkColor SK_ColorGREEN = SkColorSetARGB(0xFF, 0x00, 0xFF, 0x00); + +/** Represents fully opaque blue. +*/ +constexpr SkColor SK_ColorBLUE = SkColorSetARGB(0xFF, 0x00, 0x00, 0xFF); + +/** Represents fully opaque yellow. +*/ +constexpr SkColor SK_ColorYELLOW = SkColorSetARGB(0xFF, 0xFF, 0xFF, 0x00); + +/** Represents fully opaque cyan. HTML aqua is equivalent. +*/ +constexpr SkColor SK_ColorCYAN = SkColorSetARGB(0xFF, 0x00, 0xFF, 0xFF); + +/** Represents fully opaque magenta. HTML fuchsia is equivalent. +*/ +constexpr SkColor SK_ColorMAGENTA = SkColorSetARGB(0xFF, 0xFF, 0x00, 0xFF); + +/** Converts RGB to its HSV components. + hsv[0] contains hsv hue, a value from zero to less than 360. + hsv[1] contains hsv saturation, a value from zero to one. + hsv[2] contains hsv value, a value from zero to one. 
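+
+    Example (an illustrative sketch, not part of the upstream header):
+      SkScalar hsv[3];
+      SkRGBToHSV(255, 0, 0, hsv);   // pure red -> hue 0, saturation 1, value 1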
+ + @param red red component value from zero to 255 + @param green green component value from zero to 255 + @param blue blue component value from zero to 255 + @param hsv three element array which holds the resulting HSV components +*/ +SK_API void SkRGBToHSV(U8CPU red, U8CPU green, U8CPU blue, SkScalar hsv[3]); + +/** Converts ARGB to its HSV components. Alpha in ARGB is ignored. + hsv[0] contains hsv hue, and is assigned a value from zero to less than 360. + hsv[1] contains hsv saturation, a value from zero to one. + hsv[2] contains hsv value, a value from zero to one. + + @param color ARGB color to convert + @param hsv three element array which holds the resulting HSV components +*/ +static inline void SkColorToHSV(SkColor color, SkScalar hsv[3]) { + SkRGBToHSV(SkColorGetR(color), SkColorGetG(color), SkColorGetB(color), hsv); +} + +/** Converts HSV components to an ARGB color. Alpha is passed through unchanged. + hsv[0] represents hsv hue, an angle from zero to less than 360. + hsv[1] represents hsv saturation, and varies from zero to one. + hsv[2] represents hsv value, and varies from zero to one. + + Out of range hsv values are pinned. + + @param alpha alpha component of the returned ARGB color + @param hsv three element array which holds the input HSV components + @return ARGB equivalent to HSV +*/ +SK_API SkColor SkHSVToColor(U8CPU alpha, const SkScalar hsv[3]); + +/** Converts HSV components to an ARGB color. Alpha is set to 255. + hsv[0] represents hsv hue, an angle from zero to less than 360. + hsv[1] represents hsv saturation, and varies from zero to one. + hsv[2] represents hsv value, and varies from zero to one. + + Out of range hsv values are pinned. + + @param hsv three element array which holds the input HSV components + @return RGB equivalent to HSV +*/ +static inline SkColor SkHSVToColor(const SkScalar hsv[3]) { + return SkHSVToColor(0xFF, hsv); +} + +/** 32-bit ARGB color value, premultiplied. The byte order for this value is + configuration dependent, matching the format of kBGRA_8888_SkColorType bitmaps. + This is different from SkColor, which is unpremultiplied, and is always in the + same byte order. +*/ +typedef uint32_t SkPMColor; + +/** Returns a SkPMColor value from unpremultiplied 8-bit component values. + + @param a amount of alpha, from fully transparent (0) to fully opaque (255) + @param r amount of red, from no red (0) to full red (255) + @param g amount of green, from no green (0) to full green (255) + @param b amount of blue, from no blue (0) to full blue (255) + @return premultiplied color +*/ +SK_API SkPMColor SkPreMultiplyARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b); + +/** Returns pmcolor closest to color c. Multiplies c RGB components by the c alpha, + and arranges the bytes to match the format of kN32_SkColorType. + + @param c unpremultiplied ARGB color + @return premultiplied color +*/ +SK_API SkPMColor SkPreMultiplyColor(SkColor c); + +/** \enum SkColorChannel + Describes different color channels one can manipulate +*/ +enum class SkColorChannel { + kR, // the red channel + kG, // the green channel + kB, // the blue channel + kA, // the alpha channel + + kLastEnum = kA, +}; + +/** Used to represent the channels available in a color type or texture format as a mask. 
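+
+    Example (illustrative, not part of the upstream header; shows testing a mask for a channel):
+      uint32_t flags = kRGB_SkColorChannelFlags;                  // red | green | blue
+      bool hasAlpha = (flags & kAlpha_SkColorChannelFlag) != 0;   // false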
*/ +enum SkColorChannelFlag : uint32_t { + kRed_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kR), + kGreen_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kG), + kBlue_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kB), + kAlpha_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kA), + kGray_SkColorChannelFlag = 0x10, + // Convenience values + kGrayAlpha_SkColorChannelFlags = kGray_SkColorChannelFlag | kAlpha_SkColorChannelFlag, + kRG_SkColorChannelFlags = kRed_SkColorChannelFlag | kGreen_SkColorChannelFlag, + kRGB_SkColorChannelFlags = kRG_SkColorChannelFlags | kBlue_SkColorChannelFlag, + kRGBA_SkColorChannelFlags = kRGB_SkColorChannelFlags | kAlpha_SkColorChannelFlag, +}; +static_assert(0 == (kGray_SkColorChannelFlag & kRGBA_SkColorChannelFlags), "bitfield conflict"); + +/** \struct SkRGBA4f + RGBA color value, holding four floating point components. Color components are always in + a known order. kAT determines if the SkRGBA4f's R, G, and B components are premultiplied + by alpha or not. + + Skia's public API always uses unpremultiplied colors, which can be stored as + SkRGBA4f<kUnpremul_SkAlphaType>. For convenience, this type can also be referred to + as SkColor4f. +*/ +template <SkAlphaType kAT> +struct SkRGBA4f { + float fR; //!< red component + float fG; //!< green component + float fB; //!< blue component + float fA; //!< alpha component + + /** Compares SkRGBA4f with other, and returns true if all components are equal. + + @param other SkRGBA4f to compare + @return true if SkRGBA4f equals other + */ + bool operator==(const SkRGBA4f& other) const { + return fA == other.fA && fR == other.fR && fG == other.fG && fB == other.fB; + } + + /** Compares SkRGBA4f with other, and returns true if not all components are equal. + + @param other SkRGBA4f to compare + @return true if SkRGBA4f is not equal to other + */ + bool operator!=(const SkRGBA4f& other) const { + return !(*this == other); + } + + /** Returns SkRGBA4f multiplied by scale. + + @param scale value to multiply by + @return SkRGBA4f as (fR * scale, fG * scale, fB * scale, fA * scale) + */ + SkRGBA4f operator*(float scale) const { + return { fR * scale, fG * scale, fB * scale, fA * scale }; + } + + /** Returns SkRGBA4f multiplied component-wise by scale. + + @param scale SkRGBA4f to multiply by + @return SkRGBA4f as (fR * scale.fR, fG * scale.fG, fB * scale.fB, fA * scale.fA) + */ + SkRGBA4f operator*(const SkRGBA4f& scale) const { + return { fR * scale.fR, fG * scale.fG, fB * scale.fB, fA * scale.fA }; + } + + /** Returns a pointer to components of SkRGBA4f, for array access. + + @return pointer to array [fR, fG, fB, fA] + */ + const float* vec() const { return &fR; } + + /** Returns a pointer to components of SkRGBA4f, for array access. + + @return pointer to array [fR, fG, fB, fA] + */ + float* vec() { return &fR; } + + /** As a std::array<float, 4> */ + std::array<float, 4> array() const { return {fR, fG, fB, fA}; } + + /** Returns one component. Asserts if index is out of range and SK_DEBUG is defined. + + @param index one of: 0 (fR), 1 (fG), 2 (fB), 3 (fA) + @return value corresponding to index + */ + float operator[](int index) const { + SkASSERT(index >= 0 && index < 4); + return this->vec()[index]; + } + + /** Returns one component. Asserts if index is out of range and SK_DEBUG is defined. 
+ + @param index one of: 0 (fR), 1 (fG), 2 (fB), 3 (fA) + @return value corresponding to index + */ + float& operator[](int index) { + SkASSERT(index >= 0 && index < 4); + return this->vec()[index]; + } + + /** Returns true if SkRGBA4f is an opaque color. Asserts if fA is out of range and + SK_DEBUG is defined. + + @return true if SkRGBA4f is opaque + */ + bool isOpaque() const { + SkASSERT(fA <= 1.0f && fA >= 0.0f); + return fA == 1.0f; + } + + /** Returns true if all channels are in [0, 1]. */ + bool fitsInBytes() const { + SkASSERT(fA >= 0.0f && fA <= 1.0f); + return fR >= 0.0f && fR <= 1.0f && + fG >= 0.0f && fG <= 1.0f && + fB >= 0.0f && fB <= 1.0f; + } + + /** Returns closest SkRGBA4f to SkColor. Only allowed if SkRGBA4f is unpremultiplied. + + @param color Color with Alpha, red, blue, and green components + @return SkColor as SkRGBA4f + + example: https://fiddle.skia.org/c/@RGBA4f_FromColor + */ + static SkRGBA4f FromColor(SkColor color); // impl. depends on kAT + + /** Returns closest SkColor to SkRGBA4f. Only allowed if SkRGBA4f is unpremultiplied. + + @return color as SkColor + + example: https://fiddle.skia.org/c/@RGBA4f_toSkColor + */ + SkColor toSkColor() const; // impl. depends on kAT + + /** Returns closest SkRGBA4f to SkPMColor. Only allowed if SkRGBA4f is premultiplied. + + @return SkPMColor as SkRGBA4f + */ + static SkRGBA4f FromPMColor(SkPMColor); // impl. depends on kAT + + /** Returns SkRGBA4f premultiplied by alpha. Asserts at compile time if SkRGBA4f is + already premultiplied. + + @return premultiplied color + */ + SkRGBA4f<kPremul_SkAlphaType> premul() const { + static_assert(kAT == kUnpremul_SkAlphaType, ""); + return { fR * fA, fG * fA, fB * fA, fA }; + } + + /** Returns SkRGBA4f unpremultiplied by alpha. Asserts at compile time if SkRGBA4f is + already unpremultiplied. + + @return unpremultiplied color + */ + SkRGBA4f<kUnpremul_SkAlphaType> unpremul() const { + static_assert(kAT == kPremul_SkAlphaType, ""); + + if (fA == 0.0f) { + return { 0, 0, 0, 0 }; + } else { + float invAlpha = 1 / fA; + return { fR * invAlpha, fG * invAlpha, fB * invAlpha, fA }; + } + } + + // This produces bytes in RGBA order (eg GrColor). Impl. is the same, regardless of kAT + uint32_t toBytes_RGBA() const; + static SkRGBA4f FromBytes_RGBA(uint32_t color); + + SkRGBA4f makeOpaque() const { + return { fR, fG, fB, 1.0f }; + } +}; + +/** \struct SkColor4f + RGBA color value, holding four floating point components. Color components are always in + a known order, and are unpremultiplied. + + This is a specialization of SkRGBA4f. For details, @see SkRGBA4f. 
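+
+    Example (an illustrative sketch, not part of the upstream header):
+      SkColor4f halfRed = {1.0f, 0.0f, 0.0f, 0.5f};             // unpremultiplied red at 50% alpha
+      SkRGBA4f<kPremul_SkAlphaType> pm = halfRed.premul();      // {0.5f, 0.0f, 0.0f, 0.5f}
+      SkColor packed = halfRed.toSkColor();                     // closest packed 8-bit ARGB value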
+*/ +using SkColor4f = SkRGBA4f<kUnpremul_SkAlphaType>; + +template <> SK_API SkColor4f SkColor4f::FromColor(SkColor); +template <> SK_API SkColor SkColor4f::toSkColor() const; + +namespace SkColors { +constexpr SkColor4f kTransparent = {0, 0, 0, 0}; +constexpr SkColor4f kBlack = {0, 0, 0, 1}; +constexpr SkColor4f kDkGray = {0.25f, 0.25f, 0.25f, 1}; +constexpr SkColor4f kGray = {0.50f, 0.50f, 0.50f, 1}; +constexpr SkColor4f kLtGray = {0.75f, 0.75f, 0.75f, 1}; +constexpr SkColor4f kWhite = {1, 1, 1, 1}; +constexpr SkColor4f kRed = {1, 0, 0, 1}; +constexpr SkColor4f kGreen = {0, 1, 0, 1}; +constexpr SkColor4f kBlue = {0, 0, 1, 1}; +constexpr SkColor4f kYellow = {1, 1, 0, 1}; +constexpr SkColor4f kCyan = {0, 1, 1, 1}; +constexpr SkColor4f kMagenta = {1, 0, 1, 1}; +} // namespace SkColors +#endif diff --git a/src/deps/skia/include/core/SkColorFilter.h b/src/deps/skia/include/core/SkColorFilter.h new file mode 100644 index 000000000..e949b24ac --- /dev/null +++ b/src/deps/skia/include/core/SkColorFilter.h @@ -0,0 +1,90 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkColorFilter_DEFINED +#define SkColorFilter_DEFINED + +#include "include/core/SkBlendMode.h" +#include "include/core/SkColor.h" +#include "include/core/SkFlattenable.h" + +class SkColorMatrix; + +/** +* ColorFilters are optional objects in the drawing pipeline. When present in +* a paint, they are called with the "src" colors, and return new colors, which +* are then passed onto the next stage (either ImageFilter or Xfermode). +* +* All subclasses are required to be reentrant-safe : it must be legal to share +* the same instance between several threads. +*/ +class SK_API SkColorFilter : public SkFlattenable { +public: + /** If the filter can be represented by a source color plus Mode, this + * returns true, and sets (if not NULL) the color and mode appropriately. + * If not, this returns false and ignores the parameters. + */ + bool asAColorMode(SkColor* color, SkBlendMode* mode) const; + + /** If the filter can be represented by a 5x4 matrix, this + * returns true, and sets the matrix appropriately. + * If not, this returns false and ignores the parameter. + */ + bool asAColorMatrix(float matrix[20]) const; + + // Returns true if the filter is guaranteed to never change the alpha of a color it filters. + bool isAlphaUnchanged() const; + + SkColor filterColor(SkColor) const; + + /** + * Converts the src color (in src colorspace), into the dst colorspace, + * then applies this filter to it, returning the filtered color in the dst colorspace. + */ + SkColor4f filterColor4f(const SkColor4f& srcColor, SkColorSpace* srcCS, + SkColorSpace* dstCS) const; + + /** Construct a colorfilter whose effect is to first apply the inner filter and then apply + * this filter, applied to the output of the inner filter. + * + * result = this(inner(...)) + */ + sk_sp<SkColorFilter> makeComposed(sk_sp<SkColorFilter> inner) const; + + static sk_sp<SkColorFilter> Deserialize(const void* data, size_t size, + const SkDeserialProcs* procs = nullptr); + +private: + SkColorFilter() = default; + friend class SkColorFilterBase; + + using INHERITED = SkFlattenable; +}; + +class SK_API SkColorFilters { +public: + static sk_sp<SkColorFilter> Compose(sk_sp<SkColorFilter> outer, sk_sp<SkColorFilter> inner) { + return outer ? 
outer->makeComposed(inner) : inner; + } + static sk_sp<SkColorFilter> Blend(SkColor c, SkBlendMode mode); + static sk_sp<SkColorFilter> Matrix(const SkColorMatrix&); + static sk_sp<SkColorFilter> Matrix(const float rowMajor[20]); + + // A version of Matrix which operates in HSLA space instead of RGBA. + // I.e. HSLA-to-RGBA(Matrix(RGBA-to-HSLA(input))). + static sk_sp<SkColorFilter> HSLAMatrix(const SkColorMatrix&); + static sk_sp<SkColorFilter> HSLAMatrix(const float rowMajor[20]); + + static sk_sp<SkColorFilter> LinearToSRGBGamma(); + static sk_sp<SkColorFilter> SRGBToLinearGamma(); + static sk_sp<SkColorFilter> Lerp(float t, sk_sp<SkColorFilter> dst, sk_sp<SkColorFilter> src); + +private: + SkColorFilters() = delete; +}; + +#endif diff --git a/src/deps/skia/include/core/SkColorPriv.h b/src/deps/skia/include/core/SkColorPriv.h new file mode 100644 index 000000000..29f64339b --- /dev/null +++ b/src/deps/skia/include/core/SkColorPriv.h @@ -0,0 +1,152 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkColorPriv_DEFINED +#define SkColorPriv_DEFINED + +#include "include/core/SkColor.h" +#include "include/core/SkMath.h" +#include "include/private/SkTPin.h" +#include "include/private/SkTo.h" + +/** Turn 0..255 into 0..256 by adding 1 at the half-way point. Used to turn a + byte into a scale value, so that we can say scale * value >> 8 instead of + alpha * value / 255. + + In debugging, asserts that alpha is 0..255 +*/ +static inline unsigned SkAlpha255To256(U8CPU alpha) { + SkASSERT(SkToU8(alpha) == alpha); + // this one assues that blending on top of an opaque dst keeps it that way + // even though it is less accurate than a+(a>>7) for non-opaque dsts + return alpha + 1; +} + +/** Multiplify value by 0..256, and shift the result down 8 + (i.e. return (value * alpha256) >> 8) + */ +#define SkAlphaMul(value, alpha256) (((value) * (alpha256)) >> 8) + +static inline U8CPU SkUnitScalarClampToByte(SkScalar x) { + return static_cast<U8CPU>(SkTPin(x, 0.0f, 1.0f) * 255 + 0.5); +} + +#define SK_A32_BITS 8 +#define SK_R32_BITS 8 +#define SK_G32_BITS 8 +#define SK_B32_BITS 8 + +#define SK_A32_MASK ((1 << SK_A32_BITS) - 1) +#define SK_R32_MASK ((1 << SK_R32_BITS) - 1) +#define SK_G32_MASK ((1 << SK_G32_BITS) - 1) +#define SK_B32_MASK ((1 << SK_B32_BITS) - 1) + +/* + * Skia's 32bit backend only supports 1 swizzle order at a time (compile-time). + * This is specified by SK_R32_SHIFT=0 or SK_R32_SHIFT=16. + * + * For easier compatibility with Skia's GPU backend, we further restrict these + * to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does + * not directly correspond to the same shift-order, since we have to take endianess + * into account. + * + * Here we enforce this constraint. + */ + +#define SK_RGBA_R32_SHIFT 0 +#define SK_RGBA_G32_SHIFT 8 +#define SK_RGBA_B32_SHIFT 16 +#define SK_RGBA_A32_SHIFT 24 + +#define SK_BGRA_B32_SHIFT 0 +#define SK_BGRA_G32_SHIFT 8 +#define SK_BGRA_R32_SHIFT 16 +#define SK_BGRA_A32_SHIFT 24 + +#if defined(SK_PMCOLOR_IS_RGBA) || defined(SK_PMCOLOR_IS_BGRA) + #error "Configure PMCOLOR by setting SK_R32_SHIFT." 
+#endif + +// Deduce which SK_PMCOLOR_IS_ to define from the _SHIFT defines + +#if (SK_A32_SHIFT == SK_RGBA_A32_SHIFT && \ + SK_R32_SHIFT == SK_RGBA_R32_SHIFT && \ + SK_G32_SHIFT == SK_RGBA_G32_SHIFT && \ + SK_B32_SHIFT == SK_RGBA_B32_SHIFT) + #define SK_PMCOLOR_IS_RGBA +#elif (SK_A32_SHIFT == SK_BGRA_A32_SHIFT && \ + SK_R32_SHIFT == SK_BGRA_R32_SHIFT && \ + SK_G32_SHIFT == SK_BGRA_G32_SHIFT && \ + SK_B32_SHIFT == SK_BGRA_B32_SHIFT) + #define SK_PMCOLOR_IS_BGRA +#else + #error "need 32bit packing to be either RGBA or BGRA" +#endif + +#define SkGetPackedA32(packed) ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24) +#define SkGetPackedR32(packed) ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24) +#define SkGetPackedG32(packed) ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24) +#define SkGetPackedB32(packed) ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24) + +#define SkA32Assert(a) SkASSERT((unsigned)(a) <= SK_A32_MASK) +#define SkR32Assert(r) SkASSERT((unsigned)(r) <= SK_R32_MASK) +#define SkG32Assert(g) SkASSERT((unsigned)(g) <= SK_G32_MASK) +#define SkB32Assert(b) SkASSERT((unsigned)(b) <= SK_B32_MASK) + +/** + * Pack the components into a SkPMColor, checking (in the debug version) that + * the components are 0..255, and are already premultiplied (i.e. alpha >= color) + */ +static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) { + SkA32Assert(a); + SkASSERT(r <= a); + SkASSERT(g <= a); + SkASSERT(b <= a); + + return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) | + (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT); +} + +/** + * Same as SkPackARGB32, but this version guarantees to not check that the + * values are premultiplied in the debug version. + */ +static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) { + return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) | + (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT); +} + +static inline +SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) { + SkA32Assert(a); + SkR32Assert(r); + SkG32Assert(g); + SkB32Assert(b); + + if (a != 255) { + r = SkMulDiv255Round(r, a); + g = SkMulDiv255Round(g, a); + b = SkMulDiv255Round(b, a); + } + return SkPackARGB32(a, r, g, b); +} + +// When Android is compiled optimizing for size, SkAlphaMulQ doesn't get +// inlined; forcing inlining significantly improves performance. +static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) { + uint32_t mask = 0xFF00FF; + + uint32_t rb = ((c & mask) * scale) >> 8; + uint32_t ag = ((c >> 8) & mask) * scale; + return (rb & mask) | (ag & ~mask); +} + +static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) { + return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src))); +} + +#endif diff --git a/src/deps/skia/include/core/SkColorSpace.h b/src/deps/skia/include/core/SkColorSpace.h new file mode 100644 index 000000000..9efd140e2 --- /dev/null +++ b/src/deps/skia/include/core/SkColorSpace.h @@ -0,0 +1,245 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkColorSpace_DEFINED +#define SkColorSpace_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/private/SkFixed.h" +#include "include/private/SkOnce.h" +#include "include/third_party/skcms/skcms.h" +#include <memory> + +class SkData; + +/** + * Describes a color gamut with primaries and a white point. 
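+ *
+ * Example (illustrative; the constants below are the well-known sRGB/Rec. 709 primaries with a
+ * D65 white point, not values defined by this header):
+ *   SkColorSpacePrimaries srgb = {0.64f, 0.33f, 0.30f, 0.60f, 0.15f, 0.06f, 0.3127f, 0.3290f};
+ *   skcms_Matrix3x3 toXYZD50;
+ *   bool ok = srgb.toXYZD50(&toXYZD50);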
+ */ +struct SK_API SkColorSpacePrimaries { + float fRX; + float fRY; + float fGX; + float fGY; + float fBX; + float fBY; + float fWX; + float fWY; + + /** + * Convert primaries and a white point to a toXYZD50 matrix, the preferred color gamut + * representation of SkColorSpace. + */ + bool toXYZD50(skcms_Matrix3x3* toXYZD50) const; +}; + +namespace SkNamedTransferFn { + +// Like SkNamedGamut::kSRGB, keeping this bitwise exactly the same as skcms makes things fastest. +static constexpr skcms_TransferFunction kSRGB = + { 2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0.0f, 0.0f }; + +static constexpr skcms_TransferFunction k2Dot2 = + { 2.2f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; + +static constexpr skcms_TransferFunction kLinear = + { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; + +static constexpr skcms_TransferFunction kRec2020 = + {2.22222f, 0.909672f, 0.0903276f, 0.222222f, 0.0812429f, 0, 0}; + +static constexpr skcms_TransferFunction kPQ = + {-2.0f, -107/128.0f, 1.0f, 32/2523.0f, 2413/128.0f, -2392/128.0f, 8192/1305.0f }; + +static constexpr skcms_TransferFunction kHLG = + {-3.0f, 2.0f, 2.0f, 1/0.17883277f, 0.28466892f, 0.55991073f, 0.0f }; + +} // namespace SkNamedTransferFn + +namespace SkNamedGamut { + +static constexpr skcms_Matrix3x3 kSRGB = {{ + // ICC fixed-point (16.16) representation, taken from skcms. Please keep them exactly in sync. + // 0.436065674f, 0.385147095f, 0.143066406f, + // 0.222488403f, 0.716873169f, 0.060607910f, + // 0.013916016f, 0.097076416f, 0.714096069f, + { SkFixedToFloat(0x6FA2), SkFixedToFloat(0x6299), SkFixedToFloat(0x24A0) }, + { SkFixedToFloat(0x38F5), SkFixedToFloat(0xB785), SkFixedToFloat(0x0F84) }, + { SkFixedToFloat(0x0390), SkFixedToFloat(0x18DA), SkFixedToFloat(0xB6CF) }, +}}; + +static constexpr skcms_Matrix3x3 kAdobeRGB = {{ + // ICC fixed-point (16.16) repesentation of: + // 0.60974, 0.20528, 0.14919, + // 0.31111, 0.62567, 0.06322, + // 0.01947, 0.06087, 0.74457, + { SkFixedToFloat(0x9c18), SkFixedToFloat(0x348d), SkFixedToFloat(0x2631) }, + { SkFixedToFloat(0x4fa5), SkFixedToFloat(0xa02c), SkFixedToFloat(0x102f) }, + { SkFixedToFloat(0x04fc), SkFixedToFloat(0x0f95), SkFixedToFloat(0xbe9c) }, +}}; + +static constexpr skcms_Matrix3x3 kDisplayP3 = {{ + { 0.515102f, 0.291965f, 0.157153f }, + { 0.241182f, 0.692236f, 0.0665819f }, + { -0.00104941f, 0.0418818f, 0.784378f }, +}}; + +static constexpr skcms_Matrix3x3 kRec2020 = {{ + { 0.673459f, 0.165661f, 0.125100f }, + { 0.279033f, 0.675338f, 0.0456288f }, + { -0.00193139f, 0.0299794f, 0.797162f }, +}}; + +static constexpr skcms_Matrix3x3 kXYZ = {{ + { 1.0f, 0.0f, 0.0f }, + { 0.0f, 1.0f, 0.0f }, + { 0.0f, 0.0f, 1.0f }, +}}; + +} // namespace SkNamedGamut + +class SK_API SkColorSpace : public SkNVRefCnt<SkColorSpace> { +public: + /** + * Create the sRGB color space. + */ + static sk_sp<SkColorSpace> MakeSRGB(); + + /** + * Colorspace with the sRGB primaries, but a linear (1.0) gamma. + */ + static sk_sp<SkColorSpace> MakeSRGBLinear(); + + /** + * Create an SkColorSpace from a transfer function and a row-major 3x3 transformation to XYZ. + */ + static sk_sp<SkColorSpace> MakeRGB(const skcms_TransferFunction& transferFn, + const skcms_Matrix3x3& toXYZ); + + /** + * Create an SkColorSpace from a parsed (skcms) ICC profile. + */ + static sk_sp<SkColorSpace> Make(const skcms_ICCProfile&); + + /** + * Convert this color space to an skcms ICC profile struct. 
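+     *
+     * Example (an illustrative sketch, not part of the upstream header):
+     *   skcms_ICCProfile profile;
+     *   SkColorSpace::MakeSRGB()->toProfile(&profile);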
+ */ + void toProfile(skcms_ICCProfile*) const; + + /** + * Returns true if the color space gamma is near enough to be approximated as sRGB. + */ + bool gammaCloseToSRGB() const; + + /** + * Returns true if the color space gamma is linear. + */ + bool gammaIsLinear() const; + + /** + * Sets |fn| to the transfer function from this color space. Returns true if the transfer + * function can be represented as coefficients to the standard ICC 7-parameter equation. + * Returns false otherwise (eg, PQ, HLG). + */ + bool isNumericalTransferFn(skcms_TransferFunction* fn) const; + + /** + * Returns true and sets |toXYZD50| if the color gamut can be described as a matrix. + * Returns false otherwise. + */ + bool toXYZD50(skcms_Matrix3x3* toXYZD50) const; + + /** + * Returns a hash of the gamut transformation to XYZ D50. Allows for fast equality checking + * of gamuts, at the (very small) risk of collision. + */ + uint32_t toXYZD50Hash() const { return fToXYZD50Hash; } + + /** + * Returns a color space with the same gamut as this one, but with a linear gamma. + * For color spaces whose gamut can not be described in terms of XYZ D50, returns + * linear sRGB. + */ + sk_sp<SkColorSpace> makeLinearGamma() const; + + /** + * Returns a color space with the same gamut as this one, with with the sRGB transfer + * function. For color spaces whose gamut can not be described in terms of XYZ D50, returns + * sRGB. + */ + sk_sp<SkColorSpace> makeSRGBGamma() const; + + /** + * Returns a color space with the same transfer function as this one, but with the primary + * colors rotated. For any XYZ space, this produces a new color space that maps RGB to GBR + * (when applied to a source), and maps RGB to BRG (when applied to a destination). For other + * types of color spaces, returns nullptr. + * + * This is used for testing, to construct color spaces that have severe and testable behavior. + */ + sk_sp<SkColorSpace> makeColorSpin() const; + + /** + * Returns true if the color space is sRGB. + * Returns false otherwise. + * + * This allows a little bit of tolerance, given that we might see small numerical error + * in some cases: converting ICC fixed point to float, converting white point to D50, + * rounding decisions on transfer function and matrix. + * + * This does not consider a 2.2f exponential transfer function to be sRGB. While these + * functions are similar (and it is sometimes useful to consider them together), this + * function checks for logical equality. + */ + bool isSRGB() const; + + /** + * Returns nullptr on failure. Fails when we fallback to serializing ICC data and + * the data is too large to serialize. + */ + sk_sp<SkData> serialize() const; + + /** + * If |memory| is nullptr, returns the size required to serialize. + * Otherwise, serializes into |memory| and returns the size. + */ + size_t writeToMemory(void* memory) const; + + static sk_sp<SkColorSpace> Deserialize(const void* data, size_t length); + + /** + * If both are null, we return true. If one is null and the other is not, we return false. + * If both are non-null, we do a deeper compare. 
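+     *
+     * Example (an illustrative sketch, not part of the upstream header):
+     *   sk_sp<SkColorSpace> srgb = SkColorSpace::MakeSRGB();
+     *   sk_sp<SkColorSpace> linear = SkColorSpace::MakeSRGBLinear();
+     *   SkColorSpace::Equals(srgb.get(), srgb.get());     // true
+     *   SkColorSpace::Equals(srgb.get(), linear.get());   // false: transfer functions differ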
+ */ + static bool Equals(const SkColorSpace*, const SkColorSpace*); + + void transferFn(float gabcdef[7]) const; // DEPRECATED: Remove when webview usage is gone + void transferFn(skcms_TransferFunction* fn) const; + void invTransferFn(skcms_TransferFunction* fn) const; + void gamutTransformTo(const SkColorSpace* dst, skcms_Matrix3x3* src_to_dst) const; + + uint32_t transferFnHash() const { return fTransferFnHash; } + uint64_t hash() const { return (uint64_t)fTransferFnHash << 32 | fToXYZD50Hash; } + +private: + friend class SkColorSpaceSingletonFactory; + + SkColorSpace(const skcms_TransferFunction& transferFn, const skcms_Matrix3x3& toXYZ); + + void computeLazyDstFields() const; + + uint32_t fTransferFnHash; + uint32_t fToXYZD50Hash; + + skcms_TransferFunction fTransferFn; + skcms_Matrix3x3 fToXYZD50; + + mutable skcms_TransferFunction fInvTransferFn; + mutable skcms_Matrix3x3 fFromXYZD50; + mutable SkOnce fLazyDstFieldsOnce; +}; + +#endif diff --git a/src/deps/skia/include/core/SkContourMeasure.h b/src/deps/skia/include/core/SkContourMeasure.h new file mode 100644 index 000000000..08a50b110 --- /dev/null +++ b/src/deps/skia/include/core/SkContourMeasure.h @@ -0,0 +1,131 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkContourMeasure_DEFINED +#define SkContourMeasure_DEFINED + +#include "include/core/SkPath.h" +#include "include/core/SkRefCnt.h" +#include "include/private/SkTDArray.h" + +struct SkConic; + +class SK_API SkContourMeasure : public SkRefCnt { +public: + /** Return the length of the contour. + */ + SkScalar length() const { return fLength; } + + /** Pins distance to 0 <= distance <= length(), and then computes the corresponding + * position and tangent. + */ + bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position, + SkVector* tangent) const; + + enum MatrixFlags { + kGetPosition_MatrixFlag = 0x01, + kGetTangent_MatrixFlag = 0x02, + kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag + }; + + /** Pins distance to 0 <= distance <= getLength(), and then computes + the corresponding matrix (by calling getPosTan). + Returns false if there is no path, or a zero-length path was specified, in which case + matrix is unchanged. + */ + bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix, + MatrixFlags flags = kGetPosAndTan_MatrixFlag) const; + + /** Given a start and stop distance, return in dst the intervening segment(s). + If the segment is zero-length, return false, else return true. + startD and stopD are pinned to legal values (0..getLength()). If startD > stopD + then return false (and leave dst untouched). 
+ Begin the segment with a moveTo if startWithMoveTo is true + */ + bool SK_WARN_UNUSED_RESULT getSegment(SkScalar startD, SkScalar stopD, SkPath* dst, + bool startWithMoveTo) const; + + /** Return true if the contour is closed() + */ + bool isClosed() const { return fIsClosed; } + +private: + struct Segment { + SkScalar fDistance; // total distance up to this point + unsigned fPtIndex; // index into the fPts array + unsigned fTValue : 30; + unsigned fType : 2; // actually the enum SkSegType + // See SkPathMeasurePriv.h + + SkScalar getScalarT() const; + + static const Segment* Next(const Segment* seg) { + unsigned ptIndex = seg->fPtIndex; + do { + ++seg; + } while (seg->fPtIndex == ptIndex); + return seg; + } + + }; + + const SkTDArray<Segment> fSegments; + const SkTDArray<SkPoint> fPts; // Points used to define the segments + + const SkScalar fLength; + const bool fIsClosed; + + SkContourMeasure(SkTDArray<Segment>&& segs, SkTDArray<SkPoint>&& pts, + SkScalar length, bool isClosed); + ~SkContourMeasure() override {} + + const Segment* distanceToSegment(SkScalar distance, SkScalar* t) const; + + friend class SkContourMeasureIter; +}; + +class SK_API SkContourMeasureIter { +public: + SkContourMeasureIter(); + /** + * Initialize the Iter with a path. + * The parts of the path that are needed are copied, so the client is free to modify/delete + * the path after this call. + * + * resScale controls the precision of the measure. values > 1 increase the + * precision (and possibly slow down the computation). + */ + SkContourMeasureIter(const SkPath& path, bool forceClosed, SkScalar resScale = 1); + ~SkContourMeasureIter(); + + /** + * Reset the Iter with a path. + * The parts of the path that are needed are copied, so the client is free to modify/delete + * the path after this call. + */ + void reset(const SkPath& path, bool forceClosed, SkScalar resScale = 1); + + /** + * Iterates through contours in path, returning a contour-measure object for each contour + * in the path. Returns null when it is done. + * + * This only returns non-zero length contours, where a contour is the segments between + * a kMove_Verb and either ... + * - the next kMove_Verb + * - kClose_Verb (1 or more) + * - kDone_Verb + * If it encounters a zero-length contour, it is skipped. + */ + sk_sp<SkContourMeasure> next(); + +private: + class Impl; + + std::unique_ptr<Impl> fImpl; +}; + +#endif diff --git a/src/deps/skia/include/core/SkCoverageMode.h b/src/deps/skia/include/core/SkCoverageMode.h new file mode 100644 index 000000000..ea5b73d1a --- /dev/null +++ b/src/deps/skia/include/core/SkCoverageMode.h @@ -0,0 +1,30 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCoverageMode_DEFINED +#define SkCoverageMode_DEFINED + +#include "include/core/SkTypes.h" + +/** + * Describes geometric operations (ala SkRegion::Op) that can be applied to coverage bytes. + * These can be thought of as variants of porter-duff (SkBlendMode) modes, but only applied + * to the alpha channel. + * + * See SkMaskFilter for ways to use these when combining two different masks. 
+ */ +enum class SkCoverageMode { + kUnion, // A ∪ B A+B-A*B + kIntersect, // A ∩ B A*B + kDifference, // A - B A*(1-B) + kReverseDifference, // B - A B*(1-A) + kXor, // A ⊕ B A+B-2*A*B + + kLast = kXor, +}; + +#endif diff --git a/src/deps/skia/include/core/SkCubicMap.h b/src/deps/skia/include/core/SkCubicMap.h new file mode 100644 index 000000000..7389b92af --- /dev/null +++ b/src/deps/skia/include/core/SkCubicMap.h @@ -0,0 +1,45 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCubicMap_DEFINED +#define SkCubicMap_DEFINED + +#include "include/core/SkPoint.h" + +/** + * Fast evaluation of a cubic ease-in / ease-out curve. This is defined as a parametric cubic + * curve inside the unit square. + * + * pt[0] is implicitly { 0, 0 } + * pt[3] is implicitly { 1, 1 } + * pts[1,2].X are inside the unit [0..1] + */ +class SK_API SkCubicMap { +public: + SkCubicMap(SkPoint p1, SkPoint p2); + + static bool IsLinear(SkPoint p1, SkPoint p2) { + return SkScalarNearlyEqual(p1.fX, p1.fY) && SkScalarNearlyEqual(p2.fX, p2.fY); + } + + float computeYFromX(float x) const; + + SkPoint computeFromT(float t) const; + +private: + enum Type { + kLine_Type, // x == y + kCubeRoot_Type, // At^3 == x + kSolver_Type, // general monotonic cubic solver + }; + + SkPoint fCoeff[3]; + Type fType; +}; + +#endif + diff --git a/src/deps/skia/include/core/SkCustomMesh.h b/src/deps/skia/include/core/SkCustomMesh.h new file mode 100644 index 000000000..f9bddf338 --- /dev/null +++ b/src/deps/skia/include/core/SkCustomMesh.h @@ -0,0 +1,202 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCustomMesh_DEFINED +#define SkCustomMesh_DEFINED + +#include "include/core/SkTypes.h" + +#ifdef SK_ENABLE_SKSL +#include "include/core/SkColorSpace.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkRect.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkSpan.h" +#include "include/core/SkString.h" + +#include <vector> + +namespace SkSL { struct Program; } + +/** + * A specification for custom meshes. Specifies the vertex buffer attributes and stride, the + * vertex program that produces a user-defined set of varyings, a fragment program that ingests + * the interpolated varyings and produces local coordinates and optionally a color. + * + * The signature of the vertex program must be: + * float2 main(Attributes, out Varyings) + * where the return value is a local position that will be transformed by SkCanvas's matrix. + * + * The signature of the fragment program must be either: + * (float2|void) main(Varyings) + * or + * (float2|void) main(Varyings, out (half4|float4) color) + * + * where the return value is the local coordinates that will be used to access SkShader. If the + * return type is void then the interpolated position from vertex shader return is used as the local + * coordinate. If the color variant is used it will be blended with SkShader (or SkPaint color in + * absence of a shader) using the SkBlender provided to the SkCanvas draw call. + */ +class SkCustomMeshSpecification : public SkNVRefCnt<SkCustomMeshSpecification> { +public: + /** These values are enforced when creating a specification. 
*/ + static constexpr size_t kMaxStride = 1024; + static constexpr size_t kMaxAttributes = 8; + static constexpr size_t kStrideAlignment = 4; + static constexpr size_t kOffsetAlignment = 4; + static constexpr size_t kMaxVaryings = 6; + + struct Attribute { + enum class Type : uint32_t { // CPU representation Shader Type + kFloat, // float float + kFloat2, // two floats float2 + kFloat3, // three floats float3 + kFloat4, // four floats float4 + kUByte4_unorm, // four bytes half4 + + kLast = kUByte4_unorm + }; + Type type; + size_t offset; + SkString name; + }; + + struct Varying { + enum class Type : uint32_t { + kFloat, // "float" + kFloat2, // "float2" + kFloat3, // "float3" + kFloat4, // "float4" + kHalf, // "half" + kHalf2, // "half2" + kHalf3, // "half3" + kHalf4, // "half4" + + kLast = kHalf4 + }; + Type type; + SkString name; + }; + + ~SkCustomMeshSpecification(); + + struct Result { + sk_sp<SkCustomMeshSpecification> specification; + SkString error; + }; + + /** + * If successful the return is a specification and an empty error string. Otherwise, it is a + * null specification a non-empty error string. + * + * @param attributes The vertex attributes that will be consumed by 'vs'. Attributes need + * not be tightly packed but attribute offsets must be aligned to + * kOffsetAlignment and offset + size may not be greater than + * 'vertexStride'. At least one attribute is required. + * @param vertexStride The offset between successive attribute values. This must be aligned to + * kStrideAlignment. + * @param varyings The varyings that will be written by 'vs' and read by 'fs'. This may + * be empty. + * @param vs The vertex shader code that computes a vertex position and the varyings + * from the attributes. + * @param fs The fragment code that computes a local coordinate and optionally a + * color from the varyings. The local coordinate is used to sample + * SkShader. + * @param cs The colorspace of the color produced by 'fs'. Ignored if 'fs's main() + * function does not have a color out param. + * @param at The alpha type of the color produced by 'fs'. Ignored if 'fs's main() + * function does not have a color out param. Cannot be kUnknown. 
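+     *
+     * Example (an illustrative sketch; the attribute/varying names and SkSL strings below are
+     * hypothetical, not taken from this header):
+     *   using Spec = SkCustomMeshSpecification;
+     *   Spec::Attribute attrs[]    = {{Spec::Attribute::Type::kFloat2, 0, SkString("pos")}};
+     *   Spec::Varying   varyings[] = {{Spec::Varying::Type::kFloat2, SkString("uv")}};
+     *   Spec::Result r = Spec::Make(SkMakeSpan(attrs), /*vertexStride=*/8, SkMakeSpan(varyings),
+     *           SkString("float2 main(Attributes a, out Varyings v) { v.uv = a.pos; return a.pos; }"),
+     *           SkString("float2 main(Varyings v) { return v.uv; }"));
+     *   // r.specification is non-null on success; otherwise r.error describes the failure.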
+ */ + static Result Make(SkSpan<const Attribute> attributes, + size_t vertexStride, + SkSpan<const Varying> varyings, + const SkString& vs, + const SkString& fs, + sk_sp<SkColorSpace> cs = SkColorSpace::MakeSRGB(), + SkAlphaType at = kPremul_SkAlphaType); + + SkSpan<const Attribute> attributes() const { return SkMakeSpan(fAttributes); } + + size_t stride() const { return fStride; } + +private: + friend struct SkCustomMeshSpecificationPriv; + + enum class ColorType { + kNone, + kHalf4, + kFloat4, + }; + + static Result MakeFromSourceWithStructs(SkSpan<const Attribute> attributes, + size_t stride, + SkSpan<const Varying> varyings, + const SkString& vs, + const SkString& fs, + sk_sp<SkColorSpace> cs, + SkAlphaType at); + + SkCustomMeshSpecification(SkSpan<const Attribute>, + size_t, + SkSpan<const Varying>, + std::unique_ptr<SkSL::Program>, + std::unique_ptr<SkSL::Program>, + ColorType, + bool hasLocalCoords, + sk_sp<SkColorSpace>, + SkAlphaType); + + SkCustomMeshSpecification(const SkCustomMeshSpecification&) = delete; + SkCustomMeshSpecification(SkCustomMeshSpecification&&) = delete; + + SkCustomMeshSpecification& operator=(const SkCustomMeshSpecification&) = delete; + SkCustomMeshSpecification& operator=(SkCustomMeshSpecification&&) = delete; + + const std::vector<Attribute> fAttributes; + const std::vector<Varying> fVaryings; + std::unique_ptr<SkSL::Program> fVS; + std::unique_ptr<SkSL::Program> fFS; + size_t fStride; + uint32_t fHash; + ColorType fColorType; + bool fHasLocalCoords; + sk_sp<SkColorSpace> fColorSpace; + SkAlphaType fAlphaType; +}; + +/** + * This is a placeholder object. We will want something that allows the client to incrementally + * update the mesh that can be synchronized with the GPU backend without requiring extra copies. + * + * A buffer of vertices, a topology, optionally indices, and a compatible SkCustomMeshSpecification. + * The data in 'vb' is expected to contain the attributes described in 'spec' for 'vcount' vertices. + * The size of the buffer must be at least spec->stride()*vcount (even if vertex attributes contains + * pad at the end of the stride). If 'bounds' does not contain all points output by 'spec''s vertex + * program when applied to the vertices in 'vb' a draw of the custom mesh produces undefined + * results. + * + * If indices is null then then 'icount' must be <= 0. 'vcount' vertices will be selected from 'vb' + * to create the topology indicated by 'mode'. + * + * If indices is not null then icount must be >= 3. 'vb' will be indexed by 'icount' successive + * values in 'indices' to create the topology indicated by 'mode'. The values in 'indices' must be + * less than 'vcount' + */ +struct SkCustomMesh { + enum class Mode { kTriangles, kTriangleStrip }; + sk_sp<SkCustomMeshSpecification> spec; + Mode mode = Mode::kTriangles; + SkRect bounds = SkRect::MakeEmpty(); + const void* vb = nullptr; + int vcount = 0; + const uint16_t* indices = nullptr; + int icount = 0; +}; + +#endif // SK_ENABLE_SKSL + +#endif diff --git a/src/deps/skia/include/core/SkData.h b/src/deps/skia/include/core/SkData.h new file mode 100644 index 000000000..2a4b40d5f --- /dev/null +++ b/src/deps/skia/include/core/SkData.h @@ -0,0 +1,188 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkData_DEFINED +#define SkData_DEFINED + +#include <stdio.h> + +#include "include/core/SkRefCnt.h" + +class SkStream; + +/** + * SkData holds an immutable data buffer. 
Not only is the data immutable, + * but the actual ptr that is returned (by data() or bytes()) is guaranteed + * to always be the same for the life of this instance. + */ +class SK_API SkData final : public SkNVRefCnt<SkData> { +public: + /** + * Returns the number of bytes stored. + */ + size_t size() const { return fSize; } + + bool isEmpty() const { return 0 == fSize; } + + /** + * Returns the ptr to the data. + */ + const void* data() const { return fPtr; } + + /** + * Like data(), returns a read-only ptr into the data, but in this case + * it is cast to uint8_t*, to make it easy to add an offset to it. + */ + const uint8_t* bytes() const { + return reinterpret_cast<const uint8_t*>(fPtr); + } + + /** + * USE WITH CAUTION. + * This call will assert that the refcnt is 1, as a precaution against modifying the + * contents when another client/thread has access to the data. + */ + void* writable_data() { + if (fSize) { + // only assert we're unique if we're not empty + SkASSERT(this->unique()); + } + return const_cast<void*>(fPtr); + } + + /** + * Helper to copy a range of the data into a caller-provided buffer. + * Returns the actual number of bytes copied, after clamping offset and + * length to the size of the data. If buffer is NULL, it is ignored, and + * only the computed number of bytes is returned. + */ + size_t copyRange(size_t offset, size_t length, void* buffer) const; + + /** + * Returns true if these two objects have the same length and contents, + * effectively returning 0 == memcmp(...) + */ + bool equals(const SkData* other) const; + + /** + * Function that, if provided, will be called when the SkData goes out + * of scope, allowing for custom allocation/freeing of the data's contents. + */ + typedef void (*ReleaseProc)(const void* ptr, void* context); + + /** + * Create a new dataref by copying the specified data + */ + static sk_sp<SkData> MakeWithCopy(const void* data, size_t length); + + + /** + * Create a new data with uninitialized contents. The caller should call writable_data() + * to write into the buffer, but this must be done before another ref() is made. + */ + static sk_sp<SkData> MakeUninitialized(size_t length); + + /** + * Create a new data with zero-initialized contents. The caller should call writable_data() + * to write into the buffer, but this must be done before another ref() is made. + */ + static sk_sp<SkData> MakeZeroInitialized(size_t length); + + /** + * Create a new dataref by copying the specified c-string + * (a null-terminated array of bytes). The returned SkData will have size() + * equal to strlen(cstr) + 1. If cstr is NULL, it will be treated the same + * as "". + */ + static sk_sp<SkData> MakeWithCString(const char cstr[]); + + /** + * Create a new dataref, taking the ptr as is, and using the + * releaseproc to free it. The proc may be NULL. + */ + static sk_sp<SkData> MakeWithProc(const void* ptr, size_t length, ReleaseProc proc, void* ctx); + + /** + * Call this when the data parameter is already const and will outlive the lifetime of the + * SkData. Suitable for with const globals. + */ + static sk_sp<SkData> MakeWithoutCopy(const void* data, size_t length) { + return MakeWithProc(data, length, NoopReleaseProc, nullptr); + } + + /** + * Create a new dataref from a pointer allocated by malloc. The Data object + * takes ownership of that allocation, and will handling calling sk_free. + */ + static sk_sp<SkData> MakeFromMalloc(const void* data, size_t length); + + /** + * Create a new dataref the file with the specified path. 
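+     *
+     *  Example (an illustrative sketch; the path is hypothetical):
+     *    sk_sp<SkData> data = SkData::MakeFromFileName("/path/to/file.bin");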
+ * If the file cannot be opened, this returns NULL. + */ + static sk_sp<SkData> MakeFromFileName(const char path[]); + + /** + * Create a new dataref from a stdio FILE. + * This does not take ownership of the FILE, nor close it. + * The caller is free to close the FILE at its convenience. + * The FILE must be open for reading only. + * Returns NULL on failure. + */ + static sk_sp<SkData> MakeFromFILE(FILE* f); + + /** + * Create a new dataref from a file descriptor. + * This does not take ownership of the file descriptor, nor close it. + * The caller is free to close the file descriptor at its convenience. + * The file descriptor must be open for reading only. + * Returns NULL on failure. + */ + static sk_sp<SkData> MakeFromFD(int fd); + + /** + * Attempt to read size bytes into a SkData. If the read succeeds, return the data, + * else return NULL. Either way the stream's cursor may have been changed as a result + * of calling read(). + */ + static sk_sp<SkData> MakeFromStream(SkStream*, size_t size); + + /** + * Create a new dataref using a subset of the data in the specified + * src dataref. + */ + static sk_sp<SkData> MakeSubset(const SkData* src, size_t offset, size_t length); + + /** + * Returns a new empty dataref (or a reference to a shared empty dataref). + * New or shared, the caller must see that unref() is eventually called. + */ + static sk_sp<SkData> MakeEmpty(); + +private: + friend class SkNVRefCnt<SkData>; + ReleaseProc fReleaseProc; + void* fReleaseProcContext; + const void* fPtr; + size_t fSize; + + SkData(const void* ptr, size_t size, ReleaseProc, void* context); + explicit SkData(size_t size); // inplace new/delete + ~SkData(); + + // Ensure the unsized delete is called. + void operator delete(void* p); + + // shared internal factory + static sk_sp<SkData> PrivateNewWithCopy(const void* srcOrNull, size_t length); + + static void NoopReleaseProc(const void*, void*); // {} + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/core/SkDataTable.h b/src/deps/skia/include/core/SkDataTable.h new file mode 100644 index 000000000..a6a510b7b --- /dev/null +++ b/src/deps/skia/include/core/SkDataTable.h @@ -0,0 +1,118 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkDataTable_DEFINED +#define SkDataTable_DEFINED + +#include "include/core/SkData.h" +#include "include/private/SkTDArray.h" + +/** + * Like SkData, SkDataTable holds an immutable data buffer. The data buffer is + * organized into a table of entries, each with a length, so the entries are + * not required to all be the same size. + */ +class SK_API SkDataTable : public SkRefCnt { +public: + /** + * Returns true if the table is empty (i.e. has no entries). + */ + bool isEmpty() const { return 0 == fCount; } + + /** + * Return the number of entries in the table. 0 for an empty table + */ + int count() const { return fCount; } + + /** + * Return the size of the index'th entry in the table. The caller must + * ensure that index is valid for this table. + */ + size_t atSize(int index) const; + + /** + * Return a pointer to the data of the index'th entry in the table. + * The caller must ensure that index is valid for this table. + * + * @param size If non-null, this returns the byte size of this entry. This + * will be the same value that atSize(index) would return. 
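+     *
+     * Example (an illustrative sketch; 'table' is a hypothetical sk_sp<SkDataTable>):
+     *   size_t size;
+     *   const void* entry = table->at(0, &size);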
+ */ + const void* at(int index, size_t* size = nullptr) const; + + template <typename T> + const T* atT(int index, size_t* size = nullptr) const { + return reinterpret_cast<const T*>(this->at(index, size)); + } + + /** + * Returns the index'th entry as a c-string, and assumes that the trailing + * null byte had been copied into the table as well. + */ + const char* atStr(int index) const { + size_t size; + const char* str = this->atT<const char>(index, &size); + SkASSERT(strlen(str) + 1 == size); + return str; + } + + typedef void (*FreeProc)(void* context); + + static sk_sp<SkDataTable> MakeEmpty(); + + /** + * Return a new DataTable that contains a copy of the data stored in each + * "array". + * + * @param ptrs array of points to each element to be copied into the table. + * @param sizes array of byte-lengths for each entry in the corresponding + * ptrs[] array. + * @param count the number of array elements in ptrs[] and sizes[] to copy. + */ + static sk_sp<SkDataTable> MakeCopyArrays(const void * const * ptrs, + const size_t sizes[], int count); + + /** + * Return a new table that contains a copy of the data in array. + * + * @param array contiguous array of data for all elements to be copied. + * @param elemSize byte-length for a given element. + * @param count the number of entries to be copied out of array. The number + * of bytes that will be copied is count * elemSize. + */ + static sk_sp<SkDataTable> MakeCopyArray(const void* array, size_t elemSize, int count); + + static sk_sp<SkDataTable> MakeArrayProc(const void* array, size_t elemSize, int count, + FreeProc proc, void* context); + +private: + struct Dir { + const void* fPtr; + uintptr_t fSize; + }; + + int fCount; + size_t fElemSize; + union { + const Dir* fDir; + const char* fElems; + } fU; + + FreeProc fFreeProc; + void* fFreeProcContext; + + SkDataTable(); + SkDataTable(const void* array, size_t elemSize, int count, + FreeProc, void* context); + SkDataTable(const Dir*, int count, FreeProc, void* context); + ~SkDataTable() override; + + friend class SkDataTableBuilder; // access to Dir + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/core/SkDeferredDisplayList.h b/src/deps/skia/include/core/SkDeferredDisplayList.h new file mode 100644 index 000000000..28e460fa8 --- /dev/null +++ b/src/deps/skia/include/core/SkDeferredDisplayList.h @@ -0,0 +1,110 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkDeferredDisplayList_DEFINED +#define SkDeferredDisplayList_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkSurfaceCharacterization.h" +#include "include/core/SkTypes.h" + +class SkDeferredDisplayListPriv; + +#if SK_SUPPORT_GPU +#include "include/gpu/GrRecordingContext.h" +#include "include/private/SkTArray.h" +#include <map> +class GrRenderTask; +class GrRenderTargetProxy; +#else +using GrRenderTargetProxy = SkRefCnt; +#endif + +/* + * This class contains pre-processed gpu operations that can be replayed into + * an SkSurface via SkSurface::draw(SkDeferredDisplayList*). + */ +class SkDeferredDisplayList : public SkNVRefCnt<SkDeferredDisplayList> { +public: + SK_API ~SkDeferredDisplayList(); + + SK_API const SkSurfaceCharacterization& characterization() const { + return fCharacterization; + } + +#if SK_SUPPORT_GPU + /** + * Iterate through the programs required by the DDL. 
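+     *
+     * Example (an illustrative sketch; 'dContext' and 'ddl' are hypothetical variables):
+     *   for (SkDeferredDisplayList::ProgramIterator it(dContext, ddl.get()); !it.done(); it.next()) {
+     *       it.compile();   // pre-compiles the program; a cache hit does no work
+     *   }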
+ */ + class SK_API ProgramIterator { + public: + ProgramIterator(GrDirectContext*, SkDeferredDisplayList*); + ~ProgramIterator(); + + // This returns true if any work was done. Getting a cache hit does not count as work. + bool compile(); + bool done() const; + void next(); + + private: + GrDirectContext* fDContext; + const SkTArray<GrRecordingContext::ProgramData>& fProgramData; + int fIndex; + }; +#endif + + // Provides access to functions that aren't part of the public API. + SkDeferredDisplayListPriv priv(); + const SkDeferredDisplayListPriv priv() const; // NOLINT(readability-const-return-type) + +private: + friend class GrDrawingManager; // for access to 'fRenderTasks', 'fLazyProxyData', 'fArenas' + friend class SkDeferredDisplayListRecorder; // for access to 'fLazyProxyData' + friend class SkDeferredDisplayListPriv; + + // This object is the source from which the lazy proxy backing the DDL will pull its backing + // texture when the DDL is replayed. It has to be separately ref counted bc the lazy proxy + // can outlive the DDL. + class LazyProxyData : public SkRefCnt { +#if SK_SUPPORT_GPU + public: + // Upon being replayed - this field will be filled in (by the DrawingManager) with the + // proxy backing the destination SkSurface. Note that, since there is no good place to + // clear it, it can become a dangling pointer. Additionally, since the renderTargetProxy + // doesn't get a ref here, the SkSurface that owns it must remain alive until the DDL + // is flushed. + // TODO: the drawing manager could ref the renderTargetProxy for the DDL and then add + // a renderingTask to unref it after the DDL's ops have been executed. + GrRenderTargetProxy* fReplayDest = nullptr; +#endif + }; + + SK_API SkDeferredDisplayList(const SkSurfaceCharacterization& characterization, + sk_sp<GrRenderTargetProxy> fTargetProxy, + sk_sp<LazyProxyData>); + +#if SK_SUPPORT_GPU + const SkTArray<GrRecordingContext::ProgramData>& programData() const { + return fProgramData; + } +#endif + + const SkSurfaceCharacterization fCharacterization; + +#if SK_SUPPORT_GPU + // These are ordered such that the destructor cleans op tasks up first (which may refer back + // to the arena and memory pool in their destructors). + GrRecordingContext::OwnedArenas fArenas; + SkTArray<sk_sp<GrRenderTask>> fRenderTasks; + + SkTArray<GrRecordingContext::ProgramData> fProgramData; + sk_sp<GrRenderTargetProxy> fTargetProxy; + sk_sp<LazyProxyData> fLazyProxyData; +#endif +}; + +#endif diff --git a/src/deps/skia/include/core/SkDeferredDisplayListRecorder.h b/src/deps/skia/include/core/SkDeferredDisplayListRecorder.h new file mode 100644 index 000000000..30a4a73ee --- /dev/null +++ b/src/deps/skia/include/core/SkDeferredDisplayListRecorder.h @@ -0,0 +1,97 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkDeferredDisplayListRecorder_DEFINED +#define SkDeferredDisplayListRecorder_DEFINED + +#include "include/core/SkDeferredDisplayList.h" +#include "include/core/SkImage.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkSurfaceCharacterization.h" +#include "include/core/SkTypes.h" + +class GrBackendFormat; +class GrBackendTexture; +class GrRecordingContext; +class GrYUVABackendTextureInfo; +class SkCanvas; +class SkSurface; + +/* + * This class is intended to be used as: + * Get an SkSurfaceCharacterization representing the intended gpu-backed destination SkSurface + * Create one of these (an SkDeferredDisplayListRecorder) on the stack + * Get the canvas and render into it + * Snap off and hold on to an SkDeferredDisplayList + * Once your app actually needs the pixels, call SkSurface::draw(SkDeferredDisplayList*) + * + * This class never accesses the GPU but performs all the cpu work it can. It + * is thread-safe (i.e., one can break a scene into tiles and perform their cpu-side + * work in parallel ahead of time). + */ +class SK_API SkDeferredDisplayListRecorder { +public: + SkDeferredDisplayListRecorder(const SkSurfaceCharacterization&); + ~SkDeferredDisplayListRecorder(); + + const SkSurfaceCharacterization& characterization() const { + return fCharacterization; + } + + // The backing canvas will become invalid (and this entry point will return + // null) once 'detach' is called. + // Note: ownership of the SkCanvas is not transferred via this call. + SkCanvas* getCanvas(); + + sk_sp<SkDeferredDisplayList> detach(); + +#if SK_SUPPORT_GPU + using PromiseImageTextureContext = SkImage::PromiseImageTextureContext; + using PromiseImageTextureFulfillProc = SkImage::PromiseImageTextureFulfillProc; + using PromiseImageTextureReleaseProc = SkImage::PromiseImageTextureReleaseProc; + +#ifndef SK_MAKE_PROMISE_TEXTURE_DISABLE_LEGACY_API + /** Deprecated: Use SkImage::MakePromiseTexture instead. */ + sk_sp<SkImage> makePromiseTexture(const GrBackendFormat& backendFormat, + int width, + int height, + GrMipmapped mipMapped, + GrSurfaceOrigin origin, + SkColorType colorType, + SkAlphaType alphaType, + sk_sp<SkColorSpace> colorSpace, + PromiseImageTextureFulfillProc textureFulfillProc, + PromiseImageTextureReleaseProc textureReleaseProc, + PromiseImageTextureContext textureContext); + + /** Deprecated: Use SkImage::MakePromiseYUVATexture instead. */ + sk_sp<SkImage> makeYUVAPromiseTexture(const GrYUVABackendTextureInfo& yuvaBackendTextureInfo, + sk_sp<SkColorSpace> imageColorSpace, + PromiseImageTextureFulfillProc textureFulfillProc, + PromiseImageTextureReleaseProc textureReleaseProc, + PromiseImageTextureContext textureContexts[]); +#endif // SK_MAKE_PROMISE_TEXTURE_DISABLE_LEGACY_API +#endif // SK_SUPPORT_GPU + +private: + SkDeferredDisplayListRecorder(const SkDeferredDisplayListRecorder&) = delete; + SkDeferredDisplayListRecorder& operator=(const SkDeferredDisplayListRecorder&) = delete; + + bool init(); + + const SkSurfaceCharacterization fCharacterization; + +#if SK_SUPPORT_GPU + sk_sp<GrRecordingContext> fContext; + sk_sp<GrRenderTargetProxy> fTargetProxy; + sk_sp<SkDeferredDisplayList::LazyProxyData> fLazyProxyData; + sk_sp<SkSurface> fSurface; +#endif +}; + +#endif diff --git a/src/deps/skia/include/core/SkDocument.h b/src/deps/skia/include/core/SkDocument.h new file mode 100644 index 000000000..eacfb2c04 --- /dev/null +++ b/src/deps/skia/include/core/SkDocument.h @@ -0,0 +1,91 @@ +/* + * Copyright 2013 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkDocument_DEFINED +#define SkDocument_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkScalar.h" + +class SkCanvas; +class SkWStream; +struct SkRect; + +/** SK_ScalarDefaultDPI is 72 dots per inch. */ +static constexpr SkScalar SK_ScalarDefaultRasterDPI = 72.0f; + +/** + * High-level API for creating a document-based canvas. To use.. + * + * 1. Create a document, specifying a stream to store the output. + * 2. For each "page" of content: + * a. canvas = doc->beginPage(...) + * b. draw_my_content(canvas); + * c. doc->endPage(); + * 3. Close the document with doc->close(). + */ +class SK_API SkDocument : public SkRefCnt { +public: + + /** + * Begin a new page for the document, returning the canvas that will draw + * into the page. The document owns this canvas, and it will go out of + * scope when endPage() or close() is called, or the document is deleted. + */ + SkCanvas* beginPage(SkScalar width, SkScalar height, const SkRect* content = nullptr); + + /** + * Call endPage() when the content for the current page has been drawn + * (into the canvas returned by beginPage()). After this call the canvas + * returned by beginPage() will be out-of-scope. + */ + void endPage(); + + /** + * Call close() when all pages have been drawn. This will close the file + * or stream holding the document's contents. After close() the document + * can no longer add new pages. Deleting the document will automatically + * call close() if need be. + */ + void close(); + + /** + * Call abort() to stop producing the document immediately. + * The stream output must be ignored, and should not be trusted. + */ + void abort(); + +protected: + SkDocument(SkWStream*); + + // note: subclasses must call close() in their destructor, as the base class + // cannot do this for them. + ~SkDocument() override; + + virtual SkCanvas* onBeginPage(SkScalar width, SkScalar height) = 0; + virtual void onEndPage() = 0; + virtual void onClose(SkWStream*) = 0; + virtual void onAbort() = 0; + + // Allows subclasses to write to the stream as pages are written. + SkWStream* getStream() { return fStream; } + + enum State { + kBetweenPages_State, + kInPage_State, + kClosed_State + }; + State getState() const { return fState; } + +private: + SkWStream* fStream; + State fState; + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/core/SkDrawLooper.h b/src/deps/skia/include/core/SkDrawLooper.h new file mode 100644 index 000000000..69d341c25 --- /dev/null +++ b/src/deps/skia/include/core/SkDrawLooper.h @@ -0,0 +1,135 @@ + +/* + * Copyright 2011 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + + +#ifndef SkDrawLooper_DEFINED +#define SkDrawLooper_DEFINED + +#include "include/core/SkBlurTypes.h" +#include "include/core/SkColor.h" +#include "include/core/SkFlattenable.h" +#include "include/core/SkPoint.h" +#include <functional> // std::function + +#ifndef SK_SUPPORT_LEGACY_DRAWLOOPER +#error "SkDrawLooper is unsupported" +#endif + +class SkArenaAlloc; +class SkCanvas; +class SkMatrix; +class SkPaint; +struct SkRect; + +/** \class SkDrawLooper + DEPRECATED: No longer supported in Skia. +*/ +class SK_API SkDrawLooper : public SkFlattenable { +public: + /** + * Holds state during a draw. Users call next() until it returns false. 
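+     *
+     * Illustrative sketch of that loop (assuming `ctx` came from makeContext()
+     * and `paint` is the caller's paint, restored to its original state on
+     * every pass):
+     *
+     *   SkDrawLooper::Context::Info info;
+     *   while (ctx->next(&info, &paint)) {
+     *       // draw the object here; `info` describes the translate for this pass
+     *   }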
+ * + * Subclasses of SkDrawLooper should create a subclass of this object to + * hold state specific to their subclass. + */ + class SK_API Context { + public: + Context() {} + virtual ~Context() {} + + struct Info { + SkVector fTranslate; + bool fApplyPostCTM; + + void applyToCTM(SkMatrix* ctm) const; + void applyToCanvas(SkCanvas*) const; + }; + + /** + * Called in a loop on objects returned by SkDrawLooper::createContext(). + * Each time true is returned, the object is drawn (possibly with a modified + * canvas and/or paint). When false is finally returned, drawing for the object + * stops. + * + * On each call, the paint will be in its original state, but the + * canvas will be as it was following the previous call to next() or + * createContext(). + * + * The implementation must ensure that, when next() finally returns + * false, the canvas has been restored to the state it was + * initially, before createContext() was first called. + */ + virtual bool next(Info*, SkPaint*) = 0; + + private: + Context(const Context&) = delete; + Context& operator=(const Context&) = delete; + }; + + /** + * Called right before something is being drawn. Returns a Context + * whose next() method should be called until it returns false. + */ + virtual Context* makeContext(SkArenaAlloc*) const = 0; + + /** + * The fast bounds functions are used to enable the paint to be culled early + * in the drawing pipeline. If a subclass can support this feature it must + * return true for the canComputeFastBounds() function. If that function + * returns false then computeFastBounds behavior is undefined otherwise it + * is expected to have the following behavior. Given the parent paint and + * the parent's bounding rect the subclass must fill in and return the + * storage rect, where the storage rect is with the union of the src rect + * and the looper's bounding rect. + */ + bool canComputeFastBounds(const SkPaint& paint) const; + void computeFastBounds(const SkPaint& paint, const SkRect& src, SkRect* dst) const; + + struct BlurShadowRec { + SkScalar fSigma; + SkVector fOffset; + SkColor fColor; + SkBlurStyle fStyle; + }; + /** + * If this looper can be interpreted as having two layers, such that + * 1. The first layer (bottom most) just has a blur and translate + * 2. The second layer has no modifications to either paint or canvas + * 3. No other layers. + * then return true, and if not null, fill out the BlurShadowRec). + * + * If any of the above are not met, return false and ignore the BlurShadowRec parameter. + */ + virtual bool asABlurShadow(BlurShadowRec*) const; + + static SkFlattenable::Type GetFlattenableType() { + return kSkDrawLooper_Type; + } + + SkFlattenable::Type getFlattenableType() const override { + return kSkDrawLooper_Type; + } + + static sk_sp<SkDrawLooper> Deserialize(const void* data, size_t size, + const SkDeserialProcs* procs = nullptr) { + return sk_sp<SkDrawLooper>(static_cast<SkDrawLooper*>( + SkFlattenable::Deserialize( + kSkDrawLooper_Type, data, size, procs).release())); + } + + void apply(SkCanvas* canvas, const SkPaint& paint, + std::function<void(SkCanvas*, const SkPaint&)>); + +protected: + SkDrawLooper() {} + +private: + using INHERITED = SkFlattenable; +}; + +#endif diff --git a/src/deps/skia/include/core/SkDrawable.h b/src/deps/skia/include/core/SkDrawable.h new file mode 100644 index 000000000..8d605f80d --- /dev/null +++ b/src/deps/skia/include/core/SkDrawable.h @@ -0,0 +1,160 @@ +/* + * Copyright 2014 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkDrawable_DEFINED +#define SkDrawable_DEFINED + +#include "include/core/SkFlattenable.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkScalar.h" + +class GrBackendDrawableInfo; +class SkCanvas; +class SkMatrix; +class SkPicture; +enum class GrBackendApi : unsigned; +struct SkRect; + +/** + * Base-class for objects that draw into SkCanvas. + * + * The object has a generation ID, which is guaranteed to be unique across all drawables. To + * allow for clients of the drawable that may want to cache the results, the drawable must + * change its generation ID whenever its internal state changes such that it will draw differently. + */ +class SK_API SkDrawable : public SkFlattenable { +public: + /** + * Draws into the specified content. The drawing sequence will be balanced upon return + * (i.e. the saveLevel() on the canvas will match what it was when draw() was called, + * and the current matrix and clip settings will not be changed. + */ + void draw(SkCanvas*, const SkMatrix* = nullptr); + void draw(SkCanvas*, SkScalar x, SkScalar y); + + /** + * When using the GPU backend it is possible for a drawable to execute using the underlying 3D + * API rather than the SkCanvas API. It does so by creating a GpuDrawHandler. The GPU backend + * is deferred so the handler will be given access to the 3D API at the correct point in the + * drawing stream as the GPU backend flushes. Since the drawable may mutate, each time it is + * drawn to a GPU-backed canvas a new handler is snapped, representing the drawable's state at + * the time of the snap. + * + * When the GPU backend flushes to the 3D API it will call the draw method on the + * GpuDrawHandler. At this time the drawable may add commands to the stream of GPU commands for + * the unerlying 3D API. The draw function takes a GrBackendDrawableInfo which contains + * information about the current state of 3D API which the caller must respect. See + * GrBackendDrawableInfo for more specific details on what information is sent and the + * requirements for different 3D APIs. + * + * Additionaly there may be a slight delay from when the drawable adds its commands to when + * those commands are actually submitted to the GPU. Thus the drawable or GpuDrawHandler is + * required to keep any resources that are used by its added commands alive and valid until + * those commands are submitted to the GPU. The GpuDrawHandler will be kept alive and then + * deleted once the commands are submitted to the GPU. The dtor of the GpuDrawHandler is the + * signal to the drawable that the commands have all been submitted. Different 3D APIs may have + * additional requirements for certain resources which require waiting for the GPU to finish + * all work on those resources before reusing or deleting them. In this case, the drawable can + * use the dtor call of the GpuDrawHandler to add a fence to the GPU to track when the GPU work + * has completed. + * + * Currently this is only supported for the GPU Vulkan backend. + */ + + class GpuDrawHandler { + public: + virtual ~GpuDrawHandler() {} + + virtual void draw(const GrBackendDrawableInfo&) {} + }; + + /** + * Snaps off a GpuDrawHandler to represent the state of the SkDrawable at the time the snap is + * called. This is used for executing GPU backend specific draws intermixed with normal Skia GPU + * draws. 
The GPU API, which will be used for the draw, as well as the full matrix, device clip + * bounds and imageInfo of the target buffer are passed in as inputs. + */ + std::unique_ptr<GpuDrawHandler> snapGpuDrawHandler(GrBackendApi backendApi, + const SkMatrix& matrix, + const SkIRect& clipBounds, + const SkImageInfo& bufferInfo) { + return this->onSnapGpuDrawHandler(backendApi, matrix, clipBounds, bufferInfo); + } + + SkPicture* newPictureSnapshot(); + + /** + * Return a unique value for this instance. If two calls to this return the same value, + * it is presumed that calling the draw() method will render the same thing as well. + * + * Subclasses that change their state should call notifyDrawingChanged() to ensure that + * a new value will be returned the next time it is called. + */ + uint32_t getGenerationID(); + + /** + * Return the (conservative) bounds of what the drawable will draw. If the drawable can + * change what it draws (e.g. animation or in response to some external change), then this + * must return a bounds that is always valid for all possible states. + */ + SkRect getBounds(); + + /** + * Calling this invalidates the previous generation ID, and causes a new one to be computed + * the next time getGenerationID() is called. Typically this is called by the object itself, + * in response to its internal state changing. + */ + void notifyDrawingChanged(); + + static SkFlattenable::Type GetFlattenableType() { + return kSkDrawable_Type; + } + + SkFlattenable::Type getFlattenableType() const override { + return kSkDrawable_Type; + } + + static sk_sp<SkDrawable> Deserialize(const void* data, size_t size, + const SkDeserialProcs* procs = nullptr) { + return sk_sp<SkDrawable>(static_cast<SkDrawable*>( + SkFlattenable::Deserialize( + kSkDrawable_Type, data, size, procs).release())); + } + + Factory getFactory() const override { return nullptr; } + const char* getTypeName() const override { return nullptr; } + +protected: + SkDrawable(); + + virtual SkRect onGetBounds() = 0; + virtual void onDraw(SkCanvas*) = 0; + + virtual std::unique_ptr<GpuDrawHandler> onSnapGpuDrawHandler(GrBackendApi, const SkMatrix&, + const SkIRect& /*clipBounds*/, + const SkImageInfo&) { + return nullptr; + } + + // TODO: Delete this once Android gets updated to take the clipBounds version above. + virtual std::unique_ptr<GpuDrawHandler> onSnapGpuDrawHandler(GrBackendApi, const SkMatrix&) { + return nullptr; + } + + /** + * Default implementation calls onDraw() with a canvas that records into a picture. Subclasses + * may override if they have a more efficient way to return a picture for the current state + * of their drawable. Note: this picture must draw the same as what would be drawn from + * onDraw(). + */ + virtual SkPicture* onNewPictureSnapshot(); + +private: + int32_t fGenerationID; +}; + +#endif diff --git a/src/deps/skia/include/core/SkEncodedImageFormat.h b/src/deps/skia/include/core/SkEncodedImageFormat.h new file mode 100644 index 000000000..97add6dea --- /dev/null +++ b/src/deps/skia/include/core/SkEncodedImageFormat.h @@ -0,0 +1,35 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkEncodedImageFormat_DEFINED +#define SkEncodedImageFormat_DEFINED + +#include <stdint.h> + +/** + * Enum describing format of encoded data. 
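+ *
+ * Illustrative sketch of how callers commonly branch on the format; the MIME
+ * mapping below is an assumption made for the example, not something Skia
+ * defines:
+ *
+ *   const char* mimeTypeFor(SkEncodedImageFormat fmt) {
+ *       switch (fmt) {
+ *           case SkEncodedImageFormat::kPNG:  return "image/png";
+ *           case SkEncodedImageFormat::kJPEG: return "image/jpeg";
+ *           case SkEncodedImageFormat::kWEBP: return "image/webp";
+ *           case SkEncodedImageFormat::kGIF:  return "image/gif";
+ *           default:                          return "application/octet-stream";
+ *       }
+ *   }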
+ */ +enum class SkEncodedImageFormat { +#ifdef SK_BUILD_FOR_GOOGLE3 + kUnknown, +#endif + kBMP, + kGIF, + kICO, + kJPEG, + kPNG, + kWBMP, + kWEBP, + kPKM, + kKTX, + kASTC, + kDNG, + kHEIF, + kAVIF, +}; + +#endif // SkEncodedImageFormat_DEFINED diff --git a/src/deps/skia/include/core/SkExecutor.h b/src/deps/skia/include/core/SkExecutor.h new file mode 100644 index 000000000..88e2ca6e5 --- /dev/null +++ b/src/deps/skia/include/core/SkExecutor.h @@ -0,0 +1,41 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkExecutor_DEFINED +#define SkExecutor_DEFINED + +#include <functional> +#include <memory> +#include "include/core/SkTypes.h" + +class SK_API SkExecutor { +public: + virtual ~SkExecutor(); + + // Create a thread pool SkExecutor with a fixed thread count, by default the number of cores. + static std::unique_ptr<SkExecutor> MakeFIFOThreadPool(int threads = 0, + bool allowBorrowing = true); + static std::unique_ptr<SkExecutor> MakeLIFOThreadPool(int threads = 0, + bool allowBorrowing = true); + + // There is always a default SkExecutor available by calling SkExecutor::GetDefault(). + static SkExecutor& GetDefault(); + static void SetDefault(SkExecutor*); // Does not take ownership. Not thread safe. + + // Add work to execute. + virtual void add(std::function<void(void)>) = 0; + + // If it makes sense for this executor, use this thread to execute work for a little while. + virtual void borrow() {} + +protected: + SkExecutor() = default; + SkExecutor(const SkExecutor&) = delete; + SkExecutor& operator=(const SkExecutor&) = delete; +}; + +#endif//SkExecutor_DEFINED diff --git a/src/deps/skia/include/core/SkFlattenable.h b/src/deps/skia/include/core/SkFlattenable.h new file mode 100644 index 000000000..916ee174f --- /dev/null +++ b/src/deps/skia/include/core/SkFlattenable.h @@ -0,0 +1,113 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFlattenable_DEFINED +#define SkFlattenable_DEFINED + +#include "include/core/SkRefCnt.h" + +class SkData; +class SkReadBuffer; +class SkWriteBuffer; + +struct SkSerialProcs; +struct SkDeserialProcs; + +/** \class SkFlattenable + + SkFlattenable is the base class for objects that need to be flattened + into a data stream for either transport or as part of the key to the + font cache. + */ +class SK_API SkFlattenable : public SkRefCnt { +public: + enum Type { + kSkColorFilter_Type, + kSkBlender_Type, + kSkDrawable_Type, + kSkDrawLooper_Type, // no longer used internally by Skia + kSkImageFilter_Type, + kSkMaskFilter_Type, + kSkPathEffect_Type, + kSkShader_Type, + }; + + typedef sk_sp<SkFlattenable> (*Factory)(SkReadBuffer&); + + SkFlattenable() {} + + /** Implement this to return a factory function pointer that can be called + to recreate your class given a buffer (previously written to by your + override of flatten(). + */ + virtual Factory getFactory() const = 0; + + /** + * Returns the name of the object's class. + */ + virtual const char* getTypeName() const = 0; + + static Factory NameToFactory(const char name[]); + static const char* FactoryToName(Factory); + + static void Register(const char name[], Factory); + + /** + * Override this if your subclass needs to record data that it will need to recreate itself + * from its CreateProc (returned by getFactory()). 
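+     *
+     * Illustrative sketch of the usual flatten()/CreateProc pairing; MyEffect,
+     * fRadius, and the writeScalar()/readScalar() buffer helpers used here are
+     * assumptions made for this example:
+     *
+     *   void MyEffect::flatten(SkWriteBuffer& buffer) const {
+     *       buffer.writeScalar(fRadius);          // record enough state to rebuild
+     *   }
+     *   sk_sp<SkFlattenable> MyEffect::CreateProc(SkReadBuffer& buffer) {
+     *       return sk_make_sp<MyEffect>(buffer.readScalar());
+     *   }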
+ * + * DEPRECATED public : will move to protected ... use serialize() instead + */ + virtual void flatten(SkWriteBuffer&) const {} + + virtual Type getFlattenableType() const = 0; + + // + // public ways to serialize / deserialize + // + sk_sp<SkData> serialize(const SkSerialProcs* = nullptr) const; + size_t serialize(void* memory, size_t memory_size, + const SkSerialProcs* = nullptr) const; + static sk_sp<SkFlattenable> Deserialize(Type, const void* data, size_t length, + const SkDeserialProcs* procs = nullptr); + +protected: + class PrivateInitializer { + public: + static void InitEffects(); + static void InitImageFilters(); + }; + +private: + static void RegisterFlattenablesIfNeeded(); + static void Finalize(); + + friend class SkGraphics; + + using INHERITED = SkRefCnt; +}; + +#if defined(SK_DISABLE_EFFECT_DESERIALIZATION) + #define SK_REGISTER_FLATTENABLE(type) do{}while(false) + + #define SK_FLATTENABLE_HOOKS(type) \ + static sk_sp<SkFlattenable> CreateProc(SkReadBuffer&); \ + friend class SkFlattenable::PrivateInitializer; \ + Factory getFactory() const override { return nullptr; } \ + const char* getTypeName() const override { return #type; } +#else + #define SK_REGISTER_FLATTENABLE(type) \ + SkFlattenable::Register(#type, type::CreateProc) + + #define SK_FLATTENABLE_HOOKS(type) \ + static sk_sp<SkFlattenable> CreateProc(SkReadBuffer&); \ + friend class SkFlattenable::PrivateInitializer; \ + Factory getFactory() const override { return type::CreateProc; } \ + const char* getTypeName() const override { return #type; } +#endif + +#endif diff --git a/src/deps/skia/include/core/SkFont.h b/src/deps/skia/include/core/SkFont.h new file mode 100644 index 000000000..947e4dd77 --- /dev/null +++ b/src/deps/skia/include/core/SkFont.h @@ -0,0 +1,534 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFont_DEFINED +#define SkFont_DEFINED + +#include "include/core/SkFontTypes.h" +#include "include/core/SkScalar.h" +#include "include/core/SkTypeface.h" + +#include <vector> + +class SkMatrix; +class SkPaint; +class SkPath; +struct SkFontMetrics; + +/** \class SkFont + SkFont controls options applied when drawing and measuring text. +*/ +class SK_API SkFont { +public: + /** Whether edge pixels draw opaque or with partial transparency. + */ + enum class Edging { + kAlias, //!< no transparent pixels on glyph edges + kAntiAlias, //!< may have transparent pixels on glyph edges + kSubpixelAntiAlias, //!< glyph positioned in pixel using transparency + }; + + /** Constructs SkFont with default values. + + @return default initialized SkFont + */ + SkFont(); + + /** Constructs SkFont with default values with SkTypeface and size in points. + + @param typeface font and style used to draw and measure text + @param size typographic height of text + @return initialized SkFont + */ + SkFont(sk_sp<SkTypeface> typeface, SkScalar size); + + /** Constructs SkFont with default values with SkTypeface. + + @param typeface font and style used to draw and measure text + @return initialized SkFont + */ + explicit SkFont(sk_sp<SkTypeface> typeface); + + + /** Constructs SkFont with default values with SkTypeface and size in points, + horizontal scale, and horizontal skew. Horizontal scale emulates condensed + and expanded fonts. Horizontal skew emulates oblique fonts. 
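+
+        Illustrative example (where `face` is assumed to be a valid sk_sp<SkTypeface>):
+
+            SkFont condensed(face, 14.0f, 0.8f,  0.0f);   // narrower glyphs
+            SkFont oblique  (face, 14.0f, 1.0f, -0.25f);  // sheared to fake an oblique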
+ + @param typeface font and style used to draw and measure text + @param size typographic height of text + @param scaleX text horizontal scale + @param skewX additional shear on x-axis relative to y-axis + @return initialized SkFont + */ + SkFont(sk_sp<SkTypeface> typeface, SkScalar size, SkScalar scaleX, SkScalar skewX); + + + /** Compares SkFont and font, and returns true if they are equivalent. + May return false if SkTypeface has identical contents but different pointers. + + @param font font to compare + @return true if SkFont pair are equivalent + */ + bool operator==(const SkFont& font) const; + + /** Compares SkFont and font, and returns true if they are not equivalent. + May return true if SkTypeface has identical contents but different pointers. + + @param font font to compare + @return true if SkFont pair are not equivalent + */ + bool operator!=(const SkFont& font) const { return !(*this == font); } + + /** If true, instructs the font manager to always hint glyphs. + Returned value is only meaningful if platform uses FreeType as the font manager. + + @return true if all glyphs are hinted + */ + bool isForceAutoHinting() const { return SkToBool(fFlags & kForceAutoHinting_PrivFlag); } + + /** Returns true if font engine may return glyphs from font bitmaps instead of from outlines. + + @return true if glyphs may be font bitmaps + */ + bool isEmbeddedBitmaps() const { return SkToBool(fFlags & kEmbeddedBitmaps_PrivFlag); } + + /** Returns true if glyphs may be drawn at sub-pixel offsets. + + @return true if glyphs may be drawn at sub-pixel offsets. + */ + bool isSubpixel() const { return SkToBool(fFlags & kSubpixel_PrivFlag); } + + /** Returns true if font and glyph metrics are requested to be linearly scalable. + + @return true if font and glyph metrics are requested to be linearly scalable. + */ + bool isLinearMetrics() const { return SkToBool(fFlags & kLinearMetrics_PrivFlag); } + + /** Returns true if bold is approximated by increasing the stroke width when creating glyph + bitmaps from outlines. + + @return bold is approximated through stroke width + */ + bool isEmbolden() const { return SkToBool(fFlags & kEmbolden_PrivFlag); } + + /** Returns true if baselines will be snapped to pixel positions when the current transformation + matrix is axis aligned. + + @return baselines may be snapped to pixels + */ + bool isBaselineSnap() const { return SkToBool(fFlags & kBaselineSnap_PrivFlag); } + + /** Sets whether to always hint glyphs. + If forceAutoHinting is set, instructs the font manager to always hint glyphs. + + Only affects platforms that use FreeType as the font manager. + + @param forceAutoHinting setting to always hint glyphs + */ + void setForceAutoHinting(bool forceAutoHinting); + + /** Requests, but does not require, to use bitmaps in fonts instead of outlines. + + @param embeddedBitmaps setting to use bitmaps in fonts + */ + void setEmbeddedBitmaps(bool embeddedBitmaps); + + /** Requests, but does not require, that glyphs respect sub-pixel positioning. + + @param subpixel setting for sub-pixel positioning + */ + void setSubpixel(bool subpixel); + + /** Requests, but does not require, linearly scalable font and glyph metrics. + + For outline fonts 'true' means font and glyph metrics should ignore hinting and rounding. + Note that some bitmap formats may not be able to scale linearly and will ignore this flag. + + @param linearMetrics setting for linearly scalable font and glyph metrics. 
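+
+        A typical configuration sketch for crisp UI text (illustrative; these
+        settings are one reasonable combination rather than a recommendation,
+        and `face` is an assumed sk_sp<SkTypeface>):
+
+            SkFont font(face, 12.0f);
+            font.setSubpixel(true);
+            font.setLinearMetrics(true);
+            font.setEdging(SkFont::Edging::kAntiAlias);
+            font.setHinting(SkFontHinting::kSlight);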
+ */ + void setLinearMetrics(bool linearMetrics); + + /** Increases stroke width when creating glyph bitmaps to approximate a bold typeface. + + @param embolden setting for bold approximation + */ + void setEmbolden(bool embolden); + + /** Requests that baselines be snapped to pixels when the current transformation matrix is axis + aligned. + + @param baselineSnap setting for baseline snapping to pixels + */ + void setBaselineSnap(bool baselineSnap); + + /** Whether edge pixels draw opaque or with partial transparency. + */ + Edging getEdging() const { return (Edging)fEdging; } + + /** Requests, but does not require, that edge pixels draw opaque or with + partial transparency. + */ + void setEdging(Edging edging); + + /** Sets level of glyph outline adjustment. + Does not check for valid values of hintingLevel. + */ + void setHinting(SkFontHinting hintingLevel); + + /** Returns level of glyph outline adjustment. + */ + SkFontHinting getHinting() const { return (SkFontHinting)fHinting; } + + /** Returns a font with the same attributes of this font, but with the specified size. + Returns nullptr if size is less than zero, infinite, or NaN. + + @param size typographic height of text + @return initialized SkFont + */ + SkFont makeWithSize(SkScalar size) const; + + /** Returns SkTypeface if set, or nullptr. + Does not alter SkTypeface SkRefCnt. + + @return SkTypeface if previously set, nullptr otherwise + */ + SkTypeface* getTypeface() const {return fTypeface.get(); } + + /** Returns SkTypeface if set, or the default typeface. + Does not alter SkTypeface SkRefCnt. + + @return SkTypeface if previously set or, a pointer to the default typeface if not + previously set. + */ + SkTypeface* getTypefaceOrDefault() const; + + /** Returns text size in points. + + @return typographic height of text + */ + SkScalar getSize() const { return fSize; } + + /** Returns text scale on x-axis. + Default value is 1. + + @return text horizontal scale + */ + SkScalar getScaleX() const { return fScaleX; } + + /** Returns text skew on x-axis. + Default value is zero. + + @return additional shear on x-axis relative to y-axis + */ + SkScalar getSkewX() const { return fSkewX; } + + /** Increases SkTypeface SkRefCnt by one. + + @return SkTypeface if previously set, nullptr otherwise + */ + sk_sp<SkTypeface> refTypeface() const { return fTypeface; } + + /** Increases SkTypeface SkRefCnt by one. + + @return SkTypeface if previously set or, a pointer to the default typeface if not + previously set. + */ + sk_sp<SkTypeface> refTypefaceOrDefault() const; + + /** Sets SkTypeface to typeface, decreasing SkRefCnt of the previous SkTypeface. + Pass nullptr to clear SkTypeface and use the default typeface. Increments + tf SkRefCnt by one. + + @param tf font and style used to draw text + */ + void setTypeface(sk_sp<SkTypeface> tf) { fTypeface = tf; } + + /** Sets text size in points. + Has no effect if textSize is not greater than or equal to zero. + + @param textSize typographic height of text + */ + void setSize(SkScalar textSize); + + /** Sets text scale on x-axis. + Default value is 1. + + @param scaleX text horizontal scale + */ + void setScaleX(SkScalar scaleX); + + /** Sets text skew on x-axis. + Default value is zero. + + @param skewX additional shear on x-axis relative to y-axis + */ + void setSkewX(SkScalar skewX); + + /** Converts text into glyph indices. + Returns the number of glyph indices represented by text. + SkTextEncoding specifies how text represents characters or glyphs. 
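+
+        Typical two-pass use (illustrative; `font`, `utf8`, and `len` are assumed
+        inputs, and countText() is declared further below):
+
+            int n = font.countText(utf8, len, SkTextEncoding::kUTF8);
+            std::vector<SkGlyphID> glyphs(n);
+            font.textToGlyphs(utf8, len, SkTextEncoding::kUTF8, glyphs.data(), n);
+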
+ glyphs may be nullptr, to compute the glyph count. + + Does not check text for valid character codes or valid glyph indices. + + If byteLength equals zero, returns zero. + If byteLength includes a partial character, the partial character is ignored. + + If encoding is SkTextEncoding::kUTF8 and text contains an invalid UTF-8 sequence, + zero is returned. + + When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or + SkTextEncoding::kUTF32; then each Unicode codepoint is mapped to a + single glyph. This function uses the default character-to-glyph + mapping from the SkTypeface and maps characters not found in the + SkTypeface to zero. + + If maxGlyphCount is not sufficient to store all the glyphs, no glyphs are copied. + The total glyph count is returned for subsequent buffer reallocation. + + @param text character storage encoded with SkTextEncoding + @param byteLength length of character storage in bytes + @param glyphs storage for glyph indices; may be nullptr + @param maxGlyphCount storage capacity + @return number of glyphs represented by text of length byteLength + */ + int textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding, + SkGlyphID glyphs[], int maxGlyphCount) const; + + /** Returns glyph index for Unicode character. + + If the character is not supported by the SkTypeface, returns 0. + + @param uni Unicode character + @return glyph index + */ + SkGlyphID unicharToGlyph(SkUnichar uni) const; + + void unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const; + + /** Returns number of glyphs represented by text. + + If encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or + SkTextEncoding::kUTF32; then each Unicode codepoint is mapped to a + single glyph. + + @param text character storage encoded with SkTextEncoding + @param byteLength length of character storage in bytes + @return number of glyphs represented by text of length byteLength + */ + int countText(const void* text, size_t byteLength, SkTextEncoding encoding) const { + return this->textToGlyphs(text, byteLength, encoding, nullptr, 0); + } + + /** Returns the advance width of text. + The advance is the normal distance to move before drawing additional text. + Returns the bounding box of text if bounds is not nullptr. + + @param text character storage encoded with SkTextEncoding + @param byteLength length of character storage in bytes + @param bounds returns bounding box relative to (0, 0) if not nullptr + @return number of glyphs represented by text of length byteLength + */ + SkScalar measureText(const void* text, size_t byteLength, SkTextEncoding encoding, + SkRect* bounds = nullptr) const { + return this->measureText(text, byteLength, encoding, bounds, nullptr); + } + + /** Returns the advance width of text. + The advance is the normal distance to move before drawing additional text. + Returns the bounding box of text if bounds is not nullptr. The paint + stroke settings, mask filter, or path effect may modify the bounds. + + @param text character storage encoded with SkTextEncoding + @param byteLength length of character storage in bytes + @param bounds returns bounding box relative to (0, 0) if not nullptr + @param paint optional; may be nullptr + @return number of glyphs represented by text of length byteLength + */ + SkScalar measureText(const void* text, size_t byteLength, SkTextEncoding encoding, + SkRect* bounds, const SkPaint* paint) const; + + /** DEPRECATED + Retrieves the advance and bounds for each glyph in glyphs. 
+ Both widths and bounds may be nullptr. + If widths is not nullptr, widths must be an array of count entries. + if bounds is not nullptr, bounds must be an array of count entries. + + @param glyphs array of glyph indices to be measured + @param count number of glyphs + @param widths returns text advances for each glyph; may be nullptr + @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr + */ + void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[], SkRect bounds[]) const { + this->getWidthsBounds(glyphs, count, widths, bounds, nullptr); + } + + // DEPRECATED + void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[], std::nullptr_t) const { + this->getWidths(glyphs, count, widths); + } + + /** Retrieves the advance and bounds for each glyph in glyphs. + Both widths and bounds may be nullptr. + If widths is not nullptr, widths must be an array of count entries. + if bounds is not nullptr, bounds must be an array of count entries. + + @param glyphs array of glyph indices to be measured + @param count number of glyphs + @param widths returns text advances for each glyph + */ + void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[]) const { + this->getWidthsBounds(glyphs, count, widths, nullptr, nullptr); + } + + /** Retrieves the advance and bounds for each glyph in glyphs. + Both widths and bounds may be nullptr. + If widths is not nullptr, widths must be an array of count entries. + if bounds is not nullptr, bounds must be an array of count entries. + + @param glyphs array of glyph indices to be measured + @param count number of glyphs + @param widths returns text advances for each glyph; may be nullptr + @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr + @param paint optional, specifies stroking, SkPathEffect and SkMaskFilter + */ + void getWidthsBounds(const SkGlyphID glyphs[], int count, SkScalar widths[], SkRect bounds[], + const SkPaint* paint) const; + + + /** Retrieves the bounds for each glyph in glyphs. + bounds must be an array of count entries. + If paint is not nullptr, its stroking, SkPathEffect, and SkMaskFilter fields are respected. + + @param glyphs array of glyph indices to be measured + @param count number of glyphs + @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr + @param paint optional, specifies stroking, SkPathEffect, and SkMaskFilter + */ + void getBounds(const SkGlyphID glyphs[], int count, SkRect bounds[], + const SkPaint* paint) const { + this->getWidthsBounds(glyphs, count, nullptr, bounds, paint); + } + + /** Retrieves the positions for each glyph, beginning at the specified origin. The caller + must allocated at least count number of elements in the pos[] array. + + @param glyphs array of glyph indices to be positioned + @param count number of glyphs + @param pos returns glyphs positions + @param origin location of the first glyph. Defaults to {0, 0}. + */ + void getPos(const SkGlyphID glyphs[], int count, SkPoint pos[], SkPoint origin = {0, 0}) const; + + /** Retrieves the x-positions for each glyph, beginning at the specified origin. The caller + must allocated at least count number of elements in the xpos[] array. + + @param glyphs array of glyph indices to be positioned + @param count number of glyphs + @param xpos returns glyphs x-positions + @param origin x-position of the first glyph. Defaults to 0. 
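+
+        Example of a simple positioned layout (illustrative; `glyphs` is assumed
+        to have been filled by textToGlyphs() above):
+
+            std::vector<SkPoint> positions(glyphs.size());
+            font.getPos(glyphs.data(), (int)glyphs.size(), positions.data(), {10.0f, 40.0f});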
+ */ + void getXPos(const SkGlyphID glyphs[], int count, SkScalar xpos[], SkScalar origin = 0) const; + + /** Returns intervals [start, end] describing lines parallel to the advance that intersect + * with the glyphs. + * + * @param glyphs the glyphs to intersect + * @param count the number of glyphs and positions + * @param pos the position of each glyph + * @param top the top of the line intersecting + * @param bottom the bottom of the line intersecting + @return array of pairs of x values [start, end]. May be empty. + */ + std::vector<SkScalar> getIntercepts(const SkGlyphID glyphs[], int count, const SkPoint pos[], + SkScalar top, SkScalar bottom, + const SkPaint* = nullptr) const; + + /** Modifies path to be the outline of the glyph. + If the glyph has an outline, modifies path to be the glyph's outline and returns true. + The glyph outline may be empty. Degenerate contours in the glyph outline will be skipped. + If glyph is described by a bitmap, returns false and ignores path parameter. + + @param glyphID index of glyph + @param path pointer to existing SkPath + @return true if glyphID is described by path + */ + bool getPath(SkGlyphID glyphID, SkPath* path) const; + + /** Returns path corresponding to glyph array. + + @param glyphIDs array of glyph indices + @param count number of glyphs + @param glyphPathProc function returning one glyph description as path + @param ctx function context + */ + void getPaths(const SkGlyphID glyphIDs[], int count, + void (*glyphPathProc)(const SkPath* pathOrNull, const SkMatrix& mx, void* ctx), + void* ctx) const; + + /** Returns SkFontMetrics associated with SkTypeface. + The return value is the recommended spacing between lines: the sum of metrics + descent, ascent, and leading. + If metrics is not nullptr, SkFontMetrics is copied to metrics. + Results are scaled by text size but does not take into account + dimensions required by text scale, text skew, fake bold, + style stroke, and SkPathEffect. + + @param metrics storage for SkFontMetrics; may be nullptr + @return recommended spacing between lines + */ + SkScalar getMetrics(SkFontMetrics* metrics) const; + + /** Returns the recommended spacing between lines: the sum of metrics + descent, ascent, and leading. + Result is scaled by text size but does not take into account + dimensions required by stroking and SkPathEffect. + Returns the same result as getMetrics(). + + @return recommended spacing between lines + */ + SkScalar getSpacing() const { return this->getMetrics(nullptr); } + + /** Dumps fields of the font to SkDebugf. May change its output over time, so clients should + * not rely on this for anything specific. Used to aid in debugging. 
+ */ + void dump() const; + +private: + enum PrivFlags { + kForceAutoHinting_PrivFlag = 1 << 0, + kEmbeddedBitmaps_PrivFlag = 1 << 1, + kSubpixel_PrivFlag = 1 << 2, + kLinearMetrics_PrivFlag = 1 << 3, + kEmbolden_PrivFlag = 1 << 4, + kBaselineSnap_PrivFlag = 1 << 5, + }; + + static constexpr unsigned kAllFlags = kForceAutoHinting_PrivFlag + | kEmbeddedBitmaps_PrivFlag + | kSubpixel_PrivFlag + | kLinearMetrics_PrivFlag + | kEmbolden_PrivFlag + | kBaselineSnap_PrivFlag; + + sk_sp<SkTypeface> fTypeface; + SkScalar fSize; + SkScalar fScaleX; + SkScalar fSkewX; + uint8_t fFlags; + uint8_t fEdging; + uint8_t fHinting; + + SkScalar setupForAsPaths(SkPaint*); + bool hasSomeAntiAliasing() const; + + friend class SkFontPriv; + friend class SkGlyphRunListPainter; + friend class SkStrikeSpec; +}; + +#endif diff --git a/src/deps/skia/include/core/SkFontArguments.h b/src/deps/skia/include/core/SkFontArguments.h new file mode 100644 index 000000000..2aaaf55ca --- /dev/null +++ b/src/deps/skia/include/core/SkFontArguments.h @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontArguments_DEFINED +#define SkFontArguments_DEFINED + +#include "include/core/SkScalar.h" +#include "include/core/SkTypes.h" + +/** Represents a set of actual arguments for a font. */ +struct SkFontArguments { + struct VariationPosition { + struct Coordinate { + SkFourByteTag axis; + float value; + }; + const Coordinate* coordinates; + int coordinateCount; + }; + + SkFontArguments() : fCollectionIndex(0), fVariationDesignPosition{nullptr, 0} {} + + /** Specify the index of the desired font. + * + * Font formats like ttc, dfont, cff, cid, pfr, t42, t1, and fon may actually be indexed + * collections of fonts. + */ + SkFontArguments& setCollectionIndex(int collectionIndex) { + fCollectionIndex = collectionIndex; + return *this; + } + + /** Specify a position in the variation design space. + * + * Any axis not specified will use the default value. + * Any specified axis not actually present in the font will be ignored. + * + * @param position not copied. The value must remain valid for life of SkFontArguments. + */ + SkFontArguments& setVariationDesignPosition(VariationPosition position) { + fVariationDesignPosition.coordinates = position.coordinates; + fVariationDesignPosition.coordinateCount = position.coordinateCount; + return *this; + } + + int getCollectionIndex() const { + return fCollectionIndex; + } + + VariationPosition getVariationDesignPosition() const { + return fVariationDesignPosition; + } +private: + int fCollectionIndex; + VariationPosition fVariationDesignPosition; +}; + +#endif diff --git a/src/deps/skia/include/core/SkFontMetrics.h b/src/deps/skia/include/core/SkFontMetrics.h new file mode 100644 index 000000000..717a87f05 --- /dev/null +++ b/src/deps/skia/include/core/SkFontMetrics.h @@ -0,0 +1,138 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMetrics_DEFINED +#define SkFontMetrics_DEFINED + +#include "include/core/SkScalar.h" + +/** \class SkFontMetrics + The metrics of an SkFont. + The metric values are consistent with the Skia y-down coordinate system. 
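+
+ Typical use is via SkFont::getMetrics() (illustrative; `font` is an assumed SkFont):
+
+     SkFontMetrics metrics;
+     SkScalar spacing   = font.getMetrics(&metrics);       // recommended line spacing
+     SkScalar baselineY = 40.0f;                           // assumed first baseline
+     SkScalar lineTop   = baselineY + metrics.fAscent;     // fAscent is typically negative
+     SkScalar nextLine  = baselineY + spacing;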
+ */ +struct SK_API SkFontMetrics { + bool operator==(const SkFontMetrics& that) { + return + this->fFlags == that.fFlags && + this->fTop == that.fTop && + this->fAscent == that.fAscent && + this->fDescent == that.fDescent && + this->fBottom == that.fBottom && + this->fLeading == that.fLeading && + this->fAvgCharWidth == that.fAvgCharWidth && + this->fMaxCharWidth == that.fMaxCharWidth && + this->fXMin == that.fXMin && + this->fXMax == that.fXMax && + this->fXHeight == that.fXHeight && + this->fCapHeight == that.fCapHeight && + this->fUnderlineThickness == that.fUnderlineThickness && + this->fUnderlinePosition == that.fUnderlinePosition && + this->fStrikeoutThickness == that.fStrikeoutThickness && + this->fStrikeoutPosition == that.fStrikeoutPosition; + } + + /** \enum FontMetricsFlags + FontMetricsFlags indicate when certain metrics are valid; + the underline or strikeout metrics may be valid and zero. + Fonts with embedded bitmaps may not have valid underline or strikeout metrics. + */ + enum FontMetricsFlags { + kUnderlineThicknessIsValid_Flag = 1 << 0, //!< set if fUnderlineThickness is valid + kUnderlinePositionIsValid_Flag = 1 << 1, //!< set if fUnderlinePosition is valid + kStrikeoutThicknessIsValid_Flag = 1 << 2, //!< set if fStrikeoutThickness is valid + kStrikeoutPositionIsValid_Flag = 1 << 3, //!< set if fStrikeoutPosition is valid + kBoundsInvalid_Flag = 1 << 4, //!< set if fTop, fBottom, fXMin, fXMax invalid + }; + + uint32_t fFlags; //!< FontMetricsFlags indicating which metrics are valid + SkScalar fTop; //!< greatest extent above origin of any glyph bounding box, typically negative; deprecated with variable fonts + SkScalar fAscent; //!< distance to reserve above baseline, typically negative + SkScalar fDescent; //!< distance to reserve below baseline, typically positive + SkScalar fBottom; //!< greatest extent below origin of any glyph bounding box, typically positive; deprecated with variable fonts + SkScalar fLeading; //!< distance to add between lines, typically positive or zero + SkScalar fAvgCharWidth; //!< average character width, zero if unknown + SkScalar fMaxCharWidth; //!< maximum character width, zero if unknown + SkScalar fXMin; //!< greatest extent to left of origin of any glyph bounding box, typically negative; deprecated with variable fonts + SkScalar fXMax; //!< greatest extent to right of origin of any glyph bounding box, typically positive; deprecated with variable fonts + SkScalar fXHeight; //!< height of lower-case 'x', zero if unknown, typically negative + SkScalar fCapHeight; //!< height of an upper-case letter, zero if unknown, typically negative + SkScalar fUnderlineThickness; //!< underline thickness + SkScalar fUnderlinePosition; //!< distance from baseline to top of stroke, typically positive + SkScalar fStrikeoutThickness; //!< strikeout thickness + SkScalar fStrikeoutPosition; //!< distance from baseline to bottom of stroke, typically negative + + /** Returns true if SkFontMetrics has a valid underline thickness, and sets + thickness to that value. If the underline thickness is not valid, + return false, and ignore thickness. + + @param thickness storage for underline width + @return true if font specifies underline width + */ + bool hasUnderlineThickness(SkScalar* thickness) const { + if (SkToBool(fFlags & kUnderlineThicknessIsValid_Flag)) { + *thickness = fUnderlineThickness; + return true; + } + return false; + } + + /** Returns true if SkFontMetrics has a valid underline position, and sets + position to that value. 
If the underline position is not valid, + return false, and ignore position. + + @param position storage for underline position + @return true if font specifies underline position + */ + bool hasUnderlinePosition(SkScalar* position) const { + if (SkToBool(fFlags & kUnderlinePositionIsValid_Flag)) { + *position = fUnderlinePosition; + return true; + } + return false; + } + + /** Returns true if SkFontMetrics has a valid strikeout thickness, and sets + thickness to that value. If the underline thickness is not valid, + return false, and ignore thickness. + + @param thickness storage for strikeout width + @return true if font specifies strikeout width + */ + bool hasStrikeoutThickness(SkScalar* thickness) const { + if (SkToBool(fFlags & kStrikeoutThicknessIsValid_Flag)) { + *thickness = fStrikeoutThickness; + return true; + } + return false; + } + + /** Returns true if SkFontMetrics has a valid strikeout position, and sets + position to that value. If the underline position is not valid, + return false, and ignore position. + + @param position storage for strikeout position + @return true if font specifies strikeout position + */ + bool hasStrikeoutPosition(SkScalar* position) const { + if (SkToBool(fFlags & kStrikeoutPositionIsValid_Flag)) { + *position = fStrikeoutPosition; + return true; + } + return false; + } + + /** Returns true if SkFontMetrics has a valid fTop, fBottom, fXMin, and fXMax. + If the bounds are not valid, return false. + + @return true if font specifies maximum glyph bounds + */ + bool hasBounds() const { + return !SkToBool(fFlags & kBoundsInvalid_Flag); + } +}; + +#endif diff --git a/src/deps/skia/include/core/SkFontMgr.h b/src/deps/skia/include/core/SkFontMgr.h new file mode 100644 index 000000000..611faa3aa --- /dev/null +++ b/src/deps/skia/include/core/SkFontMgr.h @@ -0,0 +1,157 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_DEFINED +#define SkFontMgr_DEFINED + +#include "include/core/SkFontArguments.h" +#include "include/core/SkFontStyle.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" + +class SkData; +class SkFontData; +class SkStreamAsset; +class SkString; +class SkTypeface; + +class SK_API SkFontStyleSet : public SkRefCnt { +public: + virtual int count() = 0; + virtual void getStyle(int index, SkFontStyle*, SkString* style) = 0; + virtual SkTypeface* createTypeface(int index) = 0; + virtual SkTypeface* matchStyle(const SkFontStyle& pattern) = 0; + + static SkFontStyleSet* CreateEmpty(); + +protected: + SkTypeface* matchStyleCSS3(const SkFontStyle& pattern); + +private: + using INHERITED = SkRefCnt; +}; + +class SK_API SkFontMgr : public SkRefCnt { +public: + int countFamilies() const; + void getFamilyName(int index, SkString* familyName) const; + SkFontStyleSet* createStyleSet(int index) const; + + /** + * The caller must call unref() on the returned object. + * Never returns NULL; will return an empty set if the name is not found. + * + * Passing nullptr as the parameter will return the default system family. + * Note that most systems don't have a default system family, so passing nullptr will often + * result in the empty set. + * + * It is possible that this will return a style set not accessible from + * createStyleSet(int) due to hidden or auto-activated fonts. 
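+     *
+     * Illustrative sketch of enumerating the matched set (assuming `fontMgr` is a
+     * valid sk_sp<SkFontMgr>; using SkString here would also need
+     * include/core/SkString.h):
+     *
+     *   sk_sp<SkFontStyleSet> set(fontMgr->matchFamily("Helvetica"));
+     *   for (int i = 0; i < set->count(); ++i) {
+     *       SkFontStyle style;
+     *       SkString    styleName;
+     *       set->getStyle(i, &style, &styleName);
+     *   }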
+ */ + SkFontStyleSet* matchFamily(const char familyName[]) const; + + /** + * Find the closest matching typeface to the specified familyName and style + * and return a ref to it. The caller must call unref() on the returned + * object. Will return nullptr if no 'good' match is found. + * + * Passing |nullptr| as the parameter for |familyName| will return the + * default system font. + * + * It is possible that this will return a style set not accessible from + * createStyleSet(int) or matchFamily(const char[]) due to hidden or + * auto-activated fonts. + */ + SkTypeface* matchFamilyStyle(const char familyName[], const SkFontStyle&) const; + + /** + * Use the system fallback to find a typeface for the given character. + * Note that bcp47 is a combination of ISO 639, 15924, and 3166-1 codes, + * so it is fine to just pass a ISO 639 here. + * + * Will return NULL if no family can be found for the character + * in the system fallback. + * + * Passing |nullptr| as the parameter for |familyName| will return the + * default system font. + * + * bcp47[0] is the least significant fallback, bcp47[bcp47Count-1] is the + * most significant. If no specified bcp47 codes match, any font with the + * requested character will be matched. + */ + SkTypeface* matchFamilyStyleCharacter(const char familyName[], const SkFontStyle&, + const char* bcp47[], int bcp47Count, + SkUnichar character) const; + + /** + * Create a typeface for the specified data and TTC index (pass 0 for none) + * or NULL if the data is not recognized. The caller must call unref() on + * the returned object if it is not null. + */ + sk_sp<SkTypeface> makeFromData(sk_sp<SkData>, int ttcIndex = 0) const; + + /** + * Create a typeface for the specified stream and TTC index + * (pass 0 for none) or NULL if the stream is not recognized. The caller + * must call unref() on the returned object if it is not null. + */ + sk_sp<SkTypeface> makeFromStream(std::unique_ptr<SkStreamAsset>, int ttcIndex = 0) const; + + /* Experimental, API subject to change. */ + sk_sp<SkTypeface> makeFromStream(std::unique_ptr<SkStreamAsset>, const SkFontArguments&) const; + + /** + * Create a typeface for the specified fileName and TTC index + * (pass 0 for none) or NULL if the file is not found, or its contents are + * not recognized. The caller must call unref() on the returned object + * if it is not null. + */ + sk_sp<SkTypeface> makeFromFile(const char path[], int ttcIndex = 0) const; + + sk_sp<SkTypeface> legacyMakeTypeface(const char familyName[], SkFontStyle style) const; + + /** Return the default fontmgr. */ + static sk_sp<SkFontMgr> RefDefault(); + +protected: + virtual int onCountFamilies() const = 0; + virtual void onGetFamilyName(int index, SkString* familyName) const = 0; + virtual SkFontStyleSet* onCreateStyleSet(int index)const = 0; + + /** May return NULL if the name is not found. 
*/ + virtual SkFontStyleSet* onMatchFamily(const char familyName[]) const = 0; + + virtual SkTypeface* onMatchFamilyStyle(const char familyName[], + const SkFontStyle&) const = 0; + virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&, + const char* bcp47[], int bcp47Count, + SkUnichar character) const = 0; + + virtual sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const = 0; + virtual sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, + int ttcIndex) const = 0; + virtual sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>, + const SkFontArguments&) const = 0; + virtual sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const = 0; + + virtual sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const = 0; + + // this method is never called -- will be removed + virtual SkTypeface* onMatchFaceStyle(const SkTypeface*, + const SkFontStyle&) const { + return nullptr; + } + +private: + + /** Implemented by porting layer to return the default factory. */ + static sk_sp<SkFontMgr> Factory(); + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/core/SkFontParameters.h b/src/deps/skia/include/core/SkFontParameters.h new file mode 100644 index 000000000..ae4f1d68b --- /dev/null +++ b/src/deps/skia/include/core/SkFontParameters.h @@ -0,0 +1,42 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontParameters_DEFINED +#define SkFontParameters_DEFINED + +#include "include/core/SkScalar.h" +#include "include/core/SkTypes.h" + +struct SkFontParameters { + struct Variation { + // Parameters in a variation font axis. + struct Axis { + constexpr Axis() : tag(0), min(0), def(0), max(0), flags(0) {} + constexpr Axis(SkFourByteTag tag, float min, float def, float max, bool hidden) : + tag(tag), min(min), def(def), max(max), flags(hidden ? HIDDEN : 0) {} + + // Four character identifier of the font axis (weight, width, slant, italic...). + SkFourByteTag tag; + // Minimum value supported by this axis. + float min; + // Default value set by this axis. + float def; + // Maximum value supported by this axis. The maximum can equal the minimum. + float max; + // Return whether this axis is recommended to be remain hidden in user interfaces. + bool isHidden() const { return flags & HIDDEN; } + // Set this axis to be remain hidden in user interfaces. + void setHidden(bool hidden) { flags = hidden ? (flags | HIDDEN) : (flags & ~HIDDEN); } + private: + static constexpr uint16_t HIDDEN = 0x0001; + // Attributes for a font axis. + uint16_t flags; + }; + }; +}; + +#endif diff --git a/src/deps/skia/include/core/SkFontStyle.h b/src/deps/skia/include/core/SkFontStyle.h new file mode 100644 index 000000000..04893ef2f --- /dev/null +++ b/src/deps/skia/include/core/SkFontStyle.h @@ -0,0 +1,81 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkFontStyle_DEFINED +#define SkFontStyle_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkTPin.h" + +class SK_API SkFontStyle { +public: + enum Weight { + kInvisible_Weight = 0, + kThin_Weight = 100, + kExtraLight_Weight = 200, + kLight_Weight = 300, + kNormal_Weight = 400, + kMedium_Weight = 500, + kSemiBold_Weight = 600, + kBold_Weight = 700, + kExtraBold_Weight = 800, + kBlack_Weight = 900, + kExtraBlack_Weight = 1000, + }; + + enum Width { + kUltraCondensed_Width = 1, + kExtraCondensed_Width = 2, + kCondensed_Width = 3, + kSemiCondensed_Width = 4, + kNormal_Width = 5, + kSemiExpanded_Width = 6, + kExpanded_Width = 7, + kExtraExpanded_Width = 8, + kUltraExpanded_Width = 9, + }; + + enum Slant { + kUpright_Slant, + kItalic_Slant, + kOblique_Slant, + }; + + constexpr SkFontStyle(int weight, int width, Slant slant) : fValue( + (SkTPin<int>(weight, kInvisible_Weight, kExtraBlack_Weight)) + + (SkTPin<int>(width, kUltraCondensed_Width, kUltraExpanded_Width) << 16) + + (SkTPin<int>(slant, kUpright_Slant, kOblique_Slant) << 24) + ) { } + + constexpr SkFontStyle() : SkFontStyle{kNormal_Weight, kNormal_Width, kUpright_Slant} { } + + bool operator==(const SkFontStyle& rhs) const { + return fValue == rhs.fValue; + } + + int weight() const { return fValue & 0xFFFF; } + int width() const { return (fValue >> 16) & 0xFF; } + Slant slant() const { return (Slant)((fValue >> 24) & 0xFF); } + + static constexpr SkFontStyle Normal() { + return SkFontStyle(kNormal_Weight, kNormal_Width, kUpright_Slant); + } + static constexpr SkFontStyle Bold() { + return SkFontStyle(kBold_Weight, kNormal_Width, kUpright_Slant); + } + static constexpr SkFontStyle Italic() { + return SkFontStyle(kNormal_Weight, kNormal_Width, kItalic_Slant ); + } + static constexpr SkFontStyle BoldItalic() { + return SkFontStyle(kBold_Weight, kNormal_Width, kItalic_Slant ); + } + +private: + int32_t fValue; +}; + +#endif diff --git a/src/deps/skia/include/core/SkFontTypes.h b/src/deps/skia/include/core/SkFontTypes.h new file mode 100644 index 000000000..76f5dde67 --- /dev/null +++ b/src/deps/skia/include/core/SkFontTypes.h @@ -0,0 +1,25 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontTypes_DEFINED +#define SkFontTypes_DEFINED + +enum class SkTextEncoding { + kUTF8, //!< uses bytes to represent UTF-8 or ASCII + kUTF16, //!< uses two byte words to represent most of Unicode + kUTF32, //!< uses four byte words to represent all of Unicode + kGlyphID, //!< uses two byte words to represent glyph indices +}; + +enum class SkFontHinting { + kNone, //!< glyph outlines unchanged + kSlight, //!< minimal modification to improve constrast + kNormal, //!< glyph outlines modified to improve constrast + kFull, //!< modifies glyph outlines for maximum constrast +}; + +#endif diff --git a/src/deps/skia/include/core/SkGraphics.h b/src/deps/skia/include/core/SkGraphics.h new file mode 100644 index 000000000..b68bb78ba --- /dev/null +++ b/src/deps/skia/include/core/SkGraphics.h @@ -0,0 +1,153 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
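// Illustrative sketch (editorial): SkFontStyle packs weight (low 16 bits),
// width (next 8 bits) and slant (next 8 bits) into a single int32_t, so the
// accessors simply unpack what the constructor clamped and stored.
#include "include/core/SkFontStyle.h"

static void fontStyleRoundTrip() {
    SkFontStyle style(SkFontStyle::kSemiBold_Weight,
                      SkFontStyle::kCondensed_Width,
                      SkFontStyle::kItalic_Slant);
    SkASSERT(style.weight() == SkFontStyle::kSemiBold_Weight);   // 600
    SkASSERT(style.width()  == SkFontStyle::kCondensed_Width);   // 3
    SkASSERT(style.slant()  == SkFontStyle::kItalic_Slant);
    // Out-of-range inputs are pinned: 1200 clamps to kExtraBlack_Weight (1000).
    SkASSERT(SkFontStyle(1200, SkFontStyle::kNormal_Width,
                         SkFontStyle::kUpright_Slant).weight() == 1000);
}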
+ */ + +#ifndef SkGraphics_DEFINED +#define SkGraphics_DEFINED + +#include "include/core/SkRefCnt.h" + +class SkData; +class SkImageGenerator; +class SkTraceMemoryDump; + +class SK_API SkGraphics { +public: + /** + * Call this at process initialization time if your environment does not + * permit static global initializers that execute code. + * Init() is thread-safe and idempotent. + */ + static void Init(); + + /** + * Return the max number of bytes that should be used by the font cache. + * If the cache needs to allocate more, it will purge previous entries. + * This max can be changed by calling SetFontCacheLimit(). + */ + static size_t GetFontCacheLimit(); + + /** + * Specify the max number of bytes that should be used by the font cache. + * If the cache needs to allocate more, it will purge previous entries. + * + * This function returns the previous setting, as if GetFontCacheLimit() + * had be called before the new limit was set. + */ + static size_t SetFontCacheLimit(size_t bytes); + + /** + * Return the number of bytes currently used by the font cache. + */ + static size_t GetFontCacheUsed(); + + /** + * Return the number of entries in the font cache. + * A cache "entry" is associated with each typeface + pointSize + matrix. + */ + static int GetFontCacheCountUsed(); + + /** + * Return the current limit to the number of entries in the font cache. + * A cache "entry" is associated with each typeface + pointSize + matrix. + */ + static int GetFontCacheCountLimit(); + + /** + * Set the limit to the number of entries in the font cache, and return + * the previous value. If this new value is lower than the previous, + * it will automatically try to purge entries to meet the new limit. + */ + static int SetFontCacheCountLimit(int count); + + /** + * For debugging purposes, this will attempt to purge the font cache. It + * does not change the limit, but will cause subsequent font measures and + * draws to be recreated, since they will no longer be in the cache. + */ + static void PurgeFontCache(); + + /** + * This function returns the memory used for temporary images and other resources. + */ + static size_t GetResourceCacheTotalBytesUsed(); + + /** + * These functions get/set the memory usage limit for the resource cache, used for temporary + * bitmaps and other resources. Entries are purged from the cache when the memory useage + * exceeds this limit. + */ + static size_t GetResourceCacheTotalByteLimit(); + static size_t SetResourceCacheTotalByteLimit(size_t newLimit); + + /** + * For debugging purposes, this will attempt to purge the resource cache. It + * does not change the limit. + */ + static void PurgeResourceCache(); + + /** + * When the cachable entry is very lage (e.g. a large scaled bitmap), adding it to the cache + * can cause most/all of the existing entries to be purged. To avoid the, the client can set + * a limit for a single allocation. If a cacheable entry would have been cached, but its size + * exceeds this limit, then we do not attempt to cache it at all. + * + * Zero is the default value, meaning we always attempt to cache entries. + */ + static size_t GetResourceCacheSingleAllocationByteLimit(); + static size_t SetResourceCacheSingleAllocationByteLimit(size_t newLimit); + + /** + * Dumps memory usage of caches using the SkTraceMemoryDump interface. See SkTraceMemoryDump + * for usage of this method. + */ + static void DumpMemoryStatistics(SkTraceMemoryDump* dump); + + /** + * Free as much globally cached memory as possible. 
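// Illustrative sketch (editorial): process-startup tuning with the cache knobs
// documented above. The byte limits are arbitrary example values.
#include "include/core/SkGraphics.h"

static void initSkiaCaches() {
    SkGraphics::Init();  // thread-safe and idempotent
    // Cap the glyph cache at 4 MB; the previous limit is returned.
    size_t previousFontLimit = SkGraphics::SetFontCacheLimit(4 * 1024 * 1024);
    (void)previousFontLimit;
    // Cap the resource (scaled bitmap / temporary image) cache at 64 MB.
    SkGraphics::SetResourceCacheTotalByteLimit(64 * 1024 * 1024);
    // Refuse to cache any single entry larger than 8 MB.
    SkGraphics::SetResourceCacheSingleAllocationByteLimit(8 * 1024 * 1024);
}
// Under memory pressure the caches can also be dropped explicitly with
// SkGraphics::PurgeFontCache() and SkGraphics::PurgeResourceCache().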
This will purge all private caches in Skia, + * including font and image caches. + * + * If there are caches associated with GPU context, those will not be affected by this call. + */ + static void PurgeAllCaches(); + + /** + * Applications with command line options may pass optional state, such + * as cache sizes, here, for instance: + * font-cache-limit=12345678 + * + * The flags format is name=value[;name=value...] with no spaces. + * This format is subject to change. + */ + static void SetFlags(const char* flags); + + typedef std::unique_ptr<SkImageGenerator> + (*ImageGeneratorFromEncodedDataFactory)(sk_sp<SkData>); + + /** + * To instantiate images from encoded data, first looks at this runtime function-ptr. If it + * exists, it is called to create an SkImageGenerator from SkData. If there is no function-ptr + * or there is, but it returns NULL, then skia will call its internal default implementation. + * + * Returns the previous factory (which could be NULL). + */ + static ImageGeneratorFromEncodedDataFactory + SetImageGeneratorFromEncodedDataFactory(ImageGeneratorFromEncodedDataFactory); + + /** + * Call early in main() to allow Skia to use a JIT to accelerate CPU-bound operations. + */ + static void AllowJIT(); +}; + +class SkAutoGraphics { +public: + SkAutoGraphics() { + SkGraphics::Init(); + } +}; + +#endif diff --git a/src/deps/skia/include/core/SkICC.h b/src/deps/skia/include/core/SkICC.h new file mode 100644 index 000000000..cb84c1ffb --- /dev/null +++ b/src/deps/skia/include/core/SkICC.h @@ -0,0 +1,19 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkICC_DEFINED +#define SkICC_DEFINED + +#include "include/core/SkData.h" + +struct skcms_Matrix3x3; +struct skcms_TransferFunction; + +SK_API sk_sp<SkData> SkWriteICCProfile(const skcms_TransferFunction&, + const skcms_Matrix3x3& toXYZD50); + +#endif//SkICC_DEFINED diff --git a/src/deps/skia/include/core/SkImage.h b/src/deps/skia/include/core/SkImage.h new file mode 100644 index 000000000..bdcfff3fa --- /dev/null +++ b/src/deps/skia/include/core/SkImage.h @@ -0,0 +1,1302 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImage_DEFINED +#define SkImage_DEFINED + +#include "include/core/SkImageEncoder.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkSamplingOptions.h" +#include "include/core/SkScalar.h" +#include "include/core/SkShader.h" +#include "include/core/SkTileMode.h" +#include "include/private/SkTOptional.h" +#if SK_SUPPORT_GPU +#include "include/gpu/GrTypes.h" +#endif +#include <functional> // std::function + +#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26 +#include <android/hardware_buffer.h> +#endif + +class SkData; +class SkCanvas; +class SkImage; +class SkImageFilter; +class SkImageGenerator; +class SkMipmap; +class SkPaint; +class SkPicture; +class SkPromiseImageTexture; +class SkSurface; +class SkYUVAPixmaps; +class GrBackendFormat; +class GrBackendTexture; +class GrDirectContext; +class GrRecordingContext; +class GrContextThreadSafeProxy; +class GrYUVABackendTextureInfo; +class GrYUVABackendTextures; + +/** \class SkImage + SkImage describes a two dimensional array of pixels to draw. The pixels may be + decoded in a raster bitmap, encoded in a SkPicture or compressed data stream, + or located in GPU memory as a GPU texture. 
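// Illustrative sketch (editorial): serializing an sRGB ICC profile with
// SkWriteICCProfile() from SkICC.h above. SkNamedTransferFn / SkNamedGamut live
// in SkColorSpace.h, which is outside this hunk, so they are assumed here.
#include "include/core/SkColorSpace.h"
#include "include/core/SkData.h"
#include "include/core/SkICC.h"

static sk_sp<SkData> srgbIccProfile() {
    // Returns the encoded ICC bytes, e.g. for embedding in exported PNG/JPEG files.
    return SkWriteICCProfile(SkNamedTransferFn::kSRGB, SkNamedGamut::kSRGB);
}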
+ + SkImage cannot be modified after it is created. SkImage may allocate additional + storage as needed; for instance, an encoded SkImage may decode when drawn. + + SkImage width and height are greater than zero. Creating an SkImage with zero width + or height returns SkImage equal to nullptr. + + SkImage may be created from SkBitmap, SkPixmap, SkSurface, SkPicture, encoded streams, + GPU texture, YUV_ColorSpace data, or hardware buffer. Encoded streams supported + include BMP, GIF, HEIF, ICO, JPEG, PNG, WBMP, WebP. Supported encoding details + vary with platform. +*/ +class SK_API SkImage : public SkRefCnt { +public: + + /** Caller data passed to RasterReleaseProc; may be nullptr. + */ + typedef void* ReleaseContext; + + /** Creates SkImage from SkPixmap and copy of pixels. Since pixels are copied, SkPixmap + pixels may be modified or deleted without affecting SkImage. + + SkImage is returned if SkPixmap is valid. Valid SkPixmap parameters include: + dimensions are greater than zero; + each dimension fits in 29 bits; + SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType; + row bytes are large enough to hold one row of pixels; + pixel address is not nullptr. + + @param pixmap SkImageInfo, pixel address, and row bytes + @return copy of SkPixmap pixels, or nullptr + + example: https://fiddle.skia.org/c/@Image_MakeRasterCopy + */ + static sk_sp<SkImage> MakeRasterCopy(const SkPixmap& pixmap); + + /** Creates SkImage from SkImageInfo, sharing pixels. + + SkImage is returned if SkImageInfo is valid. Valid SkImageInfo parameters include: + dimensions are greater than zero; + each dimension fits in 29 bits; + SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType; + rowBytes are large enough to hold one row of pixels; + pixels is not nullptr, and contains enough data for SkImage. + + @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace + @param pixels address or pixel storage + @param rowBytes size of pixel row or larger + @return SkImage sharing pixels, or nullptr + */ + static sk_sp<SkImage> MakeRasterData(const SkImageInfo& info, sk_sp<SkData> pixels, + size_t rowBytes); + + /** Function called when SkImage no longer shares pixels. ReleaseContext is + provided by caller when SkImage is created, and may be nullptr. + */ + typedef void (*RasterReleaseProc)(const void* pixels, ReleaseContext); + + /** Creates SkImage from pixmap, sharing SkPixmap pixels. Pixels must remain valid and + unchanged until rasterReleaseProc is called. rasterReleaseProc is passed + releaseContext when SkImage is deleted or no longer refers to pixmap pixels. + + Pass nullptr for rasterReleaseProc to share SkPixmap without requiring a callback + when SkImage is released. Pass nullptr for releaseContext if rasterReleaseProc + does not require state. + + SkImage is returned if pixmap is valid. Valid SkPixmap parameters include: + dimensions are greater than zero; + each dimension fits in 29 bits; + SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType; + row bytes are large enough to hold one row of pixels; + pixel address is not nullptr. 
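// Illustrative sketch (editorial): building a CPU-backed SkImage from raw RGBA
// bytes with MakeRasterCopy() -- roughly what a native canvas backend would do
// with ImageData-style pixel buffers.
#include "include/core/SkImage.h"
#include "include/core/SkImageInfo.h"
#include "include/core/SkPixmap.h"
#include <cstdint>

static sk_sp<SkImage> imageFromRGBA(const uint8_t* rgba, int w, int h) {
    SkImageInfo info = SkImageInfo::Make(w, h, kRGBA_8888_SkColorType,
                                         kUnpremul_SkAlphaType);
    // Tightly packed rows: rowBytes = width * 4. The pixels are copied, so the
    // caller's buffer may be released immediately afterwards.
    SkPixmap pixmap(info, rgba, size_t(w) * 4);
    return SkImage::MakeRasterCopy(pixmap);
}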
+ + @param pixmap SkImageInfo, pixel address, and row bytes + @param rasterReleaseProc function called when pixels can be released; or nullptr + @param releaseContext state passed to rasterReleaseProc; or nullptr + @return SkImage sharing pixmap + */ + static sk_sp<SkImage> MakeFromRaster(const SkPixmap& pixmap, + RasterReleaseProc rasterReleaseProc, + ReleaseContext releaseContext); + + /** Creates SkImage from bitmap, sharing or copying bitmap pixels. If the bitmap + is marked immutable, and its pixel memory is shareable, it may be shared + instead of copied. + + SkImage is returned if bitmap is valid. Valid SkBitmap parameters include: + dimensions are greater than zero; + each dimension fits in 29 bits; + SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType; + row bytes are large enough to hold one row of pixels; + pixel address is not nullptr. + + @param bitmap SkImageInfo, row bytes, and pixels + @return created SkImage, or nullptr + + example: https://fiddle.skia.org/c/@Image_MakeFromBitmap + */ + static sk_sp<SkImage> MakeFromBitmap(const SkBitmap& bitmap); + + /** Creates SkImage from data returned by imageGenerator. Generated data is owned by SkImage and + may not be shared or accessed. + + SkImage is returned if generator data is valid. Valid data parameters vary by type of data + and platform. + + imageGenerator may wrap SkPicture data, codec data, or custom data. + + @param imageGenerator stock or custom routines to retrieve SkImage + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromGenerator(std::unique_ptr<SkImageGenerator> imageGenerator); + + /** + * Return an image backed by the encoded data, but attempt to defer decoding until the image + * is actually used/drawn. This deferral allows the system to cache the result, either on the + * CPU or on the GPU, depending on where the image is drawn. If memory is low, the cache may + * be purged, causing the next draw of the image to have to re-decode. + * + * If alphaType is nullopt, the image's alpha type will be chosen automatically based on the + * image format. Transparent images will default to kPremul_SkAlphaType. If alphaType contains + * kPremul_SkAlphaType or kUnpremul_SkAlphaType, that alpha type will be used. Forcing opaque + * (passing kOpaque_SkAlphaType) is not allowed, and will return nullptr. + * + * This is similar to DecodeTo[Raster,Texture], but this method will attempt to defer the + * actual decode, while the DecodeTo... method explicitly decode and allocate the backend + * when the call is made. + * + * If the encoded format is not supported, nullptr is returned. 
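// Illustrative sketch (editorial): sharing caller-owned pixels without a copy
// via MakeFromRaster(); the release proc reports when Skia no longer needs the
// buffer. Assumes the buffer was allocated with malloc().
#include "include/core/SkImage.h"
#include "include/core/SkImageInfo.h"
#include "include/core/SkPixmap.h"
#include <cstdlib>

static void freePixels(const void* pixels, SkImage::ReleaseContext) {
    std::free(const_cast<void*>(pixels));
}

static sk_sp<SkImage> adoptMallocedRGBA(void* pixels, int w, int h) {
    SkImageInfo info = SkImageInfo::Make(w, h, kRGBA_8888_SkColorType,
                                         kPremul_SkAlphaType);
    SkPixmap pixmap(info, pixels, size_t(w) * 4);
    // The pixels must stay valid and unchanged until freePixels() is invoked.
    return SkImage::MakeFromRaster(pixmap, freePixels, /*releaseContext=*/nullptr);
}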
+ * + * @param encoded the encoded data + * @return created SkImage, or nullptr + + example: https://fiddle.skia.org/c/@Image_MakeFromEncoded + */ + static sk_sp<SkImage> MakeFromEncoded(sk_sp<SkData> encoded, + skstd::optional<SkAlphaType> alphaType = skstd::nullopt); + + /* + * Experimental: + * Skia | GL_COMPRESSED_* | MTLPixelFormat* | VK_FORMAT_*_BLOCK + * -------------------------------------------------------------------------------------- + * kETC2_RGB8_UNORM | ETC1_RGB8 | ETC2_RGB8 (iOS-only) | ETC2_R8G8B8_UNORM + * | RGB8_ETC2 | | + * -------------------------------------------------------------------------------------- + * kBC1_RGB8_UNORM | RGB_S3TC_DXT1_EXT | N/A | BC1_RGB_UNORM + * -------------------------------------------------------------------------------------- + * kBC1_RGBA8_UNORM | RGBA_S3TC_DXT1_EXT | BC1_RGBA (macOS-only)| BC1_RGBA_UNORM + */ + enum class CompressionType { + kNone, + kETC2_RGB8_UNORM, // the same as ETC1 + + kBC1_RGB8_UNORM, + kBC1_RGBA8_UNORM, + kLast = kBC1_RGBA8_UNORM, + }; + + static constexpr int kCompressionTypeCount = static_cast<int>(CompressionType::kLast) + 1; + + static const CompressionType kETC1_CompressionType = CompressionType::kETC2_RGB8_UNORM; + + /** Creates a CPU-backed SkImage from compressed data. + + This method will decompress the compressed data and create an image wrapping + it. Any mipmap levels present in the compressed data are discarded. + + @param data compressed data to store in SkImage + @param width width of full SkImage + @param height height of full SkImage + @param type type of compression used + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeRasterFromCompressed(sk_sp<SkData> data, + int width, int height, + CompressionType type); + + enum class BitDepth { + kU8, //!< uses 8-bit unsigned int per color component + kF16, //!< uses 16-bit float per color component + }; + + /** Creates SkImage from picture. Returned SkImage width and height are set by dimensions. + SkImage draws picture with matrix and paint, set to bitDepth and colorSpace. + + If matrix is nullptr, draws with identity SkMatrix. If paint is nullptr, draws + with default SkPaint. colorSpace may be nullptr. + + @param picture stream of drawing commands + @param dimensions width and height + @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr + @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr + @param bitDepth 8-bit integer or 16-bit float: per component + @param colorSpace range of colors; may be nullptr + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions, + const SkMatrix* matrix, const SkPaint* paint, + BitDepth bitDepth, + sk_sp<SkColorSpace> colorSpace); + +#if SK_SUPPORT_GPU + /** Creates a GPU-backed SkImage from compressed data. + + This method will return an SkImage representing the compressed data. + If the GPU doesn't support the specified compression method, the data + will be decompressed and then wrapped in a GPU-backed image. + + Note: one can query the supported compression formats via + GrRecordingContext::compressedBackendFormat. + + @param context GPU context + @param data compressed data to store in SkImage + @param width width of full SkImage + @param height height of full SkImage + @param type type of compression used + @param mipMapped does 'data' contain data for all the mipmap levels? + @param isProtected do the contents of 'data' require DRM protection (on Vulkan)? 
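// Illustrative sketch (editorial): deferred decoding with MakeFromEncoded().
// The decode happens on first draw or pixel access; the path argument is a
// caller-supplied placeholder.
#include "include/core/SkData.h"
#include "include/core/SkImage.h"

static sk_sp<SkImage> lazyDecode(const char* path) {
    sk_sp<SkData> encoded = SkData::MakeFromFileName(path);
    if (!encoded) {
        return nullptr;  // file missing or unreadable
    }
    // Returns nullptr if no codec for this format was compiled in.
    return SkImage::MakeFromEncoded(std::move(encoded));
}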
+ @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeTextureFromCompressed(GrDirectContext* direct, + sk_sp<SkData> data, + int width, int height, + CompressionType type, + GrMipmapped mipMapped = GrMipmapped::kNo, + GrProtected isProtected = GrProtected::kNo); + + /** User function called when supplied texture may be deleted. + */ + typedef void (*TextureReleaseProc)(ReleaseContext releaseContext); + + /** Creates SkImage from GPU texture associated with context. GPU texture must stay + valid and unchanged until textureReleaseProc is called. textureReleaseProc is + passed releaseContext when SkImage is deleted or no longer refers to texture. + + SkImage is returned if format of backendTexture is recognized and supported. + Recognized formats vary by GPU back-end. + + @note When using a DDL recording context, textureReleaseProc will be called on the + GPU thread after the DDL is played back on the direct context. + + @param context GPU context + @param backendTexture texture residing on GPU + @param colorSpace This describes the color space of this image's contents, as + seen after sampling. In general, if the format of the backend + texture is SRGB, some linear colorSpace should be supplied + (e.g., SkColorSpace::MakeSRGBLinear()). If the format of the + backend texture is linear, then the colorSpace should include + a description of the transfer function as + well (e.g., SkColorSpace::MakeSRGB()). + @param textureReleaseProc function called when texture can be released + @param releaseContext state passed to textureReleaseProc + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromTexture(GrRecordingContext* context, + const GrBackendTexture& backendTexture, + GrSurfaceOrigin origin, + SkColorType colorType, + SkAlphaType alphaType, + sk_sp<SkColorSpace> colorSpace, + TextureReleaseProc textureReleaseProc = nullptr, + ReleaseContext releaseContext = nullptr); + + /** Creates an SkImage from a GPU backend texture. The backend texture must stay + valid and unchanged until textureReleaseProc is called. The textureReleaseProc is + called when the SkImage is deleted or no longer refers to the texture and will be + passed the releaseContext. + + An SkImage is returned if the format of backendTexture is recognized and supported. + Recognized formats vary by GPU back-end. + + @note When using a DDL recording context, textureReleaseProc will be called on the + GPU thread after the DDL is played back on the direct context. + + @param context the GPU context + @param backendTexture a texture already allocated by the GPU + @param alphaType This characterizes the nature of the alpha values in the + backend texture. For opaque compressed formats (e.g., ETC1) + this should usually be set to kOpaque_SkAlphaType. + @param colorSpace This describes the color space of this image's contents, as + seen after sampling. In general, if the format of the backend + texture is SRGB, some linear colorSpace should be supplied + (e.g., SkColorSpace::MakeSRGBLinear()). If the format of the + backend texture is linear, then the colorSpace should include + a description of the transfer function as + well (e.g., SkColorSpace::MakeSRGB()). 
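// Illustrative sketch (editorial): wrapping an existing backend texture as an
// SkImage via MakeFromTexture(). Creating the GrBackendTexture itself is
// backend-specific (GL/Metal/Vulkan) and is assumed to have happened elsewhere.
#include "include/core/SkImage.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrRecordingContext.h"

static sk_sp<SkImage> wrapBackendTexture(GrRecordingContext* ctx,
                                         const GrBackendTexture& tex) {
    // The texture must outlive the image unless a TextureReleaseProc is supplied.
    return SkImage::MakeFromTexture(ctx, tex,
                                    kTopLeft_GrSurfaceOrigin,
                                    kRGBA_8888_SkColorType,
                                    kPremul_SkAlphaType,
                                    /*colorSpace=*/nullptr);
}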
+ @param textureReleaseProc function called when the backend texture can be released + @param releaseContext state passed to textureReleaseProc + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromCompressedTexture(GrRecordingContext* context, + const GrBackendTexture& backendTexture, + GrSurfaceOrigin origin, + SkAlphaType alphaType, + sk_sp<SkColorSpace> colorSpace, + TextureReleaseProc textureReleaseProc = nullptr, + ReleaseContext releaseContext = nullptr); + + /** Creates SkImage from pixmap. SkImage is uploaded to GPU back-end using context. + + Created SkImage is available to other GPU contexts, and is available across thread + boundaries. All contexts must be in the same GPU share group, or otherwise + share resources. + + When SkImage is no longer referenced, context releases texture memory + asynchronously. + + GrBackendTexture created from pixmap is uploaded to match SkSurface created with + dstColorSpace. SkColorSpace of SkImage is determined by pixmap.colorSpace(). + + SkImage is returned referring to GPU back-end if context is not nullptr, + format of data is recognized and supported, and if context supports moving + resources between contexts. Otherwise, pixmap pixel data is copied and SkImage + as returned in raster format if possible; nullptr may be returned. + Recognized GPU formats vary by platform and GPU back-end. + + @param context GPU context + @param pixmap SkImageInfo, pixel address, and row bytes + @param buildMips create SkImage as mip map if true + @param dstColorSpace range of colors of matching SkSurface on GPU + @param limitToMaxTextureSize downscale image to GPU maximum texture size, if necessary + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeCrossContextFromPixmap(GrDirectContext* context, + const SkPixmap& pixmap, + bool buildMips, + bool limitToMaxTextureSize = false); + + /** Creates SkImage from backendTexture associated with context. backendTexture and + returned SkImage are managed internally, and are released when no longer needed. + + SkImage is returned if format of backendTexture is recognized and supported. + Recognized formats vary by GPU back-end. + + @param context GPU context + @param backendTexture texture residing on GPU + @param textureOrigin origin of backendTexture + @param colorType color type of the resulting image + @param alphaType alpha type of the resulting image + @param colorSpace range of colors; may be nullptr + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromAdoptedTexture(GrRecordingContext* context, + const GrBackendTexture& backendTexture, + GrSurfaceOrigin textureOrigin, + SkColorType colorType, + SkAlphaType alphaType = kPremul_SkAlphaType, + sk_sp<SkColorSpace> colorSpace = nullptr); + + /** Creates an SkImage from YUV[A] planar textures. This requires that the textures stay valid + for the lifetime of the image. The ReleaseContext can be used to know when it is safe to + either delete or overwrite the textures. If ReleaseProc is provided it is also called before + return on failure. + + @param context GPU context + @param yuvaTextures A set of textures containing YUVA data and a description of the + data and transformation to RGBA. 
+ @param imageColorSpace range of colors of the resulting image after conversion to RGB; + may be nullptr + @param textureReleaseProc called when the backend textures can be released + @param releaseContext state passed to textureReleaseProc + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromYUVATextures(GrRecordingContext* context, + const GrYUVABackendTextures& yuvaTextures, + sk_sp<SkColorSpace> imageColorSpace = nullptr, + TextureReleaseProc textureReleaseProc = nullptr, + ReleaseContext releaseContext = nullptr); + + /** Creates SkImage from SkYUVAPixmaps. + + The image will remain planar with each plane converted to a texture using the passed + GrRecordingContext. + + SkYUVAPixmaps has a SkYUVAInfo which specifies the transformation from YUV to RGB. + The SkColorSpace of the resulting RGB values is specified by imageColorSpace. This will + be the SkColorSpace reported by the image and when drawn the RGB values will be converted + from this space into the destination space (if the destination is tagged). + + Currently, this is only supported using the GPU backend and will fail if context is nullptr. + + SkYUVAPixmaps does not need to remain valid after this returns. + + @param context GPU context + @param pixmaps The planes as pixmaps with supported SkYUVAInfo that + specifies conversion to RGB. + @param buildMips create internal YUVA textures as mip map if kYes. This is + silently ignored if the context does not support mip maps. + @param limitToMaxTextureSize downscale image to GPU maximum texture size, if necessary + @param imageColorSpace range of colors of the resulting image; may be nullptr + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromYUVAPixmaps(GrRecordingContext* context, + const SkYUVAPixmaps& pixmaps, + GrMipMapped buildMips = GrMipmapped::kNo, + bool limitToMaxTextureSize = false, + sk_sp<SkColorSpace> imageColorSpace = nullptr); + + using PromiseImageTextureContext = void*; + using PromiseImageTextureFulfillProc = + sk_sp<SkPromiseImageTexture> (*)(PromiseImageTextureContext); + using PromiseImageTextureReleaseProc = void (*)(PromiseImageTextureContext); + + /** Create a new SkImage that is very similar to an SkImage created by MakeFromTexture. The + difference is that the caller need not have created the texture nor populated it with the + image pixel data. Moreover, the SkImage may be created on a thread as the creation of the + image does not require access to the backend API or GrDirectContext. Instead of passing a + GrBackendTexture the client supplies a description of the texture consisting of + GrBackendFormat, width, height, and GrMipmapped state. The resulting SkImage can be drawn + to a SkDeferredDisplayListRecorder or directly to a GPU-backed SkSurface. + + When the actual texture is required to perform a backend API draw, textureFulfillProc will + be called to receive a GrBackendTexture. The properties of the GrBackendTexture must match + those set during the SkImage creation, and it must refer to a valid existing texture in the + backend API context/device, and be populated with the image pixel data. The texture cannot + be deleted until textureReleaseProc is called. + + There is at most one call to each of textureFulfillProc and textureReleaseProc. + textureReleaseProc is always called even if image creation fails or if the + image is never fulfilled (e.g. it is never drawn or all draws are clipped out) + + @param gpuContextProxy the thread-safe proxy of the gpu context. required. 
+ @param backendFormat format of promised gpu texture + @param dimensions width & height of promised gpu texture + @param mipMapped mip mapped state of promised gpu texture + @param origin surface origin of promised gpu texture + @param colorType color type of promised gpu texture + @param alphaType alpha type of promised gpu texture + @param colorSpace range of colors; may be nullptr + @param textureFulfillProc function called to get actual gpu texture + @param textureReleaseProc function called when texture can be deleted + @param textureContext state passed to textureFulfillProc and textureReleaseProc + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakePromiseTexture(sk_sp<GrContextThreadSafeProxy> gpuContextProxy, + const GrBackendFormat& backendFormat, + SkISize dimensions, + GrMipmapped mipMapped, + GrSurfaceOrigin origin, + SkColorType colorType, + SkAlphaType alphaType, + sk_sp<SkColorSpace> colorSpace, + PromiseImageTextureFulfillProc textureFulfillProc, + PromiseImageTextureReleaseProc textureReleaseProc, + PromiseImageTextureContext textureContext); + + /** This entry point operates like 'MakePromiseTexture' but it is used to construct a SkImage + from YUV[A] data. The source data may be planar (i.e. spread across multiple textures). In + the extreme Y, U, V, and A are all in different planes and thus the image is specified by + four textures. 'backendTextureInfo' describes the planar arrangement, texture formats, + conversion to RGB, and origin of the textures. Separate 'textureFulfillProc' and + 'textureReleaseProc' calls are made for each texture. Each texture has its own + PromiseImageTextureContext. If 'backendTextureInfo' is not valid then no release proc + calls are made. Otherwise, the calls will be made even on failure. 'textureContexts' has one + entry for each of the up to four textures, as indicated by 'backendTextureInfo'. + + Currently the mip mapped property of 'backendTextureInfo' is ignored. However, in the + near future it will be required that if it is kYes then textureFulfillProc must return + a mip mapped texture for each plane in order to successfully draw the image. + + @param gpuContextProxy the thread-safe proxy of the gpu context. required. + @param backendTextureInfo info about the promised yuva gpu texture + @param imageColorSpace range of colors; may be nullptr + @param textureFulfillProc function called to get actual gpu texture + @param textureReleaseProc function called when texture can be deleted + @param textureContexts state passed to textureFulfillProc and textureReleaseProc + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakePromiseYUVATexture(sk_sp<GrContextThreadSafeProxy> gpuContextProxy, + const GrYUVABackendTextureInfo& backendTextureInfo, + sk_sp<SkColorSpace> imageColorSpace, + PromiseImageTextureFulfillProc textureFulfillProc, + PromiseImageTextureReleaseProc textureReleaseProc, + PromiseImageTextureContext textureContexts[]); + +#endif // SK_SUPPORT_GPU + +#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26 + /** (See Skia bug 7447) + Creates SkImage from Android hardware buffer. + Returned SkImage takes a reference on the buffer. + + Only available on Android, when __ANDROID_API__ is defined to be 26 or greater. 
+ + @param hardwareBuffer AHardwareBuffer Android hardware buffer + @param colorSpace range of colors; may be nullptr + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromAHardwareBuffer( + AHardwareBuffer* hardwareBuffer, + SkAlphaType alphaType = kPremul_SkAlphaType, + sk_sp<SkColorSpace> colorSpace = nullptr, + GrSurfaceOrigin surfaceOrigin = kTopLeft_GrSurfaceOrigin); + + /** Creates SkImage from Android hardware buffer and uploads the data from the SkPixmap to it. + Returned SkImage takes a reference on the buffer. + + Only available on Android, when __ANDROID_API__ is defined to be 26 or greater. + + @param context GPU context + @param pixmap SkPixmap that contains data to be uploaded to the AHardwareBuffer + @param hardwareBuffer AHardwareBuffer Android hardware buffer + @param surfaceOrigin surface origin for resulting image + @return created SkImage, or nullptr + */ + static sk_sp<SkImage> MakeFromAHardwareBufferWithData( + GrDirectContext* context, + const SkPixmap& pixmap, + AHardwareBuffer* hardwareBuffer, + GrSurfaceOrigin surfaceOrigin = kTopLeft_GrSurfaceOrigin); +#endif + + /** Returns a SkImageInfo describing the width, height, color type, alpha type, and color space + of the SkImage. + + @return image info of SkImage. + */ + const SkImageInfo& imageInfo() const { return fInfo; } + + /** Returns pixel count in each row. + + @return pixel width in SkImage + */ + int width() const { return fInfo.width(); } + + /** Returns pixel row count. + + @return pixel height in SkImage + */ + int height() const { return fInfo.height(); } + + /** Returns SkISize { width(), height() }. + + @return integral size of width() and height() + */ + SkISize dimensions() const { return SkISize::Make(fInfo.width(), fInfo.height()); } + + /** Returns SkIRect { 0, 0, width(), height() }. + + @return integral rectangle from origin to width() and height() + */ + SkIRect bounds() const { return SkIRect::MakeWH(fInfo.width(), fInfo.height()); } + + /** Returns value unique to image. SkImage contents cannot change after SkImage is + created. Any operation to create a new SkImage will receive generate a new + unique number. + + @return unique identifier + */ + uint32_t uniqueID() const { return fUniqueID; } + + /** Returns SkAlphaType. + + SkAlphaType returned was a parameter to an SkImage constructor, + or was parsed from encoded data. + + @return SkAlphaType in SkImage + + example: https://fiddle.skia.org/c/@Image_alphaType + */ + SkAlphaType alphaType() const; + + /** Returns SkColorType if known; otherwise, returns kUnknown_SkColorType. + + @return SkColorType of SkImage + + example: https://fiddle.skia.org/c/@Image_colorType + */ + SkColorType colorType() const; + + /** Returns SkColorSpace, the range of colors, associated with SkImage. The + reference count of SkColorSpace is unchanged. The returned SkColorSpace is + immutable. + + SkColorSpace returned was passed to an SkImage constructor, + or was parsed from encoded data. SkColorSpace returned may be ignored when SkImage + is drawn, depending on the capabilities of the SkSurface receiving the drawing. + + @return SkColorSpace in SkImage, or nullptr + + example: https://fiddle.skia.org/c/@Image_colorSpace + */ + SkColorSpace* colorSpace() const; + + /** Returns a smart pointer to SkColorSpace, the range of colors, associated with + SkImage. The smart pointer tracks the number of objects sharing this + SkColorSpace reference so the memory is released when the owners destruct. + + The returned SkColorSpace is immutable. 
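// Illustrative sketch (editorial): the metadata accessors above are cheap and
// never force a decode, so they are safe to call even on lazy (encoded) images.
#include "include/core/SkImage.h"

static void describeImage(const SkImage& img) {
    SkDebugf("image %u: %dx%d, colorType=%d, alphaType=%d\n",
             img.uniqueID(), img.width(), img.height(),
             (int)img.colorType(), (int)img.alphaType());
}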
+ + SkColorSpace returned was passed to an SkImage constructor, + or was parsed from encoded data. SkColorSpace returned may be ignored when SkImage + is drawn, depending on the capabilities of the SkSurface receiving the drawing. + + @return SkColorSpace in SkImage, or nullptr, wrapped in a smart pointer + + example: https://fiddle.skia.org/c/@Image_refColorSpace + */ + sk_sp<SkColorSpace> refColorSpace() const; + + /** Returns true if SkImage pixels represent transparency only. If true, each pixel + is packed in 8 bits as defined by kAlpha_8_SkColorType. + + @return true if pixels represent a transparency mask + + example: https://fiddle.skia.org/c/@Image_isAlphaOnly + */ + bool isAlphaOnly() const; + + /** Returns true if pixels ignore their alpha value and are treated as fully opaque. + + @return true if SkAlphaType is kOpaque_SkAlphaType + */ + bool isOpaque() const { return SkAlphaTypeIsOpaque(this->alphaType()); } + + /** + * Make a shader with the specified tiling and mipmap sampling. + */ + sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions&, + const SkMatrix* localMatrix = nullptr) const; + + sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions& sampling, + const SkMatrix& lm) const { + return this->makeShader(tmx, tmy, sampling, &lm); + } + sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling, const SkMatrix& lm) const { + return this->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, sampling, &lm); + } + sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling, + const SkMatrix* lm = nullptr) const { + return this->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, sampling, lm); + } + + /** + * makeRawShader functions like makeShader, but for images that contain non-color data. + * This includes images encoding things like normals, material properties (eg, roughness), + * heightmaps, or any other purely mathematical data that happens to be stored in an image. + * These types of images are useful with some programmable shaders (see: SkRuntimeEffect). + * + * Raw image shaders work like regular image shaders (including filtering and tiling), with + * a few major differences: + * - No color space transformation is ever applied (the color space of the image is ignored). + * - Images with an alpha type of kUnpremul are *not* automatically premultiplied. + * - Bicubic filtering is not supported. If SkSamplingOptions::useCubic is true, these + * factories will return nullptr. + */ + sk_sp<SkShader> makeRawShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions&, + const SkMatrix* localMatrix = nullptr) const; + + sk_sp<SkShader> makeRawShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions& sampling, + const SkMatrix& lm) const { + return this->makeRawShader(tmx, tmy, sampling, &lm); + } + sk_sp<SkShader> makeRawShader(const SkSamplingOptions& sampling, const SkMatrix& lm) const { + return this->makeRawShader(SkTileMode::kClamp, SkTileMode::kClamp, sampling, &lm); + } + sk_sp<SkShader> makeRawShader(const SkSamplingOptions& sampling, + const SkMatrix* lm = nullptr) const { + return this->makeRawShader(SkTileMode::kClamp, SkTileMode::kClamp, sampling, lm); + } + + using CubicResampler = SkCubicResampler; + + /** Copies SkImage pixel address, row bytes, and SkImageInfo to pixmap, if address + is available, and returns true. If pixel address is not available, return + false and leave pixmap unchanged. 
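// Illustrative sketch (editorial): tiling an image with makeShader() and using
// the shader to fill a rectangle; plain bilinear sampling, no mipmaps.
#include "include/core/SkCanvas.h"
#include "include/core/SkImage.h"
#include "include/core/SkPaint.h"
#include "include/core/SkRect.h"
#include "include/core/SkSamplingOptions.h"

static void fillWithImage(SkCanvas* canvas, const sk_sp<SkImage>& img) {
    SkPaint paint;
    paint.setShader(img->makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat,
                                    SkSamplingOptions(SkFilterMode::kLinear,
                                                      SkMipmapMode::kNone)));
    canvas->drawRect(SkRect::MakeWH(512, 512), paint);
}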
+ + @param pixmap storage for pixel state if pixels are readable; otherwise, ignored + @return true if SkImage has direct access to pixels + + example: https://fiddle.skia.org/c/@Image_peekPixels + */ + bool peekPixels(SkPixmap* pixmap) const; + + /** Returns true the contents of SkImage was created on or uploaded to GPU memory, + and is available as a GPU texture. + + @return true if SkImage is a GPU texture + + example: https://fiddle.skia.org/c/@Image_isTextureBacked + */ + bool isTextureBacked() const; + + /** Returns an approximation of the amount of texture memory used by the image. Returns + zero if the image is not texture backed or if the texture has an external format. + */ + size_t textureSize() const; + + /** Returns true if SkImage can be drawn on either raster surface or GPU surface. + If context is nullptr, tests if SkImage draws on raster surface; + otherwise, tests if SkImage draws on GPU surface associated with context. + + SkImage backed by GPU texture may become invalid if associated context is + invalid. lazy image may be invalid and may not draw to raster surface or + GPU surface or both. + + @param context GPU context + @return true if SkImage can be drawn + + example: https://fiddle.skia.org/c/@Image_isValid + */ + bool isValid(GrRecordingContext* context) const; + +#if SK_SUPPORT_GPU + /** Flushes any pending uses of texture-backed images in the GPU backend. If the image is not + texture-backed (including promise texture images) or if the GrDirectContext does not + have the same context ID as the context backing the image then this is a no-op. + + If the image was not used in any non-culled draws in the current queue of work for the + passed GrDirectContext then this is a no-op unless the GrFlushInfo contains semaphores or + a finish proc. Those are respected even when the image has not been used. + + @param context the context on which to flush pending usages of the image. + @param info flush options + */ + GrSemaphoresSubmitted flush(GrDirectContext* context, const GrFlushInfo& flushInfo) const; + + void flush(GrDirectContext* context) const { this->flush(context, {}); } + + /** Version of flush() that uses a default GrFlushInfo. Also submits the flushed work to the + GPU. + */ + void flushAndSubmit(GrDirectContext*) const; + + /** Retrieves the back-end texture. If SkImage has no back-end texture, an invalid + object is returned. Call GrBackendTexture::isValid to determine if the result + is valid. + + If flushPendingGrContextIO is true, completes deferred I/O operations. + + If origin in not nullptr, copies location of content drawn into SkImage. + + @param flushPendingGrContextIO flag to flush outstanding requests + @return back-end API texture handle; invalid on failure + */ + GrBackendTexture getBackendTexture(bool flushPendingGrContextIO, + GrSurfaceOrigin* origin = nullptr) const; +#endif // SK_SUPPORT_GPU + + /** \enum SkImage::CachingHint + CachingHint selects whether Skia may internally cache SkBitmap generated by + decoding SkImage, or by copying SkImage from GPU to CPU. The default behavior + allows caching SkBitmap. + + Choose kDisallow_CachingHint if SkImage pixels are to be used only once, or + if SkImage pixels reside in a cache outside of Skia, or to reduce memory pressure. + + Choosing kAllow_CachingHint does not ensure that pixels will be cached. + SkImage pixels may not be cached if memory requirements are too large or + pixels are not accessible. 
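// Illustrative sketch (editorial): peekPixels() is the zero-copy path -- it only
// succeeds when the pixels already sit in CPU memory, so texture-backed images
// fall through and the caller must use readPixels() or makeRasterImage() instead.
#include "include/core/SkImage.h"
#include "include/core/SkPixmap.h"

static const void* tryDirectPixelAccess(const SkImage& img, SkPixmap* out) {
    if (!img.isTextureBacked() && img.peekPixels(out)) {
        return out->addr();  // valid for as long as the image stays alive
    }
    return nullptr;
}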
+ */ + enum CachingHint { + kAllow_CachingHint, //!< allows internally caching decoded and copied pixels + kDisallow_CachingHint, //!< disallows internally caching decoded and copied pixels + }; + + /** Copies SkRect of pixels from SkImage to dstPixels. Copy starts at offset (srcX, srcY), + and does not exceed SkImage (width(), height()). + + dstInfo specifies width, height, SkColorType, SkAlphaType, and SkColorSpace of + destination. dstRowBytes specifies the gap from one destination row to the next. + Returns true if pixels are copied. Returns false if: + - dstInfo.addr() equals nullptr + - dstRowBytes is less than dstInfo.minRowBytes() + - SkPixelRef is nullptr + + Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match. + If SkImage SkColorType is kGray_8_SkColorType, dstInfo.colorSpace() must match. + If SkImage SkAlphaType is kOpaque_SkAlphaType, dstInfo.alphaType() must + match. If SkImage SkColorSpace is nullptr, dstInfo.colorSpace() must match. Returns + false if pixel conversion is not possible. + + srcX and srcY may be negative to copy only top or left of source. Returns + false if width() or height() is zero or negative. + Returns false if abs(srcX) >= Image width(), or if abs(srcY) >= Image height(). + + If cachingHint is kAllow_CachingHint, pixels may be retained locally. + If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache. + + @param context the GrDirectContext in play, if it exists + @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace + @param dstPixels destination pixel storage + @param dstRowBytes destination row length + @param srcX column index whose absolute value is less than width() + @param srcY row index whose absolute value is less than height() + @param cachingHint whether the pixels should be cached locally + @return true if pixels are copied to dstPixels + */ + bool readPixels(GrDirectContext* context, + const SkImageInfo& dstInfo, + void* dstPixels, + size_t dstRowBytes, + int srcX, int srcY, + CachingHint cachingHint = kAllow_CachingHint) const; + + /** Copies a SkRect of pixels from SkImage to dst. Copy starts at (srcX, srcY), and + does not exceed SkImage (width(), height()). + + dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage, + and row bytes of destination. dst.rowBytes() specifics the gap from one destination + row to the next. Returns true if pixels are copied. Returns false if: + - dst pixel storage equals nullptr + - dst.rowBytes is less than SkImageInfo::minRowBytes + - SkPixelRef is nullptr + + Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.colorType() must match. + If SkImage SkColorType is kGray_8_SkColorType, dst.colorSpace() must match. + If SkImage SkAlphaType is kOpaque_SkAlphaType, dst.alphaType() must + match. If SkImage SkColorSpace is nullptr, dst.colorSpace() must match. Returns + false if pixel conversion is not possible. + + srcX and srcY may be negative to copy only top or left of source. Returns + false if width() or height() is zero or negative. + Returns false if abs(srcX) >= Image width(), or if abs(srcY) >= Image height(). + + If cachingHint is kAllow_CachingHint, pixels may be retained locally. + If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache. 
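// Illustrative sketch (editorial): copying a sub-rectangle into a tightly packed
// RGBA buffer with readPixels(). Passing nullptr for the context is fine for
// raster images; GPU-backed images need their owning GrDirectContext.
#include "include/core/SkImage.h"
#include "include/core/SkImageInfo.h"
#include <cstdint>
#include <vector>

static std::vector<uint8_t> readRGBA(const SkImage& img, GrDirectContext* ctx,
                                     int x, int y, int w, int h) {
    SkImageInfo dstInfo = SkImageInfo::Make(w, h, kRGBA_8888_SkColorType,
                                            kUnpremul_SkAlphaType);
    std::vector<uint8_t> pixels(dstInfo.computeMinByteSize());
    if (!img.readPixels(ctx, dstInfo, pixels.data(), dstInfo.minRowBytes(), x, y)) {
        pixels.clear();  // conversion not possible, or the rect missed the image
    }
    return pixels;
}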
+ + @param context the GrDirectContext in play, if it exists + @param dst destination SkPixmap: SkImageInfo, pixels, row bytes + @param srcX column index whose absolute value is less than width() + @param srcY row index whose absolute value is less than height() + @param cachingHint whether the pixels should be cached locallyZ + @return true if pixels are copied to dst + */ + bool readPixels(GrDirectContext* context, + const SkPixmap& dst, + int srcX, + int srcY, + CachingHint cachingHint = kAllow_CachingHint) const; + +#ifndef SK_IMAGE_READ_PIXELS_DISABLE_LEGACY_API + /** Deprecated. Use the variants that accept a GrDirectContext. */ + bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, + int srcX, int srcY, CachingHint cachingHint = kAllow_CachingHint) const; + bool readPixels(const SkPixmap& dst, int srcX, int srcY, + CachingHint cachingHint = kAllow_CachingHint) const; +#endif + + /** The result from asyncRescaleAndReadPixels() or asyncRescaleAndReadPixelsYUV420(). */ + class AsyncReadResult { + public: + AsyncReadResult(const AsyncReadResult&) = delete; + AsyncReadResult(AsyncReadResult&&) = delete; + AsyncReadResult& operator=(const AsyncReadResult&) = delete; + AsyncReadResult& operator=(AsyncReadResult&&) = delete; + + virtual ~AsyncReadResult() = default; + virtual int count() const = 0; + virtual const void* data(int i) const = 0; + virtual size_t rowBytes(int i) const = 0; + + protected: + AsyncReadResult() = default; + }; + + /** Client-provided context that is passed to client-provided ReadPixelsContext. */ + using ReadPixelsContext = void*; + + /** Client-provided callback to asyncRescaleAndReadPixels() or + asyncRescaleAndReadPixelsYUV420() that is called when read result is ready or on failure. + */ + using ReadPixelsCallback = void(ReadPixelsContext, std::unique_ptr<const AsyncReadResult>); + + enum class RescaleGamma : bool { kSrc, kLinear }; + + enum class RescaleMode { + kNearest, + kRepeatedLinear, + kRepeatedCubic, + }; + + /** Makes image pixel data available to caller, possibly asynchronously. It can also rescale + the image pixels. + + Currently asynchronous reads are only supported on the GPU backend and only when the + underlying 3D API supports transfer buffers and CPU/GPU synchronization primitives. In all + other cases this operates synchronously. + + Data is read from the source sub-rectangle, is optionally converted to a linear gamma, is + rescaled to the size indicated by 'info', is then converted to the color space, color type, + and alpha type of 'info'. A 'srcRect' that is not contained by the bounds of the image + causes failure. + + When the pixel data is ready the caller's ReadPixelsCallback is called with a + AsyncReadResult containing pixel data in the requested color type, alpha type, and color + space. The AsyncReadResult will have count() == 1. Upon failure the callback is called with + nullptr for AsyncReadResult. For a GPU image this flushes work but a submit must occur to + guarantee a finite time before the callback is called. + + The data is valid for the lifetime of AsyncReadResult with the exception that if the SkImage + is GPU-backed the data is immediately invalidated if the context is abandoned or + destroyed. + + @param info info of the requested pixels + @param srcRect subrectangle of image to read + @param rescaleGamma controls whether rescaling is done in the image's gamma or whether + the source data is transformed to a linear gamma before rescaling. 
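// Illustrative sketch (editorial): the callback-based async readback flow
// described above (the declaration follows below). The pixel data is only valid
// for the lifetime of the AsyncReadResult handed to the callback.
#include "include/core/SkImage.h"
#include "include/core/SkImageInfo.h"
#include <memory>

static void onReadDone(SkImage::ReadPixelsContext ctx,
                       std::unique_ptr<const SkImage::AsyncReadResult> result) {
    // On success, result->data(0) / result->rowBytes(0) describe the single
    // requested plane; a null result signals failure.
    *static_cast<bool*>(ctx) = (result != nullptr);
}

static void requestAsyncRead(const SkImage& img, bool* succeeded) {
    // Rescale the whole image to 128x128 in the source gamma, nearest filtering.
    img.asyncRescaleAndReadPixels(img.imageInfo().makeWH(128, 128), img.bounds(),
                                  SkImage::RescaleGamma::kSrc,
                                  SkImage::RescaleMode::kNearest,
                                  onReadDone, succeeded);
}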
+ @param rescaleMode controls the technique (and cost) of the rescaling + @param callback function to call with result of the read + @param context passed to callback + */ + void asyncRescaleAndReadPixels(const SkImageInfo& info, + const SkIRect& srcRect, + RescaleGamma rescaleGamma, + RescaleMode rescaleMode, + ReadPixelsCallback callback, + ReadPixelsContext context) const; + + /** + Similar to asyncRescaleAndReadPixels but performs an additional conversion to YUV. The + RGB->YUV conversion is controlled by 'yuvColorSpace'. The YUV data is returned as three + planes ordered y, u, v. The u and v planes are half the width and height of the resized + rectangle. The y, u, and v values are single bytes. Currently this fails if 'dstSize' + width and height are not even. A 'srcRect' that is not contained by the bounds of the + image causes failure. + + When the pixel data is ready the caller's ReadPixelsCallback is called with a + AsyncReadResult containing the planar data. The AsyncReadResult will have count() == 3. + Upon failure the callback is called with nullptr for AsyncReadResult. For a GPU image this + flushes work but a submit must occur to guarantee a finite time before the callback is + called. + + The data is valid for the lifetime of AsyncReadResult with the exception that if the SkImage + is GPU-backed the data is immediately invalidated if the context is abandoned or + destroyed. + + @param yuvColorSpace The transformation from RGB to YUV. Applied to the resized image + after it is converted to dstColorSpace. + @param dstColorSpace The color space to convert the resized image to, after rescaling. + @param srcRect The portion of the image to rescale and convert to YUV planes. + @param dstSize The size to rescale srcRect to + @param rescaleGamma controls whether rescaling is done in the image's gamma or whether + the source data is transformed to a linear gamma before rescaling. + @param rescaleMode controls the technique (and cost) of the rescaling + @param callback function to call with the planar read result + @param context passed to callback + */ + void asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace, + sk_sp<SkColorSpace> dstColorSpace, + const SkIRect& srcRect, + const SkISize& dstSize, + RescaleGamma rescaleGamma, + RescaleMode rescaleMode, + ReadPixelsCallback callback, + ReadPixelsContext context) const; + + /** Copies SkImage to dst, scaling pixels to fit dst.width() and dst.height(), and + converting pixels to match dst.colorType() and dst.alphaType(). Returns true if + pixels are copied. Returns false if dst.addr() is nullptr, or dst.rowBytes() is + less than dst SkImageInfo::minRowBytes. + + Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.colorType() must match. + If SkImage SkColorType is kGray_8_SkColorType, dst.colorSpace() must match. + If SkImage SkAlphaType is kOpaque_SkAlphaType, dst.alphaType() must + match. If SkImage SkColorSpace is nullptr, dst.colorSpace() must match. Returns + false if pixel conversion is not possible. + + If cachingHint is kAllow_CachingHint, pixels may be retained locally. + If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache. 
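// Illustrative sketch (editorial): synchronous downscale into a caller-owned
// bitmap with scalePixels() (declared just below), using linear filtering.
#include "include/core/SkBitmap.h"
#include "include/core/SkImage.h"
#include "include/core/SkSamplingOptions.h"

static bool makeThumbnail(const SkImage& img, SkBitmap* thumb) {
    if (!thumb->tryAllocPixels(SkImageInfo::MakeN32Premul(64, 64))) {
        return false;  // allocation failed
    }
    // scalePixels() resizes and converts into the destination's color/alpha type.
    return img.scalePixels(thumb->pixmap(),
                           SkSamplingOptions(SkFilterMode::kLinear,
                                             SkMipmapMode::kNone));
}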
+ + @param dst destination SkPixmap: SkImageInfo, pixels, row bytes + @return true if pixels are scaled to fit dst + */ + bool scalePixels(const SkPixmap& dst, const SkSamplingOptions&, + CachingHint cachingHint = kAllow_CachingHint) const; + + /** Encodes SkImage pixels, returning result as SkData. + + Returns nullptr if encoding fails, or if encodedImageFormat is not supported. + + SkImage encoding in a format requires both building with one or more of: + SK_ENCODE_JPEG, SK_ENCODE_PNG, SK_ENCODE_WEBP; and platform support + for the encoded format. + + If SK_BUILD_FOR_MAC or SK_BUILD_FOR_IOS is defined, encodedImageFormat can + additionally be one of: SkEncodedImageFormat::kICO, SkEncodedImageFormat::kBMP, + SkEncodedImageFormat::kGIF. + + quality is a platform and format specific metric trading off size and encoding + error. When used, quality equaling 100 encodes with the least error. quality may + be ignored by the encoder. + + @param encodedImageFormat one of: SkEncodedImageFormat::kJPEG, SkEncodedImageFormat::kPNG, + SkEncodedImageFormat::kWEBP + @param quality encoder specific metric with 100 equaling best + @return encoded SkImage, or nullptr + + example: https://fiddle.skia.org/c/@Image_encodeToData + */ + sk_sp<SkData> encodeToData(SkEncodedImageFormat encodedImageFormat, int quality) const; + + /** Encodes SkImage pixels, returning result as SkData. Returns existing encoded data + if present; otherwise, SkImage is encoded with SkEncodedImageFormat::kPNG. Skia + must be built with SK_ENCODE_PNG to encode SkImage. + + Returns nullptr if existing encoded data is missing or invalid, and + encoding fails. + + @return encoded SkImage, or nullptr + + example: https://fiddle.skia.org/c/@Image_encodeToData_2 + */ + sk_sp<SkData> encodeToData() const; + + /** Returns encoded SkImage pixels as SkData, if SkImage was created from supported + encoded stream format. Platform support for formats vary and may require building + with one or more of: SK_ENCODE_JPEG, SK_ENCODE_PNG, SK_ENCODE_WEBP. + + Returns nullptr if SkImage contents are not encoded. + + @return encoded SkImage, or nullptr + + example: https://fiddle.skia.org/c/@Image_refEncodedData + */ + sk_sp<SkData> refEncodedData() const; + + /** Returns subset of this image. + + Returns nullptr if any of the following are true: + - Subset is empty + - Subset is not contained inside the image's bounds + - Pixels in the image could not be read or copied + + If this image is texture-backed, the context parameter is required and must match the + context of the source image. If the context parameter is provided, and the image is + raster-backed, the subset will be converted to texture-backed. + + @param subset bounds of returned SkImage + @param context the GrDirectContext in play, if it exists + @return the subsetted image, or nullptr + + example: https://fiddle.skia.org/c/@Image_makeSubset + */ + sk_sp<SkImage> makeSubset(const SkIRect& subset, GrDirectContext* direct = nullptr) const; + + /** + * Returns true if the image has mipmap levels. + */ + bool hasMipmaps() const; + + /** + * Returns an image with the same "base" pixels as the this image, but with mipmap levels + * automatically generated and attached. + */ + sk_sp<SkImage> withDefaultMipmaps() const; + +#if SK_SUPPORT_GPU + /** Returns SkImage backed by GPU texture associated with context. Returned SkImage is + compatible with SkSurface created with dstColorSpace. 
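// Illustrative sketch (editorial): encoding an image to PNG bytes with
// encodeToData() and writing them to disk. Requires Skia to be built with
// SK_ENCODE_PNG; SkFILEWStream comes from SkStream.h, outside this hunk.
#include "include/core/SkData.h"
#include "include/core/SkEncodedImageFormat.h"
#include "include/core/SkImage.h"
#include "include/core/SkStream.h"

static bool savePng(const SkImage& img, const char* path) {
    sk_sp<SkData> png = img.encodeToData(SkEncodedImageFormat::kPNG, /*quality=*/100);
    if (!png) {
        return false;  // encoder missing or encoding failed
    }
    SkFILEWStream out(path);
    return out.isValid() && out.write(png->data(), png->size());
}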
The returned SkImage respects + mipMapped setting; if mipMapped equals GrMipmapped::kYes, the backing texture + allocates mip map levels. + + The mipMapped parameter is effectively treated as kNo if MIP maps are not supported by the + GPU. + + Returns original SkImage if the image is already texture-backed, the context matches, and + mipMapped is compatible with the backing GPU texture. SkBudgeted is ignored in this case. + + Returns nullptr if context is nullptr, or if SkImage was created with another + GrDirectContext. + + @param GrDirectContext the GrDirectContext in play, if it exists + @param GrMipmapped whether created SkImage texture must allocate mip map levels + @param SkBudgeted whether to count a newly created texture for the returned image + counts against the context's budget. + @return created SkImage, or nullptr + */ + sk_sp<SkImage> makeTextureImage(GrDirectContext*, + GrMipmapped = GrMipmapped::kNo, + SkBudgeted = SkBudgeted::kYes) const; +#endif + + /** Returns raster image or lazy image. Copies SkImage backed by GPU texture into + CPU memory if needed. Returns original SkImage if decoded in raster bitmap, + or if encoded in a stream. + + Returns nullptr if backed by GPU texture and copy fails. + + @return raster image, lazy image, or nullptr + + example: https://fiddle.skia.org/c/@Image_makeNonTextureImage + */ + sk_sp<SkImage> makeNonTextureImage() const; + + /** Returns raster image. Copies SkImage backed by GPU texture into CPU memory, + or decodes SkImage from lazy image. Returns original SkImage if decoded in + raster bitmap. + + Returns nullptr if copy, decode, or pixel read fails. + + If cachingHint is kAllow_CachingHint, pixels may be retained locally. + If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache. + + @return raster image, or nullptr + + example: https://fiddle.skia.org/c/@Image_makeRasterImage + */ + sk_sp<SkImage> makeRasterImage(CachingHint cachingHint = kDisallow_CachingHint) const; + + /** Creates filtered SkImage. filter processes original SkImage, potentially changing + color, position, and size. subset is the bounds of original SkImage processed + by filter. clipBounds is the expected bounds of the filtered SkImage. outSubset + is required storage for the actual bounds of the filtered SkImage. offset is + required storage for translation of returned SkImage. + + Returns nullptr if SkImage could not be created or if the recording context provided doesn't + match the GPU context in which the image was created. If nullptr is returned, outSubset + and offset are undefined. + + Useful for animation of SkImageFilter that varies size from frame to frame. + Returned SkImage is created larger than required by filter so that GPU texture + can be reused with different sized effects. outSubset describes the valid bounds + of GPU texture returned. offset translates the returned SkImage to keep subsequent + animation frames aligned with respect to each other. 
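// A sketch of moving pixels between CPU and GPU with the methods above,
// assuming `dContext` is a valid GrDirectContext owned by the caller.
sk_sp<SkImage> uploadWithMips(GrDirectContext* dContext, sk_sp<SkImage> cpuImage) {
    // Returns the same image if it is already a texture on this context.
    return cpuImage->makeTextureImage(dContext, GrMipmapped::kYes, SkBudgeted::kYes);
}

sk_sp<SkImage> downloadToRaster(sk_sp<SkImage> gpuImage) {
    // Copies the backing texture into CPU memory; nullptr if the copy fails.
    return gpuImage->makeRasterImage();
}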
+ + @param context the GrRecordingContext in play - if it exists + @param filter how SkImage is sampled when transformed + @param subset bounds of SkImage processed by filter + @param clipBounds expected bounds of filtered SkImage + @param outSubset storage for returned SkImage bounds + @param offset storage for returned SkImage translation + @return filtered SkImage, or nullptr + */ + sk_sp<SkImage> makeWithFilter(GrRecordingContext* context, + const SkImageFilter* filter, const SkIRect& subset, + const SkIRect& clipBounds, SkIRect* outSubset, + SkIPoint* offset) const; + + /** Defines a callback function, taking one parameter of type GrBackendTexture with + no return value. Function is called when back-end texture is to be released. + */ + typedef std::function<void(GrBackendTexture)> BackendTextureReleaseProc; + +#if SK_SUPPORT_GPU + /** Creates a GrBackendTexture from the provided SkImage. Returns true and + stores result in backendTexture and backendTextureReleaseProc if + texture is created; otherwise, returns false and leaves + backendTexture and backendTextureReleaseProc unmodified. + + Call backendTextureReleaseProc after deleting backendTexture. + backendTextureReleaseProc cleans up auxiliary data related to returned + backendTexture. The caller must delete returned backendTexture after use. + + If SkImage is both texture backed and singly referenced, image is returned in + backendTexture without conversion or making a copy. SkImage is singly referenced + if its was transferred solely using std::move(). + + If SkImage is not texture backed, returns texture with SkImage contents. + + @param context GPU context + @param image SkImage used for texture + @param backendTexture storage for back-end texture + @param backendTextureReleaseProc storage for clean up function + @return true if back-end texture was created + */ + static bool MakeBackendTextureFromSkImage(GrDirectContext* context, + sk_sp<SkImage> image, + GrBackendTexture* backendTexture, + BackendTextureReleaseProc* backendTextureReleaseProc); +#endif + /** Deprecated. + */ + enum LegacyBitmapMode { + kRO_LegacyBitmapMode, //!< returned bitmap is read-only and immutable + }; + + /** Deprecated. + Creates raster SkBitmap with same pixels as SkImage. If legacyBitmapMode is + kRO_LegacyBitmapMode, returned bitmap is read-only and immutable. + Returns true if SkBitmap is stored in bitmap. Returns false and resets bitmap if + SkBitmap write did not succeed. + + @param bitmap storage for legacy SkBitmap + @param legacyBitmapMode bitmap is read-only and immutable + @return true if SkBitmap was created + */ + bool asLegacyBitmap(SkBitmap* bitmap, + LegacyBitmapMode legacyBitmapMode = kRO_LegacyBitmapMode) const; + + /** Returns true if SkImage is backed by an image-generator or other service that creates + and caches its pixels or texture on-demand. + + @return true if SkImage is created as needed + + example: https://fiddle.skia.org/c/@Image_isLazyGenerated_a + example: https://fiddle.skia.org/c/@Image_isLazyGenerated_b + */ + bool isLazyGenerated() const; + + /** Creates SkImage in target SkColorSpace. + Returns nullptr if SkImage could not be created. + + Returns original SkImage if it is in target SkColorSpace. + Otherwise, converts pixels from SkImage SkColorSpace to target SkColorSpace. + If SkImage colorSpace() returns nullptr, SkImage SkColorSpace is assumed to be sRGB. + + If this image is texture-backed, the context parameter is required and must match the + context of the source image. 
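// A sketch of a color-space conversion with makeColorSpace(), assuming the
// image is raster-backed so no GrDirectContext is required. The Display P3
// space is built with factories from include/core/SkColorSpace.h.
sk_sp<SkImage> toDisplayP3(const sk_sp<SkImage>& image) {
    sk_sp<SkColorSpace> p3 = SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB,
                                                   SkNamedGamut::kDisplayP3);
    return image->makeColorSpace(p3);  // returns the original image if already in P3
}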
+ + @param target SkColorSpace describing color range of returned SkImage + @param direct The GrDirectContext in play, if it exists + @return created SkImage in target SkColorSpace + + example: https://fiddle.skia.org/c/@Image_makeColorSpace + */ + sk_sp<SkImage> makeColorSpace(sk_sp<SkColorSpace> target, + GrDirectContext* direct = nullptr) const; + + /** Experimental. + Creates SkImage in target SkColorType and SkColorSpace. + Returns nullptr if SkImage could not be created. + + Returns original SkImage if it is in target SkColorType and SkColorSpace. + + If this image is texture-backed, the context parameter is required and must match the + context of the source image. + + @param targetColorType SkColorType of returned SkImage + @param targetColorSpace SkColorSpace of returned SkImage + @param direct The GrDirectContext in play, if it exists + @return created SkImage in target SkColorType and SkColorSpace + */ + sk_sp<SkImage> makeColorTypeAndColorSpace(SkColorType targetColorType, + sk_sp<SkColorSpace> targetColorSpace, + GrDirectContext* direct = nullptr) const; + + /** Creates a new SkImage identical to this one, but with a different SkColorSpace. + This does not convert the underlying pixel data, so the resulting image will draw + differently. + */ + sk_sp<SkImage> reinterpretColorSpace(sk_sp<SkColorSpace> newColorSpace) const; + +private: + SkImage(const SkImageInfo& info, uint32_t uniqueID); + + friend class SkBitmap; + friend class SkImage_Base; + friend class SkMipmapBuilder; + + SkImageInfo fInfo; + const uint32_t fUniqueID; + + sk_sp<SkImage> withMipmaps(sk_sp<SkMipmap>) const; + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/core/SkImageEncoder.h b/src/deps/skia/include/core/SkImageEncoder.h new file mode 100644 index 000000000..fd7bc8036 --- /dev/null +++ b/src/deps/skia/include/core/SkImageEncoder.h @@ -0,0 +1,72 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageEncoder_DEFINED +#define SkImageEncoder_DEFINED + +// TODO: update clients so we can remove this include, they should IWYU +#include "include/core/SkBitmap.h" + +#include "include/core/SkData.h" +#include "include/core/SkEncodedImageFormat.h" +#include "include/core/SkPixmap.h" +#include "include/core/SkStream.h" + +class SkBitmap; + +/** + * Encode SkPixmap in the given binary image format. + * + * @param dst results are written to this stream. + * @param src source pixels. + * @param format image format, not all formats are supported. + * @param quality range from 0-100, this is supported by jpeg and webp. + * higher values correspond to improved visual quality, but less compression. + * + * @return false iff input is bad or format is unsupported. + * + * Will always return false if Skia is compiled without image + * encoders. + * + * For SkEncodedImageFormat::kWEBP, if quality is 100, it will use lossless compression. Otherwise + * it will use lossy. + * + * For examples of encoding an image to a file or to a block of memory, + * see tools/ToolUtils.h. + */ +SK_API bool SkEncodeImage(SkWStream* dst, const SkPixmap& src, + SkEncodedImageFormat format, int quality); + +/** + * The following helper function wraps SkEncodeImage(). + */ +SK_API bool SkEncodeImage(SkWStream* dst, const SkBitmap& src, SkEncodedImageFormat f, int q); + +/** + * Encode SkPixmap in the given binary image format. + * + * @param src source pixels. 
+ * @param format image format, not all formats are supported. + * @param quality range from 0-100, this is supported by jpeg and webp. + * higher values correspond to improved visual quality, but less compression. + * + * @return encoded data or nullptr if input is bad or format is unsupported. + * + * Will always return nullptr if Skia is compiled without image + * encoders. + * + * For SkEncodedImageFormat::kWEBP, if quality is 100, it will use lossless compression. Otherwise + * it will use lossy. + */ +SK_API sk_sp<SkData> SkEncodePixmap(const SkPixmap& src, SkEncodedImageFormat format, int quality); + +/** + * Helper that extracts the pixmap from the bitmap, and then calls SkEncodePixmap() + */ +SK_API sk_sp<SkData> SkEncodeBitmap(const SkBitmap& src, SkEncodedImageFormat format, int quality); + +#endif // SkImageEncoder_DEFINED diff --git a/src/deps/skia/include/core/SkImageFilter.h b/src/deps/skia/include/core/SkImageFilter.h new file mode 100644 index 000000000..e2240916d --- /dev/null +++ b/src/deps/skia/include/core/SkImageFilter.h @@ -0,0 +1,114 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageFilter_DEFINED +#define SkImageFilter_DEFINED + +#include "include/core/SkFlattenable.h" +#include "include/core/SkMatrix.h" +#include "include/core/SkRect.h" + +class SkColorFilter; + +/** + * Base class for image filters. If one is installed in the paint, then all drawing occurs as + * usual, but it is as if the drawing happened into an offscreen (before the xfermode is applied). + * This offscreen bitmap will then be handed to the imagefilter, who in turn creates a new bitmap + * which is what will finally be drawn to the device (using the original xfermode). + * + * The local space of image filters matches the local space of the drawn geometry. For instance if + * there is rotation on the canvas, the blur will be computed along those rotated axes and not in + * the device space. In order to achieve this result, the actual drawing of the geometry may happen + * in an unrotated coordinate system so that the filtered image can be computed more easily, and + * then it will be post transformed to match what would have been produced if the geometry were + * drawn with the total canvas matrix to begin with. + */ +class SK_API SkImageFilter : public SkFlattenable { +public: + enum MapDirection { + kForward_MapDirection, + kReverse_MapDirection, + }; + /** + * Map a device-space rect recursively forward or backward through the filter DAG. + * kForward_MapDirection is used to determine which pixels of the destination canvas a source + * image rect would touch after filtering. kReverse_MapDirection is used to determine which rect + * of the source image would be required to fill the given rect (typically, clip bounds). Used + * for clipping and temp-buffer allocations, so the result need not be exact, but should never + * be smaller than the real answer. The default implementation recursively unions all input + * bounds, or returns the source rect if no inputs. + * + * In kReverse mode, 'inputRect' is the device-space bounds of the input pixels. In kForward + * mode it should always be null. If 'inputRect' is null in kReverse mode the resulting answer + * may be incorrect. 
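// A sketch of the free-function encoders declared in SkImageEncoder.h above,
// assuming `bitmap` holds decoded pixels and Skia was built with SK_ENCODE_JPEG.
// SkFILEWStream comes from include/core/SkStream.h.
bool saveJpeg(const SkBitmap& bitmap, const char* path) {
    SkFILEWStream out(path);
    return out.isValid() &&
           SkEncodeImage(&out, bitmap, SkEncodedImageFormat::kJPEG, /*quality=*/90);
}

sk_sp<SkData> jpegBytes(const SkBitmap& bitmap) {
    return SkEncodeBitmap(bitmap, SkEncodedImageFormat::kJPEG, /*quality=*/90);
}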
+ */ + SkIRect filterBounds(const SkIRect& src, const SkMatrix& ctm, + MapDirection, const SkIRect* inputRect = nullptr) const; + + /** + * Returns whether this image filter is a color filter and puts the color filter into the + * "filterPtr" parameter if it can. Does nothing otherwise. + * If this returns false, then the filterPtr is unchanged. + * If this returns true, then if filterPtr is not null, it must be set to a ref'd colorfitler + * (i.e. it may not be set to NULL). + */ + bool isColorFilterNode(SkColorFilter** filterPtr) const; + + // DEPRECATED : use isColorFilterNode() instead + bool asColorFilter(SkColorFilter** filterPtr) const { + return this->isColorFilterNode(filterPtr); + } + + /** + * Returns true (and optionally returns a ref'd filter) if this imagefilter can be completely + * replaced by the returned colorfilter. i.e. the two effects will affect drawing in the same + * way. + */ + bool asAColorFilter(SkColorFilter** filterPtr) const; + + /** + * Returns the number of inputs this filter will accept (some inputs can be NULL). + */ + int countInputs() const; + + /** + * Returns the input filter at a given index, or NULL if no input is connected. The indices + * used are filter-specific. + */ + const SkImageFilter* getInput(int i) const; + + // Default impl returns union of all input bounds. + virtual SkRect computeFastBounds(const SkRect& bounds) const; + + // Can this filter DAG compute the resulting bounds of an object-space rectangle? + bool canComputeFastBounds() const; + + /** + * If this filter can be represented by another filter + a localMatrix, return that filter, + * else return null. + */ + sk_sp<SkImageFilter> makeWithLocalMatrix(const SkMatrix& matrix) const; + + static sk_sp<SkImageFilter> Deserialize(const void* data, size_t size, + const SkDeserialProcs* procs = nullptr) { + return sk_sp<SkImageFilter>(static_cast<SkImageFilter*>( + SkFlattenable::Deserialize(kSkImageFilter_Type, data, size, procs).release())); + } + +protected: + + sk_sp<SkImageFilter> refMe() const { + return sk_ref_sp(const_cast<SkImageFilter*>(this)); + } + +private: + friend class SkImageFilter_Base; + + using INHERITED = SkFlattenable; +}; + +#endif diff --git a/src/deps/skia/include/core/SkImageGenerator.h b/src/deps/skia/include/core/SkImageGenerator.h new file mode 100644 index 000000000..abc781754 --- /dev/null +++ b/src/deps/skia/include/core/SkImageGenerator.h @@ -0,0 +1,215 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageGenerator_DEFINED +#define SkImageGenerator_DEFINED + +#include "include/core/SkBitmap.h" +#include "include/core/SkColor.h" +#include "include/core/SkImage.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkYUVAPixmaps.h" +#include "include/private/SkTOptional.h" + +class GrRecordingContext; +class GrSurfaceProxyView; +class GrSamplerState; +class SkBitmap; +class SkData; +class SkMatrix; +class SkPaint; +class SkPicture; + +enum class GrImageTexGenPolicy : int; + +class SK_API SkImageGenerator { +public: + /** + * The PixelRef which takes ownership of this SkImageGenerator + * will call the image generator's destructor. + */ + virtual ~SkImageGenerator() { } + + uint32_t uniqueID() const { return fUniqueID; } + + /** + * Return a ref to the encoded (i.e. compressed) representation + * of this data. + * + * If non-NULL is returned, the caller is responsible for calling + * unref() on the data when it is finished. 
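// A sketch of querying a filter DAG with the methods above. The filter is
// assumed to come from a factory such as SkImageFilters::Blur() in
// include/effects/SkImageFilters.h.
void inspectFilter(const sk_sp<SkImageFilter>& filter,
                   const SkIRect& clip, const SkMatrix& ctm) {
    // Which source pixels are needed to fill `clip` after filtering?
    SkIRect needed = filter->filterBounds(clip, ctm,
                                          SkImageFilter::kReverse_MapDirection, &clip);
    (void)needed;

    // Can the whole DAG be replaced by a plain color filter?
    SkColorFilter* cf = nullptr;
    if (filter->asAColorFilter(&cf)) {
        sk_sp<SkColorFilter> owned(cf);  // adopt the ref returned by asAColorFilter
    }
}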
+ */ + sk_sp<SkData> refEncodedData() { + return this->onRefEncodedData(); + } + + /** + * Return the ImageInfo associated with this generator. + */ + const SkImageInfo& getInfo() const { return fInfo; } + + /** + * Can this generator be used to produce images that will be drawable to the specified context + * (or to CPU, if context is nullptr)? + */ + bool isValid(GrRecordingContext* context) const { + return this->onIsValid(context); + } + + /** + * Decode into the given pixels, a block of memory of size at + * least (info.fHeight - 1) * rowBytes + (info.fWidth * + * bytesPerPixel) + * + * Repeated calls to this function should give the same results, + * allowing the PixelRef to be immutable. + * + * @param info A description of the format + * expected by the caller. This can simply be identical + * to the info returned by getInfo(). + * + * This contract also allows the caller to specify + * different output-configs, which the implementation can + * decide to support or not. + * + * A size that does not match getInfo() implies a request + * to scale. If the generator cannot perform this scale, + * it will return false. + * + * @return true on success. + */ + bool getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes); + + bool getPixels(const SkPixmap& pm) { + return this->getPixels(pm.info(), pm.writable_addr(), pm.rowBytes()); + } + + /** + * If decoding to YUV is supported, this returns true. Otherwise, this + * returns false and the caller will ignore output parameter yuvaPixmapInfo. + * + * @param supportedDataTypes Indicates the data type/planar config combinations that are + * supported by the caller. If the generator supports decoding to + * YUV(A), but not as a type in supportedDataTypes, this method + * returns false. + * @param yuvaPixmapInfo Output parameter that specifies the planar configuration, subsampling, + * orientation, chroma siting, plane color types, and row bytes. + */ + bool queryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes, + SkYUVAPixmapInfo* yuvaPixmapInfo) const; + + /** + * Returns true on success and false on failure. + * This always attempts to perform a full decode. To get the planar + * configuration without decoding use queryYUVAInfo(). + * + * @param yuvaPixmaps Contains preallocated pixmaps configured according to a successful call + * to queryYUVAInfo(). + */ + bool getYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps); + +#if SK_SUPPORT_GPU + /** + * If the generator can natively/efficiently return its pixels as a GPU image (backed by a + * texture) this will return that image. If not, this will return NULL. + * + * This routine also supports retrieving only a subset of the pixels. That subset is specified + * by the following rectangle: + * + * subset = SkIRect::MakeXYWH(origin.x(), origin.y(), info.width(), info.height()) + * + * If subset is not contained inside the generator's bounds, this returns false. + * + * whole = SkIRect::MakeWH(getInfo().width(), getInfo().height()) + * if (!whole.contains(subset)) { + * return false; + * } + * + * Regarding the GrRecordingContext parameter: + * + * It must be non-NULL. The generator should only succeed if: + * - its internal context is the same + * - it can somehow convert its texture into one that is valid for the provided context. + * + * If the willNeedMipMaps flag is true, the generator should try to create a TextureProxy that + * at least has the mip levels allocated and the base layer filled in. 
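// A sketch of driving a generator by hand with getInfo()/getPixels(), assuming
// `gen` came from a factory such as SkImageGenerator::MakeFromEncoded().
bool decodeToN32(SkImageGenerator* gen, SkBitmap* dst) {
    SkImageInfo info = gen->getInfo().makeColorType(kN32_SkColorType);
    if (!dst->tryAllocPixels(info)) {
        return false;
    }
    // Repeated calls must yield the same pixels, so the result can back an
    // immutable SkImage.
    return gen->getPixels(dst->pixmap());
}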
If this is not possible, + * the generator is allowed to return a non mipped proxy, but this will have some additional + * overhead in later allocating mips and copying of the base layer. + * + * GrImageTexGenPolicy determines whether or not a new texture must be created (and its budget + * status) or whether this may (but is not required to) return a pre-existing texture that is + * retained by the generator (kDraw). + */ + GrSurfaceProxyView generateTexture(GrRecordingContext*, const SkImageInfo& info, + const SkIPoint& origin, GrMipmapped, GrImageTexGenPolicy); + +#endif + + /** + * If the default image decoder system can interpret the specified (encoded) data, then + * this returns a new ImageGenerator for it. Otherwise this returns NULL. Either way + * the caller is still responsible for managing their ownership of the data. + * By default, images will be converted to premultiplied pixels. The alpha type can be + * overridden by specifying kPremul_SkAlphaType or kUnpremul_SkAlphaType. Specifying + * kOpaque_SkAlphaType is not supported, and will return NULL. + */ + static std::unique_ptr<SkImageGenerator> MakeFromEncoded( + sk_sp<SkData>, skstd::optional<SkAlphaType> = skstd::nullopt); + + /** Return a new image generator backed by the specified picture. If the size is empty or + * the picture is NULL, this returns NULL. + * The optional matrix and paint arguments are passed to drawPicture() at rasterization + * time. + */ + static std::unique_ptr<SkImageGenerator> MakeFromPicture(const SkISize&, sk_sp<SkPicture>, + const SkMatrix*, const SkPaint*, + SkImage::BitDepth, + sk_sp<SkColorSpace>); + +protected: + static constexpr int kNeedNewImageUniqueID = 0; + + SkImageGenerator(const SkImageInfo& info, uint32_t uniqueId = kNeedNewImageUniqueID); + + virtual sk_sp<SkData> onRefEncodedData() { return nullptr; } + struct Options {}; + virtual bool onGetPixels(const SkImageInfo&, void*, size_t, const Options&) { return false; } + virtual bool onIsValid(GrRecordingContext*) const { return true; } + virtual bool onQueryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes&, + SkYUVAPixmapInfo*) const { return false; } + virtual bool onGetYUVAPlanes(const SkYUVAPixmaps&) { return false; } +#if SK_SUPPORT_GPU + // returns nullptr + virtual GrSurfaceProxyView onGenerateTexture(GrRecordingContext*, const SkImageInfo&, + const SkIPoint&, GrMipmapped, GrImageTexGenPolicy); + + // Most internal SkImageGenerators produce textures and views that use kTopLeft_GrSurfaceOrigin. + // If the generator may produce textures with different origins (e.g. + // GrAHardwareBufferImageGenerator) it should override this function to return the correct + // origin. + virtual GrSurfaceOrigin origin() const { return kTopLeft_GrSurfaceOrigin; } +#endif + +private: + const SkImageInfo fInfo; + const uint32_t fUniqueID; + + friend class SkImage_Lazy; + + // This is our default impl, which may be different on different platforms. + // It is called from NewFromEncoded() after it has checked for any runtime factory. + // The SkData will never be NULL, as that will have been checked by NewFromEncoded. 
+ static std::unique_ptr<SkImageGenerator> MakeFromEncodedImpl(sk_sp<SkData>, + skstd::optional<SkAlphaType>); + + SkImageGenerator(SkImageGenerator&&) = delete; + SkImageGenerator(const SkImageGenerator&) = delete; + SkImageGenerator& operator=(SkImageGenerator&&) = delete; + SkImageGenerator& operator=(const SkImageGenerator&) = delete; +}; + +#endif // SkImageGenerator_DEFINED diff --git a/src/deps/skia/include/core/SkImageInfo.h b/src/deps/skia/include/core/SkImageInfo.h new file mode 100644 index 000000000..99b65e875 --- /dev/null +++ b/src/deps/skia/include/core/SkImageInfo.h @@ -0,0 +1,721 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageInfo_DEFINED +#define SkImageInfo_DEFINED + +#include "include/core/SkColorSpace.h" +#include "include/core/SkMath.h" +#include "include/core/SkRect.h" +#include "include/core/SkSize.h" + +#include "include/private/SkTFitsIn.h" +#include "include/private/SkTo.h" + +class SkReadBuffer; +class SkWriteBuffer; + +/** \enum SkImageInfo::SkAlphaType + Describes how to interpret the alpha component of a pixel. A pixel may + be opaque, or alpha, describing multiple levels of transparency. + + In simple blending, alpha weights the draw color and the destination + color to create a new color. If alpha describes a weight from zero to one: + + new color = draw color * alpha + destination color * (1 - alpha) + + In practice alpha is encoded in two or more bits, where 1.0 equals all bits set. + + RGB may have alpha included in each component value; the stored + value is the original RGB multiplied by alpha. Premultiplied color + components improve performance. +*/ +enum SkAlphaType : int { + kUnknown_SkAlphaType, //!< uninitialized + kOpaque_SkAlphaType, //!< pixel is opaque + kPremul_SkAlphaType, //!< pixel components are premultiplied by alpha + kUnpremul_SkAlphaType, //!< pixel components are independent of alpha + kLastEnum_SkAlphaType = kUnpremul_SkAlphaType, //!< last valid value +}; + +/** Returns true if SkAlphaType equals kOpaque_SkAlphaType. + + kOpaque_SkAlphaType is a hint that the SkColorType is opaque, or that all + alpha values are set to their 1.0 equivalent. If SkAlphaType is + kOpaque_SkAlphaType, and SkColorType is not opaque, then the result of + drawing any pixel with a alpha value less than 1.0 is undefined. +*/ +static inline bool SkAlphaTypeIsOpaque(SkAlphaType at) { + return kOpaque_SkAlphaType == at; +} + +/////////////////////////////////////////////////////////////////////////////// + +/** \enum SkImageInfo::SkColorType + Describes how pixel bits encode color. A pixel may be an alpha mask, a grayscale, RGB, or ARGB. + + kN32_SkColorType selects the native 32-bit ARGB format for the current configuration. This can + lead to inconsistent results across platforms, so use with caution. 
+*/ +enum SkColorType : int { + kUnknown_SkColorType, //!< uninitialized + kAlpha_8_SkColorType, //!< pixel with alpha in 8-bit byte + kRGB_565_SkColorType, //!< pixel with 5 bits red, 6 bits green, 5 bits blue, in 16-bit word + kARGB_4444_SkColorType, //!< pixel with 4 bits for alpha, red, green, blue; in 16-bit word + kRGBA_8888_SkColorType, //!< pixel with 8 bits for red, green, blue, alpha; in 32-bit word + kRGB_888x_SkColorType, //!< pixel with 8 bits each for red, green, blue; in 32-bit word + kBGRA_8888_SkColorType, //!< pixel with 8 bits for blue, green, red, alpha; in 32-bit word + kRGBA_1010102_SkColorType, //!< 10 bits for red, green, blue; 2 bits for alpha; in 32-bit word + kBGRA_1010102_SkColorType, //!< 10 bits for blue, green, red; 2 bits for alpha; in 32-bit word + kRGB_101010x_SkColorType, //!< pixel with 10 bits each for red, green, blue; in 32-bit word + kBGR_101010x_SkColorType, //!< pixel with 10 bits each for blue, green, red; in 32-bit word + kGray_8_SkColorType, //!< pixel with grayscale level in 8-bit byte + kRGBA_F16Norm_SkColorType, //!< pixel with half floats in [0,1] for red, green, blue, alpha; + // in 64-bit word + kRGBA_F16_SkColorType, //!< pixel with half floats for red, green, blue, alpha; + // in 64-bit word + kRGBA_F32_SkColorType, //!< pixel using C float for red, green, blue, alpha; in 128-bit word + + // The following 6 colortypes are just for reading from - not for rendering to + kR8G8_unorm_SkColorType, //!< pixel with a uint8_t for red and green + + kA16_float_SkColorType, //!< pixel with a half float for alpha + kR16G16_float_SkColorType, //!< pixel with a half float for red and green + + kA16_unorm_SkColorType, //!< pixel with a little endian uint16_t for alpha + kR16G16_unorm_SkColorType, //!< pixel with a little endian uint16_t for red and green + kR16G16B16A16_unorm_SkColorType, //!< pixel with a little endian uint16_t for red, green, blue + // and alpha + + kSRGBA_8888_SkColorType, + + kLastEnum_SkColorType = kSRGBA_8888_SkColorType, //!< last valid value + +#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A) + kN32_SkColorType = kBGRA_8888_SkColorType,//!< native 32-bit BGRA encoding + +#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A) + kN32_SkColorType = kRGBA_8888_SkColorType,//!< native 32-bit RGBA encoding + +#else + #error "SK_*32_SHIFT values must correspond to BGRA or RGBA byte order" +#endif +}; + +/** Returns the number of bytes required to store a pixel, including unused padding. + Returns zero if ct is kUnknown_SkColorType or invalid. + + @return bytes per pixel +*/ +SK_API int SkColorTypeBytesPerPixel(SkColorType ct); + +/** Returns true if SkColorType always decodes alpha to 1.0, making the pixel + fully opaque. If true, SkColorType does not reserve bits to encode alpha. + + @return true if alpha is always set to 1.0 +*/ +SK_API bool SkColorTypeIsAlwaysOpaque(SkColorType ct); + +/** Returns true if canonical can be set to a valid SkAlphaType for colorType. If + there is more than one valid canonical SkAlphaType, set to alphaType, if valid. + If true is returned and canonical is not nullptr, store valid SkAlphaType. + + Returns false only if alphaType is kUnknown_SkAlphaType, color type is not + kUnknown_SkColorType, and SkColorType is not always opaque. If false is returned, + canonical is ignored. 
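// A sketch of the free helpers above, assuming `ct` and `requested` come from
// caller input.
size_t rowSize(SkColorType ct, int width) {
    return (size_t)width * SkColorTypeBytesPerPixel(ct);  // 0 for kUnknown_SkColorType
}

SkAlphaType normalizeAlpha(SkColorType ct, SkAlphaType requested) {
    SkAlphaType canonical;
    if (SkColorTypeValidateAlphaType(ct, requested, &canonical)) {
        return canonical;  // e.g. kOpaque_SkAlphaType for kRGB_565_SkColorType
    }
    return kUnknown_SkAlphaType;
}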
+ + @param canonical storage for SkAlphaType + @return true if valid SkAlphaType can be associated with colorType +*/ +SK_API bool SkColorTypeValidateAlphaType(SkColorType colorType, SkAlphaType alphaType, + SkAlphaType* canonical = nullptr); + +/** \enum SkImageInfo::SkYUVColorSpace + Describes color range of YUV pixels. The color mapping from YUV to RGB varies + depending on the source. YUV pixels may be generated by JPEG images, standard + video streams, or high definition video streams. Each has its own mapping from + YUV to RGB. + + JPEG YUV values encode the full range of 0 to 255 for all three components. + Video YUV values often range from 16 to 235 for Y and from 16 to 240 for U and V (limited). + Details of encoding and conversion to RGB are described in YCbCr color space. + + The identity colorspace exists to provide a utility mapping from Y to R, U to G and V to B. + It can be used to visualize the YUV planes or to explicitly post process the YUV channels. +*/ +enum SkYUVColorSpace : int { + kJPEG_Full_SkYUVColorSpace, //!< describes full range + kRec601_Limited_SkYUVColorSpace, //!< describes SDTV range + kRec709_Full_SkYUVColorSpace, //!< describes HDTV range + kRec709_Limited_SkYUVColorSpace, + kBT2020_8bit_Full_SkYUVColorSpace, //!< describes UHDTV range, non-constant-luminance + kBT2020_8bit_Limited_SkYUVColorSpace, + kBT2020_10bit_Full_SkYUVColorSpace, + kBT2020_10bit_Limited_SkYUVColorSpace, + kBT2020_12bit_Full_SkYUVColorSpace, + kBT2020_12bit_Limited_SkYUVColorSpace, + kIdentity_SkYUVColorSpace, //!< maps Y->R, U->G, V->B + + kLastEnum_SkYUVColorSpace = kIdentity_SkYUVColorSpace, //!< last valid value + + // Legacy (deprecated) names: + kJPEG_SkYUVColorSpace = kJPEG_Full_SkYUVColorSpace, + kRec601_SkYUVColorSpace = kRec601_Limited_SkYUVColorSpace, + kRec709_SkYUVColorSpace = kRec709_Limited_SkYUVColorSpace, + kBT2020_SkYUVColorSpace = kBT2020_8bit_Limited_SkYUVColorSpace, +}; + +/** \struct SkColorInfo + Describes pixel and encoding. SkImageInfo can be created from SkColorInfo by + providing dimensions. + + It encodes how pixel bits describe alpha, transparency; color components red, blue, + and green; and SkColorSpace, the range and linearity of colors. +*/ +class SK_API SkColorInfo { +public: + /** Creates an SkColorInfo with kUnknown_SkColorType, kUnknown_SkAlphaType, + and no SkColorSpace. + + @return empty SkImageInfo + */ + SkColorInfo() = default; + + /** Creates SkColorInfo from SkColorType ct, SkAlphaType at, and optionally SkColorSpace cs. + + If SkColorSpace cs is nullptr and SkColorInfo is part of drawing source: SkColorSpace + defaults to sRGB, mapping into SkSurface SkColorSpace. + + Parameters are not validated to see if their values are legal, or that the + combination is supported. 
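// A minimal sketch of constructing a SkColorInfo for premultiplied sRGB pixels;
// the helper name is illustrative only.
SkColorInfo makePremulSRGBInfo() {
    return SkColorInfo(kRGBA_8888_SkColorType, kPremul_SkAlphaType,
                       SkColorSpace::MakeSRGB());
}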
+ @return created SkColorInfo + */ + SkColorInfo(SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs) + : fColorSpace(std::move(cs)), fColorType(ct), fAlphaType(at) {} + + SkColorInfo(const SkColorInfo&) = default; + SkColorInfo(SkColorInfo&&) = default; + + SkColorInfo& operator=(const SkColorInfo&) = default; + SkColorInfo& operator=(SkColorInfo&&) = default; + + SkColorSpace* colorSpace() const { return fColorSpace.get(); } + sk_sp<SkColorSpace> refColorSpace() const { return fColorSpace; } + SkColorType colorType() const { return fColorType; } + SkAlphaType alphaType() const { return fAlphaType; } + + bool isOpaque() const { + return SkAlphaTypeIsOpaque(fAlphaType) + || SkColorTypeIsAlwaysOpaque(fColorType); + } + + bool gammaCloseToSRGB() const { return fColorSpace && fColorSpace->gammaCloseToSRGB(); } + + /** Does other represent the same color type, alpha type, and color space? */ + bool operator==(const SkColorInfo& other) const { + return fColorType == other.fColorType && fAlphaType == other.fAlphaType && + SkColorSpace::Equals(fColorSpace.get(), other.fColorSpace.get()); + } + + /** Does other represent a different color type, alpha type, or color space? */ + bool operator!=(const SkColorInfo& other) const { return !(*this == other); } + + /** Creates SkColorInfo with same SkColorType, SkColorSpace, with SkAlphaType set + to newAlphaType. + + Created SkColorInfo contains newAlphaType even if it is incompatible with + SkColorType, in which case SkAlphaType in SkColorInfo is ignored. + */ + SkColorInfo makeAlphaType(SkAlphaType newAlphaType) const { + return SkColorInfo(this->colorType(), newAlphaType, this->refColorSpace()); + } + + /** Creates new SkColorInfo with same SkAlphaType, SkColorSpace, with SkColorType + set to newColorType. + */ + SkColorInfo makeColorType(SkColorType newColorType) const { + return SkColorInfo(newColorType, this->alphaType(), this->refColorSpace()); + } + + /** Creates SkColorInfo with same SkAlphaType, SkColorType, with SkColorSpace + set to cs. cs may be nullptr. + */ + SkColorInfo makeColorSpace(sk_sp<SkColorSpace> cs) const { + return SkColorInfo(this->colorType(), this->alphaType(), std::move(cs)); + } + + /** Returns number of bytes per pixel required by SkColorType. + Returns zero if colorType() is kUnknown_SkColorType. + + @return bytes in pixel + + example: https://fiddle.skia.org/c/@ImageInfo_bytesPerPixel + */ + int bytesPerPixel() const; + + /** Returns bit shift converting row bytes to row pixels. + Returns zero for kUnknown_SkColorType. + + @return one of: 0, 1, 2, 3, 4; left shift to convert pixels to bytes + + example: https://fiddle.skia.org/c/@ImageInfo_shiftPerPixel + */ + int shiftPerPixel() const; + +private: + sk_sp<SkColorSpace> fColorSpace; + SkColorType fColorType = kUnknown_SkColorType; + SkAlphaType fAlphaType = kUnknown_SkAlphaType; +}; + +/** \struct SkImageInfo + Describes pixel dimensions and encoding. SkBitmap, SkImage, PixMap, and SkSurface + can be created from SkImageInfo. SkImageInfo can be retrieved from SkBitmap and + SkPixmap, but not from SkImage and SkSurface. For example, SkImage and SkSurface + implementations may defer pixel depth, so may not completely specify SkImageInfo. + + SkImageInfo contains dimensions, the pixel integral width and height. It encodes + how pixel bits describe alpha, transparency; color components red, blue, + and green; and SkColorSpace, the range and linearity of colors. 
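// A sketch of deriving variants with the make*() methods above; none of them
// mutate the original SkColorInfo.
void colorInfoVariants() {
    SkColorInfo srgb(kN32_SkColorType, kPremul_SkAlphaType, SkColorSpace::MakeSRGB());
    SkColorInfo opaque  = srgb.makeAlphaType(kOpaque_SkAlphaType);
    SkColorInfo f16     = srgb.makeColorType(kRGBA_F16_SkColorType);
    SkColorInfo noSpace = srgb.makeColorSpace(nullptr);
    (void)opaque; (void)f16; (void)noSpace;
}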
+*/ +struct SK_API SkImageInfo { +public: + + /** Creates an empty SkImageInfo with kUnknown_SkColorType, kUnknown_SkAlphaType, + a width and height of zero, and no SkColorSpace. + + @return empty SkImageInfo + */ + SkImageInfo() = default; + + /** Creates SkImageInfo from integral dimensions width and height, SkColorType ct, + SkAlphaType at, and optionally SkColorSpace cs. + + If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace + defaults to sRGB, mapping into SkSurface SkColorSpace. + + Parameters are not validated to see if their values are legal, or that the + combination is supported. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @param cs range of colors; may be nullptr + @return created SkImageInfo + */ + static SkImageInfo Make(int width, int height, SkColorType ct, SkAlphaType at, + sk_sp<SkColorSpace> cs = nullptr) { + return SkImageInfo({width, height}, {ct, at, std::move(cs)}); + } + static SkImageInfo Make(SkISize dimensions, SkColorType ct, SkAlphaType at, + sk_sp<SkColorSpace> cs = nullptr) { + return SkImageInfo(dimensions, {ct, at, std::move(cs)}); + } + + /** Creates SkImageInfo from integral dimensions and SkColorInfo colorInfo, + + Parameters are not validated to see if their values are legal, or that the + combination is supported. + + @param dimensions pixel column and row count; must be zeros or greater + @param SkColorInfo the pixel encoding consisting of SkColorType, SkAlphaType, and + SkColorSpace (which may be nullptr) + @return created SkImageInfo + */ + static SkImageInfo Make(SkISize dimensions, const SkColorInfo& colorInfo) { + return SkImageInfo(dimensions, colorInfo); + } + static SkImageInfo Make(SkISize dimensions, SkColorInfo&& colorInfo) { + return SkImageInfo(dimensions, std::move(colorInfo)); + } + + /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType, + SkAlphaType at, and optionally SkColorSpace cs. kN32_SkColorType will equal either + kBGRA_8888_SkColorType or kRGBA_8888_SkColorType, whichever is optimal. + + If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace + defaults to sRGB, mapping into SkSurface SkColorSpace. + + Parameters are not validated to see if their values are legal, or that the + combination is supported. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @param cs range of colors; may be nullptr + @return created SkImageInfo + */ + static SkImageInfo MakeN32(int width, int height, SkAlphaType at, + sk_sp<SkColorSpace> cs = nullptr) { + return Make({width, height}, kN32_SkColorType, at, std::move(cs)); + } + + /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType, + SkAlphaType at, with sRGB SkColorSpace. + + Parameters are not validated to see if their values are legal, or that the + combination is supported. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @return created SkImageInfo + + example: https://fiddle.skia.org/c/@ImageInfo_MakeS32 + */ + static SkImageInfo MakeS32(int width, int height, SkAlphaType at); + + /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType, + kPremul_SkAlphaType, with optional SkColorSpace. 
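// A sketch of the factories above: the explicit Make() call and the MakeN32()
// shorthand describe the same 256x256 premultiplied buffer on platforms where
// kN32_SkColorType is kRGBA_8888_SkColorType.
void makeInfoExamples() {
    SkImageInfo a = SkImageInfo::Make(256, 256, kRGBA_8888_SkColorType,
                                      kPremul_SkAlphaType, SkColorSpace::MakeSRGB());
    SkImageInfo b = SkImageInfo::MakeN32(256, 256, kPremul_SkAlphaType,
                                         SkColorSpace::MakeSRGB());
    (void)a; (void)b;
}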
+ + If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace + defaults to sRGB, mapping into SkSurface SkColorSpace. + + Parameters are not validated to see if their values are legal, or that the + combination is supported. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @param cs range of colors; may be nullptr + @return created SkImageInfo + */ + static SkImageInfo MakeN32Premul(int width, int height, sk_sp<SkColorSpace> cs = nullptr) { + return Make({width, height}, kN32_SkColorType, kPremul_SkAlphaType, std::move(cs)); + } + + /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType, + kPremul_SkAlphaType, with SkColorSpace set to nullptr. + + If SkImageInfo is part of drawing source: SkColorSpace defaults to sRGB, mapping + into SkSurface SkColorSpace. + + Parameters are not validated to see if their values are legal, or that the + combination is supported. + + @param dimensions width and height, each must be zero or greater + @param cs range of colors; may be nullptr + @return created SkImageInfo + */ + static SkImageInfo MakeN32Premul(SkISize dimensions, sk_sp<SkColorSpace> cs = nullptr) { + return Make(dimensions, kN32_SkColorType, kPremul_SkAlphaType, std::move(cs)); + } + + /** Creates SkImageInfo from integral dimensions width and height, kAlpha_8_SkColorType, + kPremul_SkAlphaType, with SkColorSpace set to nullptr. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @return created SkImageInfo + */ + static SkImageInfo MakeA8(int width, int height) { + return Make({width, height}, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr); + } + /** Creates SkImageInfo from integral dimensions, kAlpha_8_SkColorType, + kPremul_SkAlphaType, with SkColorSpace set to nullptr. + + @param dimensions pixel row and column count; must be zero or greater + @return created SkImageInfo + */ + static SkImageInfo MakeA8(SkISize dimensions) { + return Make(dimensions, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr); + } + + /** Creates SkImageInfo from integral dimensions width and height, kUnknown_SkColorType, + kUnknown_SkAlphaType, with SkColorSpace set to nullptr. + + Returned SkImageInfo as part of source does not draw, and as part of destination + can not be drawn to. + + @param width pixel column count; must be zero or greater + @param height pixel row count; must be zero or greater + @return created SkImageInfo + */ + static SkImageInfo MakeUnknown(int width, int height) { + return Make({width, height}, kUnknown_SkColorType, kUnknown_SkAlphaType, nullptr); + } + + /** Creates SkImageInfo from integral dimensions width and height set to zero, + kUnknown_SkColorType, kUnknown_SkAlphaType, with SkColorSpace set to nullptr. + + Returned SkImageInfo as part of source does not draw, and as part of destination + can not be drawn to. + + @return created SkImageInfo + */ + static SkImageInfo MakeUnknown() { + return MakeUnknown(0, 0); + } + + /** Returns pixel count in each row. + + @return pixel width + */ + int width() const { return fDimensions.width(); } + + /** Returns pixel row count. + + @return pixel height + */ + int height() const { return fDimensions.height(); } + + SkColorType colorType() const { return fColorInfo.colorType(); } + + SkAlphaType alphaType() const { return fColorInfo.alphaType(); } + + /** Returns SkColorSpace, the range of colors. 
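// A sketch of the most common conveniences: an N32 premul description for a
// raster surface and an alpha-only description for a mask. SkSurface comes
// from include/core/SkSurface.h.
void makeBackingStores() {
    SkImageInfo rgba = SkImageInfo::MakeN32Premul(640, 480);
    SkImageInfo mask = SkImageInfo::MakeA8(SkISize{640, 480});
    sk_sp<SkSurface> surface = SkSurface::MakeRaster(rgba);
    (void)mask; (void)surface;
}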
The reference count of + SkColorSpace is unchanged. The returned SkColorSpace is immutable. + + @return SkColorSpace, or nullptr + */ + SkColorSpace* colorSpace() const { return fColorInfo.colorSpace(); } + + /** Returns smart pointer to SkColorSpace, the range of colors. The smart pointer + tracks the number of objects sharing this SkColorSpace reference so the memory + is released when the owners destruct. + + The returned SkColorSpace is immutable. + + @return SkColorSpace wrapped in a smart pointer + */ + sk_sp<SkColorSpace> refColorSpace() const { return fColorInfo.refColorSpace(); } + + /** Returns if SkImageInfo describes an empty area of pixels by checking if either + width or height is zero or smaller. + + @return true if either dimension is zero or smaller + */ + bool isEmpty() const { return fDimensions.isEmpty(); } + + /** Returns the dimensionless SkColorInfo that represents the same color type, + alpha type, and color space as this SkImageInfo. + */ + const SkColorInfo& colorInfo() const { return fColorInfo; } + + /** Returns true if SkAlphaType is set to hint that all pixels are opaque; their + alpha value is implicitly or explicitly 1.0. If true, and all pixels are + not opaque, Skia may draw incorrectly. + + Does not check if SkColorType allows alpha, or if any pixel value has + transparency. + + @return true if SkAlphaType is kOpaque_SkAlphaType + */ + bool isOpaque() const { return fColorInfo.isOpaque(); } + + /** Returns SkISize { width(), height() }. + + @return integral size of width() and height() + */ + SkISize dimensions() const { return fDimensions; } + + /** Returns SkIRect { 0, 0, width(), height() }. + + @return integral rectangle from origin to width() and height() + */ + SkIRect bounds() const { return SkIRect::MakeSize(fDimensions); } + + /** Returns true if associated SkColorSpace is not nullptr, and SkColorSpace gamma + is approximately the same as sRGB. + This includes the + + @return true if SkColorSpace gamma is approximately the same as sRGB + */ + bool gammaCloseToSRGB() const { return fColorInfo.gammaCloseToSRGB(); } + + /** Creates SkImageInfo with the same SkColorType, SkColorSpace, and SkAlphaType, + with dimensions set to width and height. + + @param newWidth pixel column count; must be zero or greater + @param newHeight pixel row count; must be zero or greater + @return created SkImageInfo + */ + SkImageInfo makeWH(int newWidth, int newHeight) const { + return Make({newWidth, newHeight}, fColorInfo); + } + + /** Creates SkImageInfo with the same SkColorType, SkColorSpace, and SkAlphaType, + with dimensions set to newDimensions. + + @param newSize pixel column and row count; must be zero or greater + @return created SkImageInfo + */ + SkImageInfo makeDimensions(SkISize newSize) const { + return Make(newSize, fColorInfo); + } + + /** Creates SkImageInfo with same SkColorType, SkColorSpace, width, and height, + with SkAlphaType set to newAlphaType. + + Created SkImageInfo contains newAlphaType even if it is incompatible with + SkColorType, in which case SkAlphaType in SkImageInfo is ignored. + + @return created SkImageInfo + */ + SkImageInfo makeAlphaType(SkAlphaType newAlphaType) const { + return Make(fDimensions, fColorInfo.makeAlphaType(newAlphaType)); + } + + /** Creates SkImageInfo with same SkAlphaType, SkColorSpace, width, and height, + with SkColorType set to newColorType. 
+ + @return created SkImageInfo + */ + SkImageInfo makeColorType(SkColorType newColorType) const { + return Make(fDimensions, fColorInfo.makeColorType(newColorType)); + } + + /** Creates SkImageInfo with same SkAlphaType, SkColorType, width, and height, + with SkColorSpace set to cs. + + @param cs range of colors; may be nullptr + @return created SkImageInfo + */ + SkImageInfo makeColorSpace(sk_sp<SkColorSpace> cs) const { + return Make(fDimensions, fColorInfo.makeColorSpace(std::move(cs))); + } + + /** Returns number of bytes per pixel required by SkColorType. + Returns zero if colorType( is kUnknown_SkColorType. + + @return bytes in pixel + */ + int bytesPerPixel() const { return fColorInfo.bytesPerPixel(); } + + /** Returns bit shift converting row bytes to row pixels. + Returns zero for kUnknown_SkColorType. + + @return one of: 0, 1, 2, 3; left shift to convert pixels to bytes + */ + int shiftPerPixel() const { return fColorInfo.shiftPerPixel(); } + + /** Returns minimum bytes per row, computed from pixel width() and SkColorType, which + specifies bytesPerPixel(). SkBitmap maximum value for row bytes must fit + in 31 bits. + + @return width() times bytesPerPixel() as unsigned 64-bit integer + */ + uint64_t minRowBytes64() const { + return (uint64_t)sk_64_mul(this->width(), this->bytesPerPixel()); + } + + /** Returns minimum bytes per row, computed from pixel width() and SkColorType, which + specifies bytesPerPixel(). SkBitmap maximum value for row bytes must fit + in 31 bits. + + @return width() times bytesPerPixel() as size_t + */ + size_t minRowBytes() const { + uint64_t minRowBytes = this->minRowBytes64(); + if (!SkTFitsIn<int32_t>(minRowBytes)) { + return 0; + } + return (size_t)minRowBytes; + } + + /** Returns byte offset of pixel from pixel base address. + + Asserts in debug build if x or y is outside of bounds. Does not assert if + rowBytes is smaller than minRowBytes(), even though result may be incorrect. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @param rowBytes size of pixel row or larger + @return offset within pixel array + + example: https://fiddle.skia.org/c/@ImageInfo_computeOffset + */ + size_t computeOffset(int x, int y, size_t rowBytes) const; + + /** Compares SkImageInfo with other, and returns true if width, height, SkColorType, + SkAlphaType, and SkColorSpace are equivalent. + + @param other SkImageInfo to compare + @return true if SkImageInfo equals other + */ + bool operator==(const SkImageInfo& other) const { + return fDimensions == other.fDimensions && fColorInfo == other.fColorInfo; + } + + /** Compares SkImageInfo with other, and returns true if width, height, SkColorType, + SkAlphaType, and SkColorSpace are not equivalent. + + @param other SkImageInfo to compare + @return true if SkImageInfo is not equal to other + */ + bool operator!=(const SkImageInfo& other) const { + return !(*this == other); + } + + /** Returns storage required by pixel array, given SkImageInfo dimensions, SkColorType, + and rowBytes. rowBytes is assumed to be at least as large as minRowBytes(). + + Returns zero if height is zero. + Returns SIZE_MAX if answer exceeds the range of size_t. + + @param rowBytes size of pixel row or larger + @return memory required by pixel buffer + + example: https://fiddle.skia.org/c/@ImageInfo_computeByteSize + */ + size_t computeByteSize(size_t rowBytes) const; + + /** Returns storage required by pixel array, given SkImageInfo dimensions, and + SkColorType. 
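// A sketch of sizing and allocating a raw pixel buffer from an SkImageInfo,
// using the SIZE_MAX overflow sentinel described for computeByteSize().
// std::malloc requires <cstdlib>.
void* allocPixels(const SkImageInfo& info, size_t* rowBytesOut) {
    size_t rowBytes = info.minRowBytes();   // 0 if empty, unknown, or too wide
    if (rowBytes == 0) {
        return nullptr;
    }
    size_t byteSize = info.computeByteSize(rowBytes);
    if (byteSize == 0 || SkImageInfo::ByteSizeOverflowed(byteSize)) {
        return nullptr;
    }
    *rowBytesOut = rowBytes;
    return std::malloc(byteSize);
}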
Uses minRowBytes() to compute bytes for pixel row. + + Returns zero if height is zero. + Returns SIZE_MAX if answer exceeds the range of size_t. + + @return least memory required by pixel buffer + */ + size_t computeMinByteSize() const { + return this->computeByteSize(this->minRowBytes()); + } + + /** Returns true if byteSize equals SIZE_MAX. computeByteSize() and + computeMinByteSize() return SIZE_MAX if size_t can not hold buffer size. + + @param byteSize result of computeByteSize() or computeMinByteSize() + @return true if computeByteSize() or computeMinByteSize() result exceeds size_t + */ + static bool ByteSizeOverflowed(size_t byteSize) { + return SIZE_MAX == byteSize; + } + + /** Returns true if rowBytes is valid for this SkImageInfo. + + @param rowBytes size of pixel row including padding + @return true if rowBytes is large enough to contain pixel row and is properly + aligned + */ + bool validRowBytes(size_t rowBytes) const { + if (rowBytes < this->minRowBytes64()) { + return false; + } + int shift = this->shiftPerPixel(); + size_t alignedRowBytes = rowBytes >> shift << shift; + return alignedRowBytes == rowBytes; + } + + /** Creates an empty SkImageInfo with kUnknown_SkColorType, kUnknown_SkAlphaType, + a width and height of zero, and no SkColorSpace. + */ + void reset() { *this = {}; } + + /** Asserts if internal values are illegal or inconsistent. Only available if + SK_DEBUG is defined at compile time. + */ + SkDEBUGCODE(void validate() const;) + +private: + SkColorInfo fColorInfo; + SkISize fDimensions = {0, 0}; + + SkImageInfo(SkISize dimensions, const SkColorInfo& colorInfo) + : fColorInfo(colorInfo), fDimensions(dimensions) {} + + SkImageInfo(SkISize dimensions, SkColorInfo&& colorInfo) + : fColorInfo(std::move(colorInfo)), fDimensions(dimensions) {} +}; + +#endif diff --git a/src/deps/skia/include/core/SkM44.h b/src/deps/skia/include/core/SkM44.h new file mode 100644 index 000000000..ae08b4400 --- /dev/null +++ b/src/deps/skia/include/core/SkM44.h @@ -0,0 +1,426 @@ +/* + * Copyright 2020 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkM44_DEFINED +#define SkM44_DEFINED + +#include "include/core/SkMatrix.h" +#include "include/core/SkRect.h" +#include "include/core/SkScalar.h" + +struct SK_API SkV2 { + float x, y; + + bool operator==(const SkV2 v) const { return x == v.x && y == v.y; } + bool operator!=(const SkV2 v) const { return !(*this == v); } + + static SkScalar Dot(SkV2 a, SkV2 b) { return a.x * b.x + a.y * b.y; } + static SkScalar Cross(SkV2 a, SkV2 b) { return a.x * b.y - a.y * b.x; } + static SkV2 Normalize(SkV2 v) { return v * (1.0f / v.length()); } + + SkV2 operator-() const { return {-x, -y}; } + SkV2 operator+(SkV2 v) const { return {x+v.x, y+v.y}; } + SkV2 operator-(SkV2 v) const { return {x-v.x, y-v.y}; } + + SkV2 operator*(SkV2 v) const { return {x*v.x, y*v.y}; } + friend SkV2 operator*(SkV2 v, SkScalar s) { return {v.x*s, v.y*s}; } + friend SkV2 operator*(SkScalar s, SkV2 v) { return {v.x*s, v.y*s}; } + friend SkV2 operator/(SkV2 v, SkScalar s) { return {v.x/s, v.y/s}; } + + void operator+=(SkV2 v) { *this = *this + v; } + void operator-=(SkV2 v) { *this = *this - v; } + void operator*=(SkV2 v) { *this = *this * v; } + void operator*=(SkScalar s) { *this = *this * s; } + void operator/=(SkScalar s) { *this = *this / s; } + + SkScalar lengthSquared() const { return Dot(*this, *this); } + SkScalar length() const { return SkScalarSqrt(this->lengthSquared()); } + + SkScalar dot(SkV2 v) const { return Dot(*this, v); } + SkScalar cross(SkV2 v) const { return Cross(*this, v); } + SkV2 normalize() const { return Normalize(*this); } + + const float* ptr() const { return &x; } + float* ptr() { return &x; } +}; + +struct SK_API SkV3 { + float x, y, z; + + bool operator==(const SkV3& v) const { + return x == v.x && y == v.y && z == v.z; + } + bool operator!=(const SkV3& v) const { return !(*this == v); } + + static SkScalar Dot(const SkV3& a, const SkV3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } + static SkV3 Cross(const SkV3& a, const SkV3& b) { + return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x }; + } + static SkV3 Normalize(const SkV3& v) { return v * (1.0f / v.length()); } + + SkV3 operator-() const { return {-x, -y, -z}; } + SkV3 operator+(const SkV3& v) const { return { x + v.x, y + v.y, z + v.z }; } + SkV3 operator-(const SkV3& v) const { return { x - v.x, y - v.y, z - v.z }; } + + SkV3 operator*(const SkV3& v) const { + return { x*v.x, y*v.y, z*v.z }; + } + friend SkV3 operator*(const SkV3& v, SkScalar s) { + return { v.x*s, v.y*s, v.z*s }; + } + friend SkV3 operator*(SkScalar s, const SkV3& v) { return v*s; } + + void operator+=(SkV3 v) { *this = *this + v; } + void operator-=(SkV3 v) { *this = *this - v; } + void operator*=(SkV3 v) { *this = *this * v; } + void operator*=(SkScalar s) { *this = *this * s; } + + SkScalar lengthSquared() const { return Dot(*this, *this); } + SkScalar length() const { return SkScalarSqrt(Dot(*this, *this)); } + + SkScalar dot(const SkV3& v) const { return Dot(*this, v); } + SkV3 cross(const SkV3& v) const { return Cross(*this, v); } + SkV3 normalize() const { return Normalize(*this); } + + const float* ptr() const { return &x; } + float* ptr() { return &x; } +}; + +struct SK_API SkV4 { + float x, y, z, w; + + bool operator==(const SkV4& v) const { + return x == v.x && y == v.y && z == v.z && w == v.w; + } + bool operator!=(const SkV4& v) const { return !(*this == v); } + + SkV4 operator-() const { return {-x, -y, -z, -w}; } + SkV4 operator+(const SkV4& v) const { return { x + v.x, y + v.y, z + v.z, w + v.w }; } + SkV4 operator-(const SkV4& v) 
const { return { x - v.x, y - v.y, z - v.z, w - v.w }; } + + SkV4 operator*(const SkV4& v) const { + return { x*v.x, y*v.y, z*v.z, w*v.w }; + } + friend SkV4 operator*(const SkV4& v, SkScalar s) { + return { v.x*s, v.y*s, v.z*s, v.w*s }; + } + friend SkV4 operator*(SkScalar s, const SkV4& v) { return v*s; } + + const float* ptr() const { return &x; } + float* ptr() { return &x; } + + float operator[](int i) const { + SkASSERT(i >= 0 && i < 4); + return this->ptr()[i]; + } + float& operator[](int i) { + SkASSERT(i >= 0 && i < 4); + return this->ptr()[i]; + } +}; + +/** + * 4x4 matrix used by SkCanvas and other parts of Skia. + * + * Skia assumes a right-handed coordinate system: + * +X goes to the right + * +Y goes down + * +Z goes into the screen (away from the viewer) + */ +class SK_API SkM44 { +public: + SkM44(const SkM44& src) = default; + SkM44& operator=(const SkM44& src) = default; + + constexpr SkM44() + : fMat{1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 1} + {} + + SkM44(const SkM44& a, const SkM44& b) { + this->setConcat(a, b); + } + + enum Uninitialized_Constructor { + kUninitialized_Constructor + }; + SkM44(Uninitialized_Constructor) {} + + enum NaN_Constructor { + kNaN_Constructor + }; + constexpr SkM44(NaN_Constructor) + : fMat{SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, + SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, + SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, + SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN} + {} + + /** + * The constructor parameters are in row-major order. + */ + constexpr SkM44(SkScalar m0, SkScalar m4, SkScalar m8, SkScalar m12, + SkScalar m1, SkScalar m5, SkScalar m9, SkScalar m13, + SkScalar m2, SkScalar m6, SkScalar m10, SkScalar m14, + SkScalar m3, SkScalar m7, SkScalar m11, SkScalar m15) + // fMat is column-major order in memory. + : fMat{m0, m1, m2, m3, + m4, m5, m6, m7, + m8, m9, m10, m11, + m12, m13, m14, m15} + {} + + static SkM44 Rows(const SkV4& r0, const SkV4& r1, const SkV4& r2, const SkV4& r3) { + SkM44 m(kUninitialized_Constructor); + m.setRow(0, r0); + m.setRow(1, r1); + m.setRow(2, r2); + m.setRow(3, r3); + return m; + } + static SkM44 Cols(const SkV4& c0, const SkV4& c1, const SkV4& c2, const SkV4& c3) { + SkM44 m(kUninitialized_Constructor); + m.setCol(0, c0); + m.setCol(1, c1); + m.setCol(2, c2); + m.setCol(3, c3); + return m; + } + + static SkM44 RowMajor(const SkScalar r[16]) { + return SkM44(r[ 0], r[ 1], r[ 2], r[ 3], + r[ 4], r[ 5], r[ 6], r[ 7], + r[ 8], r[ 9], r[10], r[11], + r[12], r[13], r[14], r[15]); + } + static SkM44 ColMajor(const SkScalar c[16]) { + return SkM44(c[0], c[4], c[ 8], c[12], + c[1], c[5], c[ 9], c[13], + c[2], c[6], c[10], c[14], + c[3], c[7], c[11], c[15]); + } + + static SkM44 Translate(SkScalar x, SkScalar y, SkScalar z = 0) { + return SkM44(1, 0, 0, x, + 0, 1, 0, y, + 0, 0, 1, z, + 0, 0, 0, 1); + } + + static SkM44 Scale(SkScalar x, SkScalar y, SkScalar z = 1) { + return SkM44(x, 0, 0, 0, + 0, y, 0, 0, + 0, 0, z, 0, + 0, 0, 0, 1); + } + + static SkM44 Rotate(SkV3 axis, SkScalar radians) { + SkM44 m(kUninitialized_Constructor); + m.setRotate(axis, radians); + return m; + } + + // Scales and translates 'src' to fill 'dst' exactly. 
+ static SkM44 RectToRect(const SkRect& src, const SkRect& dst); + + static SkM44 LookAt(const SkV3& eye, const SkV3& center, const SkV3& up); + static SkM44 Perspective(float near, float far, float angle); + + bool operator==(const SkM44& other) const; + bool operator!=(const SkM44& other) const { + return !(other == *this); + } + + void getColMajor(SkScalar v[]) const { + memcpy(v, fMat, sizeof(fMat)); + } + void getRowMajor(SkScalar v[]) const; + + SkScalar rc(int r, int c) const { + SkASSERT(r >= 0 && r <= 3); + SkASSERT(c >= 0 && c <= 3); + return fMat[c*4 + r]; + } + void setRC(int r, int c, SkScalar value) { + SkASSERT(r >= 0 && r <= 3); + SkASSERT(c >= 0 && c <= 3); + fMat[c*4 + r] = value; + } + + SkV4 row(int i) const { + SkASSERT(i >= 0 && i <= 3); + return {fMat[i + 0], fMat[i + 4], fMat[i + 8], fMat[i + 12]}; + } + SkV4 col(int i) const { + SkASSERT(i >= 0 && i <= 3); + return {fMat[i*4 + 0], fMat[i*4 + 1], fMat[i*4 + 2], fMat[i*4 + 3]}; + } + + void setRow(int i, const SkV4& v) { + SkASSERT(i >= 0 && i <= 3); + fMat[i + 0] = v.x; + fMat[i + 4] = v.y; + fMat[i + 8] = v.z; + fMat[i + 12] = v.w; + } + void setCol(int i, const SkV4& v) { + SkASSERT(i >= 0 && i <= 3); + memcpy(&fMat[i*4], v.ptr(), sizeof(v)); + } + + SkM44& setIdentity() { + *this = { 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 1 }; + return *this; + } + + SkM44& setTranslate(SkScalar x, SkScalar y, SkScalar z = 0) { + *this = { 1, 0, 0, x, + 0, 1, 0, y, + 0, 0, 1, z, + 0, 0, 0, 1 }; + return *this; + } + + SkM44& setScale(SkScalar x, SkScalar y, SkScalar z = 1) { + *this = { x, 0, 0, 0, + 0, y, 0, 0, + 0, 0, z, 0, + 0, 0, 0, 1 }; + return *this; + } + + /** + * Set this matrix to rotate about the specified unit-length axis vector, + * by an angle specified by its sin() and cos(). + * + * This does not attempt to verify that axis.length() == 1 or that the sin,cos values + * are correct. + */ + SkM44& setRotateUnitSinCos(SkV3 axis, SkScalar sinAngle, SkScalar cosAngle); + + /** + * Set this matrix to rotate about the specified unit-length axis vector, + * by an angle specified in radians. + * + * This does not attempt to verify that axis.length() == 1. + */ + SkM44& setRotateUnit(SkV3 axis, SkScalar radians) { + return this->setRotateUnitSinCos(axis, SkScalarSin(radians), SkScalarCos(radians)); + } + + /** + * Set this matrix to rotate about the specified axis vector, + * by an angle specified in radians. + * + * Note: axis is not assumed to be unit-length, so it will be normalized internally. + * If axis is already unit-length, call setRotateAboutUnitRadians() instead. + */ + SkM44& setRotate(SkV3 axis, SkScalar radians); + + SkM44& setConcat(const SkM44& a, const SkM44& b); + + friend SkM44 operator*(const SkM44& a, const SkM44& b) { + return SkM44(a, b); + } + + SkM44& preConcat(const SkM44& m) { + return this->setConcat(*this, m); + } + + SkM44& postConcat(const SkM44& m) { + return this->setConcat(m, *this); + } + + /** + * A matrix is categorized as 'perspective' if the bottom row is not [0, 0, 0, 1]. + * For most uses, a bottom row of [0, 0, 0, X] behaves like a non-perspective matrix, though + * it will be categorized as perspective. Calling normalizePerspective() will change the + * matrix such that, if its bottom row was [0, 0, 0, X], it will be changed to [0, 0, 0, 1] + * by scaling the rest of the matrix by 1/X. 
+ * + * | A B C D | | A/X B/X C/X D/X | + * | E F G H | -> | E/X F/X G/X H/X | for X != 0 + * | I J K L | | I/X J/X K/X L/X | + * | 0 0 0 X | | 0 0 0 1 | + */ + void normalizePerspective(); + + /** Returns true if all elements of the matrix are finite. Returns false if any + element is infinity, or NaN. + + @return true if matrix has only finite elements + */ + bool isFinite() const { return SkScalarsAreFinite(fMat, 16); } + + /** If this is invertible, return that in inverse and return true. If it is + * not invertible, return false and leave the inverse parameter unchanged. + */ + bool SK_WARN_UNUSED_RESULT invert(SkM44* inverse) const; + + SkM44 SK_WARN_UNUSED_RESULT transpose() const; + + void dump() const; + + //////////// + + SkV4 map(float x, float y, float z, float w) const; + SkV4 operator*(const SkV4& v) const { + return this->map(v.x, v.y, v.z, v.w); + } + SkV3 operator*(SkV3 v) const { + auto v4 = this->map(v.x, v.y, v.z, 0); + return {v4.x, v4.y, v4.z}; + } + ////////////////////// Converting to/from SkMatrix + + /* When converting from SkM44 to SkMatrix, the third row and + * column is dropped. When converting from SkMatrix to SkM44 + * the third row and column remain as identity: + * [ a b c ] [ a b 0 c ] + * [ d e f ] -> [ d e 0 f ] + * [ g h i ] [ 0 0 1 0 ] + * [ g h 0 i ] + */ + SkMatrix asM33() const { + return SkMatrix::MakeAll(fMat[0], fMat[4], fMat[12], + fMat[1], fMat[5], fMat[13], + fMat[3], fMat[7], fMat[15]); + } + + explicit SkM44(const SkMatrix& src) + : SkM44(src[SkMatrix::kMScaleX], src[SkMatrix::kMSkewX], 0, src[SkMatrix::kMTransX], + src[SkMatrix::kMSkewY], src[SkMatrix::kMScaleY], 0, src[SkMatrix::kMTransY], + 0, 0, 1, 0, + src[SkMatrix::kMPersp0], src[SkMatrix::kMPersp1], 0, src[SkMatrix::kMPersp2]) + {} + + SkM44& preTranslate(SkScalar x, SkScalar y, SkScalar z = 0); + SkM44& postTranslate(SkScalar x, SkScalar y, SkScalar z = 0); + + SkM44& preScale(SkScalar x, SkScalar y); + SkM44& preScale(SkScalar x, SkScalar y, SkScalar z); + SkM44& preConcat(const SkMatrix&); + +private: + /* Stored in column-major. + * Indices + * 0 4 8 12 1 0 0 trans_x + * 1 5 9 13 e.g. 0 1 0 trans_y + * 2 6 10 14 0 0 1 trans_z + * 3 7 11 15 0 0 0 1 + */ + SkScalar fMat[16]; + + friend class SkMatrixPriv; +}; + +#endif diff --git a/src/deps/skia/include/core/SkMallocPixelRef.h b/src/deps/skia/include/core/SkMallocPixelRef.h new file mode 100644 index 000000000..cce54b50f --- /dev/null +++ b/src/deps/skia/include/core/SkMallocPixelRef.h @@ -0,0 +1,42 @@ +/* + * Copyright 2008 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkMallocPixelRef_DEFINED +#define SkMallocPixelRef_DEFINED + +#include "include/core/SkPixelRef.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" +class SkData; +struct SkImageInfo; + +/** We explicitly use the same allocator for our pixels that SkMask does, + so that we can freely assign memory allocated by one class to the other. +*/ +namespace SkMallocPixelRef { + /** + * Return a new SkMallocPixelRef, automatically allocating storage for the + * pixels. If rowBytes are 0, an optimal value will be chosen automatically. + * If rowBytes is > 0, then it will be respected, or NULL will be returned + * if rowBytes is invalid for the specified info. + * + * All pixel bytes are zeroed. + * + * Returns NULL on failure. 
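+     *
+     *  For example (illustrative sketch, not part of the upstream header; assumes
+     *  the caller includes SkImageInfo.h, which is only forward-declared here):
+     *
+     *      SkImageInfo info = SkImageInfo::MakeN32Premul(64, 64);
+     *      sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, 0);
+     *      if (!pr) {
+     *          // allocation failed or the info/rowBytes combination was invalid
+     *      }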
+ */ + SK_API sk_sp<SkPixelRef> MakeAllocate(const SkImageInfo&, size_t rowBytes); + + /** + * Return a new SkMallocPixelRef that will use the provided SkData and + * rowBytes as pixel storage. The SkData will be ref()ed and on + * destruction of the PixelRef, the SkData will be unref()ed. + * + * Returns NULL on failure. + */ + SK_API sk_sp<SkPixelRef> MakeWithData(const SkImageInfo&, size_t rowBytes, sk_sp<SkData> data); +} // namespace SkMallocPixelRef +#endif diff --git a/src/deps/skia/include/core/SkMaskFilter.h b/src/deps/skia/include/core/SkMaskFilter.h new file mode 100644 index 000000000..3fde51b71 --- /dev/null +++ b/src/deps/skia/include/core/SkMaskFilter.h @@ -0,0 +1,50 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkMaskFilter_DEFINED +#define SkMaskFilter_DEFINED + +#include "include/core/SkBlurTypes.h" +#include "include/core/SkCoverageMode.h" +#include "include/core/SkFlattenable.h" +#include "include/core/SkScalar.h" + +class SkMatrix; +struct SkRect; + +/** \class SkMaskFilter + + SkMaskFilter is the base class for object that perform transformations on + the mask before drawing it. An example subclass is Blur. +*/ +class SK_API SkMaskFilter : public SkFlattenable { +public: + /** Create a blur maskfilter. + * @param style The SkBlurStyle to use + * @param sigma Standard deviation of the Gaussian blur to apply. Must be > 0. + * @param respectCTM if true the blur's sigma is modified by the CTM. + * @return The new blur maskfilter + */ + static sk_sp<SkMaskFilter> MakeBlur(SkBlurStyle style, SkScalar sigma, + bool respectCTM = true); + + /** + * Returns the approximate bounds that would result from filtering the src rect. + * The actual result may be different, but it should be contained within the + * returned bounds. + */ + SkRect approximateFilteredBounds(const SkRect& src) const; + + static sk_sp<SkMaskFilter> Deserialize(const void* data, size_t size, + const SkDeserialProcs* procs = nullptr); + +private: + static void RegisterFlattenables(); + friend class SkFlattenable; +}; + +#endif diff --git a/src/deps/skia/include/core/SkMath.h b/src/deps/skia/include/core/SkMath.h new file mode 100644 index 000000000..97352afab --- /dev/null +++ b/src/deps/skia/include/core/SkMath.h @@ -0,0 +1,54 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkMath_DEFINED +#define SkMath_DEFINED + +#include "include/core/SkTypes.h" + +// 64bit -> 32bit utilities + +// Handy util that can be passed two ints, and will automatically promote to +// 64bits before the multiply, so the caller doesn't have to remember to cast +// e.g. (int64_t)a * b; +static inline int64_t sk_64_mul(int64_t a, int64_t b) { + return a * b; +} + +/////////////////////////////////////////////////////////////////////////////// + +/** + * Returns true if value is a power of 2. Does not explicitly check for + * value <= 0. + */ +template <typename T> constexpr inline bool SkIsPow2(T value) { + return (value & (value - 1)) == 0; +} + +/////////////////////////////////////////////////////////////////////////////// + +/** + * Return a*b/((1 << shift) - 1), rounding any fractional bits. 
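+ *  Worked example (illustrative, not part of the upstream header): for
+ *  a = 200, b = 100, shift = 8 the exact value is 20000 / 255 = 78.43...
+ *  The implementation computes prod = 20000 + 128 = 20128, then
+ *  (20128 + (20128 >> 8)) >> 8 = (20128 + 78) >> 8 = 78, the rounded result.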
+ * Only valid if a and b are unsigned and <= 32767 and shift is > 0 and <= 8 + */ +static inline unsigned SkMul16ShiftRound(U16CPU a, U16CPU b, int shift) { + SkASSERT(a <= 32767); + SkASSERT(b <= 32767); + SkASSERT(shift > 0 && shift <= 8); + unsigned prod = a*b + (1 << (shift - 1)); + return (prod + (prod >> shift)) >> shift; +} + +/** + * Return a*b/255, rounding any fractional bits. + * Only valid if a and b are unsigned and <= 32767. + */ +static inline U8CPU SkMulDiv255Round(U16CPU a, U16CPU b) { + return SkMul16ShiftRound(a,b,8); +} + +#endif diff --git a/src/deps/skia/include/core/SkMatrix.h b/src/deps/skia/include/core/SkMatrix.h new file mode 100644 index 000000000..03140760d --- /dev/null +++ b/src/deps/skia/include/core/SkMatrix.h @@ -0,0 +1,1986 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkMatrix_DEFINED +#define SkMatrix_DEFINED + +#include "include/core/SkRect.h" +#include "include/private/SkMacros.h" +#include "include/private/SkTo.h" + +struct SkRSXform; +struct SkPoint3; + +// Remove when clients are updated to live without this +#define SK_SUPPORT_LEGACY_MATRIX_RECTTORECT + +/** + * When we transform points through a matrix containing perspective (the bottom row is something + * other than 0,0,1), the bruteforce math can produce confusing results (since we might divide + * by 0, or a negative w value). By default, methods that map rects and paths will apply + * perspective clipping, but this can be changed by specifying kYes to those methods. + */ +enum class SkApplyPerspectiveClip { + kNo, //!< Don't pre-clip the geometry before applying the (perspective) matrix + kYes, //!< Do pre-clip the geometry before applying the (perspective) matrix +}; + +/** \class SkMatrix + SkMatrix holds a 3x3 matrix for transforming coordinates. This allows mapping + SkPoint and vectors with translation, scaling, skewing, rotation, and + perspective. + + SkMatrix elements are in row major order. + SkMatrix constexpr default constructs to identity. + + SkMatrix includes a hidden variable that classifies the type of matrix to + improve performance. SkMatrix is not thread safe unless getType() is called first. + + example: https://fiddle.skia.org/c/@Matrix_063 +*/ +SK_BEGIN_REQUIRE_DENSE +class SK_API SkMatrix { +public: + + /** Creates an identity SkMatrix: + + | 1 0 0 | + | 0 1 0 | + | 0 0 1 | + */ + constexpr SkMatrix() : SkMatrix(1,0,0, 0,1,0, 0,0,1, kIdentity_Mask | kRectStaysRect_Mask) {} + + /** Sets SkMatrix to scale by (sx, sy). Returned matrix is: + + | sx 0 0 | + | 0 sy 0 | + | 0 0 1 | + + @param sx horizontal scale factor + @param sy vertical scale factor + @return SkMatrix with scale + */ + static SkMatrix SK_WARN_UNUSED_RESULT Scale(SkScalar sx, SkScalar sy) { + SkMatrix m; + m.setScale(sx, sy); + return m; + } + + /** Sets SkMatrix to translate by (dx, dy). Returned matrix is: + + | 1 0 dx | + | 0 1 dy | + | 0 0 1 | + + @param dx horizontal translation + @param dy vertical translation + @return SkMatrix with translation + */ + static SkMatrix SK_WARN_UNUSED_RESULT Translate(SkScalar dx, SkScalar dy) { + SkMatrix m; + m.setTranslate(dx, dy); + return m; + } + static SkMatrix SK_WARN_UNUSED_RESULT Translate(SkVector t) { return Translate(t.x(), t.y()); } + static SkMatrix SK_WARN_UNUSED_RESULT Translate(SkIVector t) { return Translate(t.x(), t.y()); } + + /** Sets SkMatrix to rotate by |deg| about a pivot point at (0, 0). 
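+
+        For example (illustrative sketch, not part of the upstream header):
+
+            SkMatrix m = SkMatrix::RotateDeg(90);
+            SkPoint  p = m.mapPoint({1, 0});   // p == {0, 1} in Skia's y-down space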
+ + @param deg rotation angle in degrees (positive rotates clockwise) + @return SkMatrix with rotation + */ + static SkMatrix SK_WARN_UNUSED_RESULT RotateDeg(SkScalar deg) { + SkMatrix m; + m.setRotate(deg); + return m; + } + static SkMatrix SK_WARN_UNUSED_RESULT RotateDeg(SkScalar deg, SkPoint pt) { + SkMatrix m; + m.setRotate(deg, pt.x(), pt.y()); + return m; + } + static SkMatrix SK_WARN_UNUSED_RESULT RotateRad(SkScalar rad) { + return RotateDeg(SkRadiansToDegrees(rad)); + } + + /** Sets SkMatrix to skew by (kx, ky) about pivot point (0, 0). + + @param kx horizontal skew factor + @param ky vertical skew factor + @return SkMatrix with skew + */ + static SkMatrix SK_WARN_UNUSED_RESULT Skew(SkScalar kx, SkScalar ky) { + SkMatrix m; + m.setSkew(kx, ky); + return m; + } + + /** \enum SkMatrix::ScaleToFit + ScaleToFit describes how SkMatrix is constructed to map one SkRect to another. + ScaleToFit may allow SkMatrix to have unequal horizontal and vertical scaling, + or may restrict SkMatrix to square scaling. If restricted, ScaleToFit specifies + how SkMatrix maps to the side or center of the destination SkRect. + */ + enum ScaleToFit { + kFill_ScaleToFit, //!< scales in x and y to fill destination SkRect + kStart_ScaleToFit, //!< scales and aligns to left and top + kCenter_ScaleToFit, //!< scales and aligns to center + kEnd_ScaleToFit, //!< scales and aligns to right and bottom + }; + + /** Returns SkMatrix set to scale and translate src to dst. ScaleToFit selects + whether mapping completely fills dst or preserves the aspect ratio, and how to + align src within dst. Returns the identity SkMatrix if src is empty. If dst is + empty, returns SkMatrix set to: + + | 0 0 0 | + | 0 0 0 | + | 0 0 1 | + + @param src SkRect to map from + @param dst SkRect to map to + @param mode How to handle the mapping + @return SkMatrix mapping src to dst + */ + static SkMatrix SK_WARN_UNUSED_RESULT RectToRect(const SkRect& src, const SkRect& dst, + ScaleToFit mode = kFill_ScaleToFit) { + return MakeRectToRect(src, dst, mode); + } + + /** Sets SkMatrix to: + + | scaleX skewX transX | + | skewY scaleY transY | + | pers0 pers1 pers2 | + + @param scaleX horizontal scale factor + @param skewX horizontal skew factor + @param transX horizontal translation + @param skewY vertical skew factor + @param scaleY vertical scale factor + @param transY vertical translation + @param pers0 input x-axis perspective factor + @param pers1 input y-axis perspective factor + @param pers2 perspective scale factor + @return SkMatrix constructed from parameters + */ + static SkMatrix SK_WARN_UNUSED_RESULT MakeAll(SkScalar scaleX, SkScalar skewX, SkScalar transX, + SkScalar skewY, SkScalar scaleY, SkScalar transY, + SkScalar pers0, SkScalar pers1, SkScalar pers2) { + SkMatrix m; + m.setAll(scaleX, skewX, transX, skewY, scaleY, transY, pers0, pers1, pers2); + return m; + } + + /** \enum SkMatrix::TypeMask + Enum of bit fields for mask returned by getType(). + Used to identify the complexity of SkMatrix, to optimize performance. + */ + enum TypeMask { + kIdentity_Mask = 0, //!< identity SkMatrix; all bits clear + kTranslate_Mask = 0x01, //!< translation SkMatrix + kScale_Mask = 0x02, //!< scale SkMatrix + kAffine_Mask = 0x04, //!< skew or rotate SkMatrix + kPerspective_Mask = 0x08, //!< perspective SkMatrix + }; + + /** Returns a bit field describing the transformations the matrix may + perform. The bit field is computed conservatively, so it may include + false positives. For example, when kPerspective_Mask is set, all + other bits are set. 
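+
+        A caller can therefore test individual bits, e.g. (illustrative sketch,
+        not part of the upstream header):
+
+            if (m.getType() & SkMatrix::kPerspective_Mask) {
+                // take the slower, perspective-correct path
+            }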
+ + @return kIdentity_Mask, or combinations of: kTranslate_Mask, kScale_Mask, + kAffine_Mask, kPerspective_Mask + */ + TypeMask getType() const { + if (fTypeMask & kUnknown_Mask) { + fTypeMask = this->computeTypeMask(); + } + // only return the public masks + return (TypeMask)(fTypeMask & 0xF); + } + + /** Returns true if SkMatrix is identity. Identity matrix is: + + | 1 0 0 | + | 0 1 0 | + | 0 0 1 | + + @return true if SkMatrix has no effect + */ + bool isIdentity() const { + return this->getType() == 0; + } + + /** Returns true if SkMatrix at most scales and translates. SkMatrix may be identity, + contain only scale elements, only translate elements, or both. SkMatrix form is: + + | scale-x 0 translate-x | + | 0 scale-y translate-y | + | 0 0 1 | + + @return true if SkMatrix is identity; or scales, translates, or both + */ + bool isScaleTranslate() const { + return !(this->getType() & ~(kScale_Mask | kTranslate_Mask)); + } + + /** Returns true if SkMatrix is identity, or translates. SkMatrix form is: + + | 1 0 translate-x | + | 0 1 translate-y | + | 0 0 1 | + + @return true if SkMatrix is identity, or translates + */ + bool isTranslate() const { return !(this->getType() & ~(kTranslate_Mask)); } + + /** Returns true SkMatrix maps SkRect to another SkRect. If true, SkMatrix is identity, + or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all + cases, SkMatrix may also have translation. SkMatrix form is either: + + | scale-x 0 translate-x | + | 0 scale-y translate-y | + | 0 0 1 | + + or + + | 0 rotate-x translate-x | + | rotate-y 0 translate-y | + | 0 0 1 | + + for non-zero values of scale-x, scale-y, rotate-x, and rotate-y. + + Also called preservesAxisAlignment(); use the one that provides better inline + documentation. + + @return true if SkMatrix maps one SkRect into another + */ + bool rectStaysRect() const { + if (fTypeMask & kUnknown_Mask) { + fTypeMask = this->computeTypeMask(); + } + return (fTypeMask & kRectStaysRect_Mask) != 0; + } + + /** Returns true SkMatrix maps SkRect to another SkRect. If true, SkMatrix is identity, + or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all + cases, SkMatrix may also have translation. SkMatrix form is either: + + | scale-x 0 translate-x | + | 0 scale-y translate-y | + | 0 0 1 | + + or + + | 0 rotate-x translate-x | + | rotate-y 0 translate-y | + | 0 0 1 | + + for non-zero values of scale-x, scale-y, rotate-x, and rotate-y. + + Also called rectStaysRect(); use the one that provides better inline + documentation. + + @return true if SkMatrix maps one SkRect into another + */ + bool preservesAxisAlignment() const { return this->rectStaysRect(); } + + /** Returns true if the matrix contains perspective elements. SkMatrix form is: + + | -- -- -- | + | -- -- -- | + | perspective-x perspective-y perspective-scale | + + where perspective-x or perspective-y is non-zero, or perspective-scale is + not one. All other elements may have any value. + + @return true if SkMatrix is in most general form + */ + bool hasPerspective() const { + return SkToBool(this->getPerspectiveTypeMaskOnly() & + kPerspective_Mask); + } + + /** Returns true if SkMatrix contains only translation, rotation, reflection, and + uniform scale. + Returns false if SkMatrix contains different scales, skewing, perspective, or + degenerate forms that collapse to a line or point. + + Describes that the SkMatrix makes rendering with and without the matrix are + visually alike; a transformed circle remains a circle. 
Mathematically, this is + referred to as similarity of a Euclidean space, or a similarity transformation. + + Preserves right angles, keeping the arms of the angle equal lengths. + + @param tol to be deprecated + @return true if SkMatrix only rotates, uniformly scales, translates + + example: https://fiddle.skia.org/c/@Matrix_isSimilarity + */ + bool isSimilarity(SkScalar tol = SK_ScalarNearlyZero) const; + + /** Returns true if SkMatrix contains only translation, rotation, reflection, and + scale. Scale may differ along rotated axes. + Returns false if SkMatrix skewing, perspective, or degenerate forms that collapse + to a line or point. + + Preserves right angles, but not requiring that the arms of the angle + retain equal lengths. + + @param tol to be deprecated + @return true if SkMatrix only rotates, scales, translates + + example: https://fiddle.skia.org/c/@Matrix_preservesRightAngles + */ + bool preservesRightAngles(SkScalar tol = SK_ScalarNearlyZero) const; + + /** SkMatrix organizes its values in row-major order. These members correspond to + each value in SkMatrix. + */ + static constexpr int kMScaleX = 0; //!< horizontal scale factor + static constexpr int kMSkewX = 1; //!< horizontal skew factor + static constexpr int kMTransX = 2; //!< horizontal translation + static constexpr int kMSkewY = 3; //!< vertical skew factor + static constexpr int kMScaleY = 4; //!< vertical scale factor + static constexpr int kMTransY = 5; //!< vertical translation + static constexpr int kMPersp0 = 6; //!< input x perspective factor + static constexpr int kMPersp1 = 7; //!< input y perspective factor + static constexpr int kMPersp2 = 8; //!< perspective bias + + /** Affine arrays are in column-major order to match the matrix used by + PDF and XPS. + */ + static constexpr int kAScaleX = 0; //!< horizontal scale factor + static constexpr int kASkewY = 1; //!< vertical skew factor + static constexpr int kASkewX = 2; //!< horizontal skew factor + static constexpr int kAScaleY = 3; //!< vertical scale factor + static constexpr int kATransX = 4; //!< horizontal translation + static constexpr int kATransY = 5; //!< vertical translation + + /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is + defined. + + @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, + kMPersp0, kMPersp1, kMPersp2 + @return value corresponding to index + */ + SkScalar operator[](int index) const { + SkASSERT((unsigned)index < 9); + return fMat[index]; + } + + /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is + defined. + + @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, + kMPersp0, kMPersp1, kMPersp2 + @return value corresponding to index + */ + SkScalar get(int index) const { + SkASSERT((unsigned)index < 9); + return fMat[index]; + } + + /** Returns one matrix value from a particular row/column. Asserts if index is out + of range and SK_DEBUG is defined. + + @param r matrix row to fetch + @param c matrix column to fetch + @return value at the given matrix position + */ + SkScalar rc(int r, int c) const { + SkASSERT(r >= 0 && r <= 2); + SkASSERT(c >= 0 && c <= 2); + return fMat[r*3 + c]; + } + + /** Returns scale factor multiplied by x-axis input, contributing to x-axis output. + With mapPoints(), scales SkPoint along the x-axis. + + @return horizontal scale factor + */ + SkScalar getScaleX() const { return fMat[kMScaleX]; } + + /** Returns scale factor multiplied by y-axis input, contributing to y-axis output. 
+ With mapPoints(), scales SkPoint along the y-axis. + + @return vertical scale factor + */ + SkScalar getScaleY() const { return fMat[kMScaleY]; } + + /** Returns scale factor multiplied by x-axis input, contributing to y-axis output. + With mapPoints(), skews SkPoint along the y-axis. + Skewing both axes can rotate SkPoint. + + @return vertical skew factor + */ + SkScalar getSkewY() const { return fMat[kMSkewY]; } + + /** Returns scale factor multiplied by y-axis input, contributing to x-axis output. + With mapPoints(), skews SkPoint along the x-axis. + Skewing both axes can rotate SkPoint. + + @return horizontal scale factor + */ + SkScalar getSkewX() const { return fMat[kMSkewX]; } + + /** Returns translation contributing to x-axis output. + With mapPoints(), moves SkPoint along the x-axis. + + @return horizontal translation factor + */ + SkScalar getTranslateX() const { return fMat[kMTransX]; } + + /** Returns translation contributing to y-axis output. + With mapPoints(), moves SkPoint along the y-axis. + + @return vertical translation factor + */ + SkScalar getTranslateY() const { return fMat[kMTransY]; } + + /** Returns factor scaling input x-axis relative to input y-axis. + + @return input x-axis perspective factor + */ + SkScalar getPerspX() const { return fMat[kMPersp0]; } + + /** Returns factor scaling input y-axis relative to input x-axis. + + @return input y-axis perspective factor + */ + SkScalar getPerspY() const { return fMat[kMPersp1]; } + + /** Returns writable SkMatrix value. Asserts if index is out of range and SK_DEBUG is + defined. Clears internal cache anticipating that caller will change SkMatrix value. + + Next call to read SkMatrix state may recompute cache; subsequent writes to SkMatrix + value must be followed by dirtyMatrixTypeCache(). + + @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, + kMPersp0, kMPersp1, kMPersp2 + @return writable value corresponding to index + */ + SkScalar& operator[](int index) { + SkASSERT((unsigned)index < 9); + this->setTypeMask(kUnknown_Mask); + return fMat[index]; + } + + /** Sets SkMatrix value. Asserts if index is out of range and SK_DEBUG is + defined. Safer than operator[]; internal cache is always maintained. + + @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, + kMPersp0, kMPersp1, kMPersp2 + @param value scalar to store in SkMatrix + */ + SkMatrix& set(int index, SkScalar value) { + SkASSERT((unsigned)index < 9); + fMat[index] = value; + this->setTypeMask(kUnknown_Mask); + return *this; + } + + /** Sets horizontal scale factor. + + @param v horizontal scale factor to store + */ + SkMatrix& setScaleX(SkScalar v) { return this->set(kMScaleX, v); } + + /** Sets vertical scale factor. + + @param v vertical scale factor to store + */ + SkMatrix& setScaleY(SkScalar v) { return this->set(kMScaleY, v); } + + /** Sets vertical skew factor. + + @param v vertical skew factor to store + */ + SkMatrix& setSkewY(SkScalar v) { return this->set(kMSkewY, v); } + + /** Sets horizontal skew factor. + + @param v horizontal skew factor to store + */ + SkMatrix& setSkewX(SkScalar v) { return this->set(kMSkewX, v); } + + /** Sets horizontal translation. + + @param v horizontal translation to store + */ + SkMatrix& setTranslateX(SkScalar v) { return this->set(kMTransX, v); } + + /** Sets vertical translation. 
+ + @param v vertical translation to store + */ + SkMatrix& setTranslateY(SkScalar v) { return this->set(kMTransY, v); } + + /** Sets input x-axis perspective factor, which causes mapXY() to vary input x-axis values + inversely proportional to input y-axis values. + + @param v perspective factor + */ + SkMatrix& setPerspX(SkScalar v) { return this->set(kMPersp0, v); } + + /** Sets input y-axis perspective factor, which causes mapXY() to vary input y-axis values + inversely proportional to input x-axis values. + + @param v perspective factor + */ + SkMatrix& setPerspY(SkScalar v) { return this->set(kMPersp1, v); } + + /** Sets all values from parameters. Sets matrix to: + + | scaleX skewX transX | + | skewY scaleY transY | + | persp0 persp1 persp2 | + + @param scaleX horizontal scale factor to store + @param skewX horizontal skew factor to store + @param transX horizontal translation to store + @param skewY vertical skew factor to store + @param scaleY vertical scale factor to store + @param transY vertical translation to store + @param persp0 input x-axis values perspective factor to store + @param persp1 input y-axis values perspective factor to store + @param persp2 perspective scale factor to store + */ + SkMatrix& setAll(SkScalar scaleX, SkScalar skewX, SkScalar transX, + SkScalar skewY, SkScalar scaleY, SkScalar transY, + SkScalar persp0, SkScalar persp1, SkScalar persp2) { + fMat[kMScaleX] = scaleX; + fMat[kMSkewX] = skewX; + fMat[kMTransX] = transX; + fMat[kMSkewY] = skewY; + fMat[kMScaleY] = scaleY; + fMat[kMTransY] = transY; + fMat[kMPersp0] = persp0; + fMat[kMPersp1] = persp1; + fMat[kMPersp2] = persp2; + this->setTypeMask(kUnknown_Mask); + return *this; + } + + /** Copies nine scalar values contained by SkMatrix into buffer, in member value + ascending order: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, + kMPersp0, kMPersp1, kMPersp2. + + @param buffer storage for nine scalar values + */ + void get9(SkScalar buffer[9]) const { + memcpy(buffer, fMat, 9 * sizeof(SkScalar)); + } + + /** Sets SkMatrix to nine scalar values in buffer, in member value ascending order: + kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, kMPersp0, kMPersp1, + kMPersp2. + + Sets matrix to: + + | buffer[0] buffer[1] buffer[2] | + | buffer[3] buffer[4] buffer[5] | + | buffer[6] buffer[7] buffer[8] | + + In the future, set9 followed by get9 may not return the same values. Since SkMatrix + maps non-homogeneous coordinates, scaling all nine values produces an equivalent + transformation, possibly improving precision. + + @param buffer nine scalar values + */ + SkMatrix& set9(const SkScalar buffer[9]); + + /** Sets SkMatrix to identity; which has no effect on mapped SkPoint. Sets SkMatrix to: + + | 1 0 0 | + | 0 1 0 | + | 0 0 1 | + + Also called setIdentity(); use the one that provides better inline + documentation. + */ + SkMatrix& reset(); + + /** Sets SkMatrix to identity; which has no effect on mapped SkPoint. Sets SkMatrix to: + + | 1 0 0 | + | 0 1 0 | + | 0 0 1 | + + Also called reset(); use the one that provides better inline + documentation. + */ + SkMatrix& setIdentity() { return this->reset(); } + + /** Sets SkMatrix to translate by (dx, dy). + + @param dx horizontal translation + @param dy vertical translation + */ + SkMatrix& setTranslate(SkScalar dx, SkScalar dy); + + /** Sets SkMatrix to translate by (v.fX, v.fY). 
+ + @param v vector containing horizontal and vertical translation + */ + SkMatrix& setTranslate(const SkVector& v) { return this->setTranslate(v.fX, v.fY); } + + /** Sets SkMatrix to scale by sx and sy, about a pivot point at (px, py). + The pivot point is unchanged when mapped with SkMatrix. + + @param sx horizontal scale factor + @param sy vertical scale factor + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& setScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py); + + /** Sets SkMatrix to scale by sx and sy about at pivot point at (0, 0). + + @param sx horizontal scale factor + @param sy vertical scale factor + */ + SkMatrix& setScale(SkScalar sx, SkScalar sy); + + /** Sets SkMatrix to rotate by degrees about a pivot point at (px, py). + The pivot point is unchanged when mapped with SkMatrix. + + Positive degrees rotates clockwise. + + @param degrees angle of axes relative to upright axes + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& setRotate(SkScalar degrees, SkScalar px, SkScalar py); + + /** Sets SkMatrix to rotate by degrees about a pivot point at (0, 0). + Positive degrees rotates clockwise. + + @param degrees angle of axes relative to upright axes + */ + SkMatrix& setRotate(SkScalar degrees); + + /** Sets SkMatrix to rotate by sinValue and cosValue, about a pivot point at (px, py). + The pivot point is unchanged when mapped with SkMatrix. + + Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1). + Vector length specifies scale. + + @param sinValue rotation vector x-axis component + @param cosValue rotation vector y-axis component + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& setSinCos(SkScalar sinValue, SkScalar cosValue, + SkScalar px, SkScalar py); + + /** Sets SkMatrix to rotate by sinValue and cosValue, about a pivot point at (0, 0). + + Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1). + Vector length specifies scale. + + @param sinValue rotation vector x-axis component + @param cosValue rotation vector y-axis component + */ + SkMatrix& setSinCos(SkScalar sinValue, SkScalar cosValue); + + /** Sets SkMatrix to rotate, scale, and translate using a compressed matrix form. + + Vector (rsxForm.fSSin, rsxForm.fSCos) describes the angle of rotation relative + to (0, 1). Vector length specifies scale. Mapped point is rotated and scaled + by vector, then translated by (rsxForm.fTx, rsxForm.fTy). + + @param rsxForm compressed SkRSXform matrix + @return reference to SkMatrix + + example: https://fiddle.skia.org/c/@Matrix_setRSXform + */ + SkMatrix& setRSXform(const SkRSXform& rsxForm); + + /** Sets SkMatrix to skew by kx and ky, about a pivot point at (px, py). + The pivot point is unchanged when mapped with SkMatrix. + + @param kx horizontal skew factor + @param ky vertical skew factor + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& setSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py); + + /** Sets SkMatrix to skew by kx and ky, about a pivot point at (0, 0). + + @param kx horizontal skew factor + @param ky vertical skew factor + */ + SkMatrix& setSkew(SkScalar kx, SkScalar ky); + + /** Sets SkMatrix to SkMatrix a multiplied by SkMatrix b. Either a or b may be this. 
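+
+        For example (illustrative sketch, not part of the upstream header):
+
+            SkMatrix t = SkMatrix::Translate(10, 0);
+            SkMatrix s = SkMatrix::Scale(2, 2);
+            SkMatrix m;
+            m.setConcat(t, s);               // m maps p to t * (s * p)
+            // m.mapPoint({1, 1}) == {12, 2}: scaled first, then translated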
+ + Given: + + | A B C | | J K L | + a = | D E F |, b = | M N O | + | G H I | | P Q R | + + sets SkMatrix to: + + | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | + a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | + | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | + + @param a SkMatrix on left side of multiply expression + @param b SkMatrix on right side of multiply expression + */ + SkMatrix& setConcat(const SkMatrix& a, const SkMatrix& b); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from translation (dx, dy). + This can be thought of as moving the point to be mapped before applying SkMatrix. + + Given: + + | A B C | | 1 0 dx | + Matrix = | D E F |, T(dx, dy) = | 0 1 dy | + | G H I | | 0 0 1 | + + sets SkMatrix to: + + | A B C | | 1 0 dx | | A B A*dx+B*dy+C | + Matrix * T(dx, dy) = | D E F | | 0 1 dy | = | D E D*dx+E*dy+F | + | G H I | | 0 0 1 | | G H G*dx+H*dy+I | + + @param dx x-axis translation before applying SkMatrix + @param dy y-axis translation before applying SkMatrix + */ + SkMatrix& preTranslate(SkScalar dx, SkScalar dy); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from scaling by (sx, sy) + about pivot point (px, py). + This can be thought of as scaling about a pivot point before applying SkMatrix. + + Given: + + | A B C | | sx 0 dx | + Matrix = | D E F |, S(sx, sy, px, py) = | 0 sy dy | + | G H I | | 0 0 1 | + + where + + dx = px - sx * px + dy = py - sy * py + + sets SkMatrix to: + + | A B C | | sx 0 dx | | A*sx B*sy A*dx+B*dy+C | + Matrix * S(sx, sy, px, py) = | D E F | | 0 sy dy | = | D*sx E*sy D*dx+E*dy+F | + | G H I | | 0 0 1 | | G*sx H*sy G*dx+H*dy+I | + + @param sx horizontal scale factor + @param sy vertical scale factor + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& preScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from scaling by (sx, sy) + about pivot point (0, 0). + This can be thought of as scaling about the origin before applying SkMatrix. + + Given: + + | A B C | | sx 0 0 | + Matrix = | D E F |, S(sx, sy) = | 0 sy 0 | + | G H I | | 0 0 1 | + + sets SkMatrix to: + + | A B C | | sx 0 0 | | A*sx B*sy C | + Matrix * S(sx, sy) = | D E F | | 0 sy 0 | = | D*sx E*sy F | + | G H I | | 0 0 1 | | G*sx H*sy I | + + @param sx horizontal scale factor + @param sy vertical scale factor + */ + SkMatrix& preScale(SkScalar sx, SkScalar sy); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from rotating by degrees + about pivot point (px, py). + This can be thought of as rotating about a pivot point before applying SkMatrix. + + Positive degrees rotates clockwise. + + Given: + + | A B C | | c -s dx | + Matrix = | D E F |, R(degrees, px, py) = | s c dy | + | G H I | | 0 0 1 | + + where + + c = cos(degrees) + s = sin(degrees) + dx = s * py + (1 - c) * px + dy = -s * px + (1 - c) * py + + sets SkMatrix to: + + | A B C | | c -s dx | | Ac+Bs -As+Bc A*dx+B*dy+C | + Matrix * R(degrees, px, py) = | D E F | | s c dy | = | Dc+Es -Ds+Ec D*dx+E*dy+F | + | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc G*dx+H*dy+I | + + @param degrees angle of axes relative to upright axes + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& preRotate(SkScalar degrees, SkScalar px, SkScalar py); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from rotating by degrees + about pivot point (0, 0). + This can be thought of as rotating about the origin before applying SkMatrix. 
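+
+        For example (illustrative sketch, not part of the upstream header),
+        contrasting pre- and post-multiplication order:
+
+            SkMatrix a = SkMatrix::Translate(100, 0);
+            a.preRotate(90);                 // rotate first, then translate
+            // a.mapPoint({10, 0}) == {100, 10}
+
+            SkMatrix b = SkMatrix::Translate(100, 0);
+            b.postRotate(90);                // translate first, then rotate
+            // b.mapPoint({10, 0}) == {0, 110}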
+ + Positive degrees rotates clockwise. + + Given: + + | A B C | | c -s 0 | + Matrix = | D E F |, R(degrees, px, py) = | s c 0 | + | G H I | | 0 0 1 | + + where + + c = cos(degrees) + s = sin(degrees) + + sets SkMatrix to: + + | A B C | | c -s 0 | | Ac+Bs -As+Bc C | + Matrix * R(degrees, px, py) = | D E F | | s c 0 | = | Dc+Es -Ds+Ec F | + | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc I | + + @param degrees angle of axes relative to upright axes + */ + SkMatrix& preRotate(SkScalar degrees); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from skewing by (kx, ky) + about pivot point (px, py). + This can be thought of as skewing about a pivot point before applying SkMatrix. + + Given: + + | A B C | | 1 kx dx | + Matrix = | D E F |, K(kx, ky, px, py) = | ky 1 dy | + | G H I | | 0 0 1 | + + where + + dx = -kx * py + dy = -ky * px + + sets SkMatrix to: + + | A B C | | 1 kx dx | | A+B*ky A*kx+B A*dx+B*dy+C | + Matrix * K(kx, ky, px, py) = | D E F | | ky 1 dy | = | D+E*ky D*kx+E D*dx+E*dy+F | + | G H I | | 0 0 1 | | G+H*ky G*kx+H G*dx+H*dy+I | + + @param kx horizontal skew factor + @param ky vertical skew factor + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& preSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from skewing by (kx, ky) + about pivot point (0, 0). + This can be thought of as skewing about the origin before applying SkMatrix. + + Given: + + | A B C | | 1 kx 0 | + Matrix = | D E F |, K(kx, ky) = | ky 1 0 | + | G H I | | 0 0 1 | + + sets SkMatrix to: + + | A B C | | 1 kx 0 | | A+B*ky A*kx+B C | + Matrix * K(kx, ky) = | D E F | | ky 1 0 | = | D+E*ky D*kx+E F | + | G H I | | 0 0 1 | | G+H*ky G*kx+H I | + + @param kx horizontal skew factor + @param ky vertical skew factor + */ + SkMatrix& preSkew(SkScalar kx, SkScalar ky); + + /** Sets SkMatrix to SkMatrix multiplied by SkMatrix other. + This can be thought of mapping by other before applying SkMatrix. + + Given: + + | A B C | | J K L | + Matrix = | D E F |, other = | M N O | + | G H I | | P Q R | + + sets SkMatrix to: + + | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | + Matrix * other = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | + | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | + + @param other SkMatrix on right side of multiply expression + */ + SkMatrix& preConcat(const SkMatrix& other); + + /** Sets SkMatrix to SkMatrix constructed from translation (dx, dy) multiplied by SkMatrix. + This can be thought of as moving the point to be mapped after applying SkMatrix. + + Given: + + | J K L | | 1 0 dx | + Matrix = | M N O |, T(dx, dy) = | 0 1 dy | + | P Q R | | 0 0 1 | + + sets SkMatrix to: + + | 1 0 dx | | J K L | | J+dx*P K+dx*Q L+dx*R | + T(dx, dy) * Matrix = | 0 1 dy | | M N O | = | M+dy*P N+dy*Q O+dy*R | + | 0 0 1 | | P Q R | | P Q R | + + @param dx x-axis translation after applying SkMatrix + @param dy y-axis translation after applying SkMatrix + */ + SkMatrix& postTranslate(SkScalar dx, SkScalar dy); + + /** Sets SkMatrix to SkMatrix constructed from scaling by (sx, sy) about pivot point + (px, py), multiplied by SkMatrix. + This can be thought of as scaling about a pivot point after applying SkMatrix. 
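+
+        For example (illustrative sketch, not part of the upstream header):
+
+            SkMatrix m = SkMatrix::RotateDeg(30);
+            m.postScale(2, 2, 50, 50);       // m now maps p to S(2,2, 50,50) * R(30) * p;
+                                             // the scale pivot (50, 50) is fixed in the
+                                             // already-rotated space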
+ + Given: + + | J K L | | sx 0 dx | + Matrix = | M N O |, S(sx, sy, px, py) = | 0 sy dy | + | P Q R | | 0 0 1 | + + where + + dx = px - sx * px + dy = py - sy * py + + sets SkMatrix to: + + | sx 0 dx | | J K L | | sx*J+dx*P sx*K+dx*Q sx*L+dx+R | + S(sx, sy, px, py) * Matrix = | 0 sy dy | | M N O | = | sy*M+dy*P sy*N+dy*Q sy*O+dy*R | + | 0 0 1 | | P Q R | | P Q R | + + @param sx horizontal scale factor + @param sy vertical scale factor + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& postScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py); + + /** Sets SkMatrix to SkMatrix constructed from scaling by (sx, sy) about pivot point + (0, 0), multiplied by SkMatrix. + This can be thought of as scaling about the origin after applying SkMatrix. + + Given: + + | J K L | | sx 0 0 | + Matrix = | M N O |, S(sx, sy) = | 0 sy 0 | + | P Q R | | 0 0 1 | + + sets SkMatrix to: + + | sx 0 0 | | J K L | | sx*J sx*K sx*L | + S(sx, sy) * Matrix = | 0 sy 0 | | M N O | = | sy*M sy*N sy*O | + | 0 0 1 | | P Q R | | P Q R | + + @param sx horizontal scale factor + @param sy vertical scale factor + */ + SkMatrix& postScale(SkScalar sx, SkScalar sy); + + /** Sets SkMatrix to SkMatrix constructed from rotating by degrees about pivot point + (px, py), multiplied by SkMatrix. + This can be thought of as rotating about a pivot point after applying SkMatrix. + + Positive degrees rotates clockwise. + + Given: + + | J K L | | c -s dx | + Matrix = | M N O |, R(degrees, px, py) = | s c dy | + | P Q R | | 0 0 1 | + + where + + c = cos(degrees) + s = sin(degrees) + dx = s * py + (1 - c) * px + dy = -s * px + (1 - c) * py + + sets SkMatrix to: + + |c -s dx| |J K L| |cJ-sM+dx*P cK-sN+dx*Q cL-sO+dx+R| + R(degrees, px, py) * Matrix = |s c dy| |M N O| = |sJ+cM+dy*P sK+cN+dy*Q sL+cO+dy*R| + |0 0 1| |P Q R| | P Q R| + + @param degrees angle of axes relative to upright axes + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& postRotate(SkScalar degrees, SkScalar px, SkScalar py); + + /** Sets SkMatrix to SkMatrix constructed from rotating by degrees about pivot point + (0, 0), multiplied by SkMatrix. + This can be thought of as rotating about the origin after applying SkMatrix. + + Positive degrees rotates clockwise. + + Given: + + | J K L | | c -s 0 | + Matrix = | M N O |, R(degrees, px, py) = | s c 0 | + | P Q R | | 0 0 1 | + + where + + c = cos(degrees) + s = sin(degrees) + + sets SkMatrix to: + + | c -s dx | | J K L | | cJ-sM cK-sN cL-sO | + R(degrees, px, py) * Matrix = | s c dy | | M N O | = | sJ+cM sK+cN sL+cO | + | 0 0 1 | | P Q R | | P Q R | + + @param degrees angle of axes relative to upright axes + */ + SkMatrix& postRotate(SkScalar degrees); + + /** Sets SkMatrix to SkMatrix constructed from skewing by (kx, ky) about pivot point + (px, py), multiplied by SkMatrix. + This can be thought of as skewing about a pivot point after applying SkMatrix. 
+ + Given: + + | J K L | | 1 kx dx | + Matrix = | M N O |, K(kx, ky, px, py) = | ky 1 dy | + | P Q R | | 0 0 1 | + + where + + dx = -kx * py + dy = -ky * px + + sets SkMatrix to: + + | 1 kx dx| |J K L| |J+kx*M+dx*P K+kx*N+dx*Q L+kx*O+dx+R| + K(kx, ky, px, py) * Matrix = |ky 1 dy| |M N O| = |ky*J+M+dy*P ky*K+N+dy*Q ky*L+O+dy*R| + | 0 0 1| |P Q R| | P Q R| + + @param kx horizontal skew factor + @param ky vertical skew factor + @param px pivot on x-axis + @param py pivot on y-axis + */ + SkMatrix& postSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py); + + /** Sets SkMatrix to SkMatrix constructed from skewing by (kx, ky) about pivot point + (0, 0), multiplied by SkMatrix. + This can be thought of as skewing about the origin after applying SkMatrix. + + Given: + + | J K L | | 1 kx 0 | + Matrix = | M N O |, K(kx, ky) = | ky 1 0 | + | P Q R | | 0 0 1 | + + sets SkMatrix to: + + | 1 kx 0 | | J K L | | J+kx*M K+kx*N L+kx*O | + K(kx, ky) * Matrix = | ky 1 0 | | M N O | = | ky*J+M ky*K+N ky*L+O | + | 0 0 1 | | P Q R | | P Q R | + + @param kx horizontal skew factor + @param ky vertical skew factor + */ + SkMatrix& postSkew(SkScalar kx, SkScalar ky); + + /** Sets SkMatrix to SkMatrix other multiplied by SkMatrix. + This can be thought of mapping by other after applying SkMatrix. + + Given: + + | J K L | | A B C | + Matrix = | M N O |, other = | D E F | + | P Q R | | G H I | + + sets SkMatrix to: + + | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | + other * Matrix = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | + | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | + + @param other SkMatrix on left side of multiply expression + */ + SkMatrix& postConcat(const SkMatrix& other); + +#ifndef SK_SUPPORT_LEGACY_MATRIX_RECTTORECT +private: +#endif + /** Sets SkMatrix to scale and translate src SkRect to dst SkRect. stf selects whether + mapping completely fills dst or preserves the aspect ratio, and how to align + src within dst. Returns false if src is empty, and sets SkMatrix to identity. + Returns true if dst is empty, and sets SkMatrix to: + + | 0 0 0 | + | 0 0 0 | + | 0 0 1 | + + @param src SkRect to map from + @param dst SkRect to map to + @return true if SkMatrix can represent SkRect mapping + + example: https://fiddle.skia.org/c/@Matrix_setRectToRect + */ + bool setRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf); + + /** Returns SkMatrix set to scale and translate src SkRect to dst SkRect. stf selects + whether mapping completely fills dst or preserves the aspect ratio, and how to + align src within dst. Returns the identity SkMatrix if src is empty. If dst is + empty, returns SkMatrix set to: + + | 0 0 0 | + | 0 0 0 | + | 0 0 1 | + + @param src SkRect to map from + @param dst SkRect to map to + @return SkMatrix mapping src to dst + */ + static SkMatrix MakeRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf) { + SkMatrix m; + m.setRectToRect(src, dst, stf); + return m; + } +#ifndef SK_SUPPORT_LEGACY_MATRIX_RECTTORECT +public: +#endif + + /** Sets SkMatrix to map src to dst. count must be zero or greater, and four or less. + + If count is zero, sets SkMatrix to identity and returns true. + If count is one, sets SkMatrix to translate and returns true. + If count is two or more, sets SkMatrix to map SkPoint if possible; returns false + if SkMatrix cannot be constructed. If count is four, SkMatrix may include + perspective. 
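+
+        For example (illustrative sketch, not part of the upstream header), mapping
+        the unit square onto an arbitrary convex quadrilateral:
+
+            SkPoint src[4] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
+            SkPoint dst[4] = {{0, 0}, {200, 40}, {240, 240}, {40, 200}};
+            SkMatrix m;
+            if (m.setPolyToPoly(src, dst, 4)) {
+                // m maps each src[i] to dst[i]; with 4 points it may be perspective
+            }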
+ + @param src SkPoint to map from + @param dst SkPoint to map to + @param count number of SkPoint in src and dst + @return true if SkMatrix was constructed successfully + + example: https://fiddle.skia.org/c/@Matrix_setPolyToPoly + */ + bool setPolyToPoly(const SkPoint src[], const SkPoint dst[], int count); + + /** Sets inverse to reciprocal matrix, returning true if SkMatrix can be inverted. + Geometrically, if SkMatrix maps from source to destination, inverse SkMatrix + maps from destination to source. If SkMatrix can not be inverted, inverse is + unchanged. + + @param inverse storage for inverted SkMatrix; may be nullptr + @return true if SkMatrix can be inverted + */ + bool SK_WARN_UNUSED_RESULT invert(SkMatrix* inverse) const { + // Allow the trivial case to be inlined. + if (this->isIdentity()) { + if (inverse) { + inverse->reset(); + } + return true; + } + return this->invertNonIdentity(inverse); + } + + /** Fills affine with identity values in column major order. + Sets affine to: + + | 1 0 0 | + | 0 1 0 | + + Affine 3 by 2 matrices in column major order are used by OpenGL and XPS. + + @param affine storage for 3 by 2 affine matrix + + example: https://fiddle.skia.org/c/@Matrix_SetAffineIdentity + */ + static void SetAffineIdentity(SkScalar affine[6]); + + /** Fills affine in column major order. Sets affine to: + + | scale-x skew-x translate-x | + | skew-y scale-y translate-y | + + If SkMatrix contains perspective, returns false and leaves affine unchanged. + + @param affine storage for 3 by 2 affine matrix; may be nullptr + @return true if SkMatrix does not contain perspective + */ + bool SK_WARN_UNUSED_RESULT asAffine(SkScalar affine[6]) const; + + /** Sets SkMatrix to affine values, passed in column major order. Given affine, + column, then row, as: + + | scale-x skew-x translate-x | + | skew-y scale-y translate-y | + + SkMatrix is set, row, then column, to: + + | scale-x skew-x translate-x | + | skew-y scale-y translate-y | + | 0 0 1 | + + @param affine 3 by 2 affine matrix + */ + SkMatrix& setAffine(const SkScalar affine[6]); + + /** + * A matrix is categorized as 'perspective' if the bottom row is not [0, 0, 1]. + * However, for most uses (e.g. mapPoints) a bottom row of [0, 0, X] behaves like a + * non-perspective matrix, though it will be categorized as perspective. Calling + * normalizePerspective() will change the matrix such that, if its bottom row was [0, 0, X], + * it will be changed to [0, 0, 1] by scaling the rest of the matrix by 1/X. + * + * | A B C | | A/X B/X C/X | + * | D E F | -> | D/X E/X F/X | for X != 0 + * | 0 0 X | | 0 0 1 | + */ + void normalizePerspective() { + if (fMat[8] != 1) { + this->doNormalizePerspective(); + } + } + + /** Maps src SkPoint array of length count to dst SkPoint array of equal or greater + length. SkPoint are mapped by multiplying each SkPoint by SkMatrix. Given: + + | A B C | | x | + Matrix = | D E F |, pt = | y | + | G H I | | 1 | + + where + + for (i = 0; i < count; ++i) { + x = src[i].fX + y = src[i].fY + } + + each dst SkPoint is computed as: + + |A B C| |x| Ax+By+C Dx+Ey+F + Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + src and dst may point to the same storage. 
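+
+        For example (illustrative sketch, not part of the upstream header):
+
+            SkPoint src[3] = {{0, 0}, {10, 0}, {0, 10}};
+            SkPoint dst[3];
+            SkMatrix::Scale(2, 3).mapPoints(dst, src, 3);
+            // dst == {0, 0}, {20, 0}, {0, 30}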
+ + @param dst storage for mapped SkPoint + @param src SkPoint to transform + @param count number of SkPoint to transform + + example: https://fiddle.skia.org/c/@Matrix_mapPoints + */ + void mapPoints(SkPoint dst[], const SkPoint src[], int count) const; + + /** Maps pts SkPoint array of length count in place. SkPoint are mapped by multiplying + each SkPoint by SkMatrix. Given: + + | A B C | | x | + Matrix = | D E F |, pt = | y | + | G H I | | 1 | + + where + + for (i = 0; i < count; ++i) { + x = pts[i].fX + y = pts[i].fY + } + + each resulting pts SkPoint is computed as: + + |A B C| |x| Ax+By+C Dx+Ey+F + Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + @param pts storage for mapped SkPoint + @param count number of SkPoint to transform + */ + void mapPoints(SkPoint pts[], int count) const { + this->mapPoints(pts, pts, count); + } + + /** Maps src SkPoint3 array of length count to dst SkPoint3 array, which must of length count or + greater. SkPoint3 array is mapped by multiplying each SkPoint3 by SkMatrix. Given: + + | A B C | | x | + Matrix = | D E F |, src = | y | + | G H I | | z | + + each resulting dst SkPoint is computed as: + + |A B C| |x| + Matrix * src = |D E F| |y| = |Ax+By+Cz Dx+Ey+Fz Gx+Hy+Iz| + |G H I| |z| + + @param dst storage for mapped SkPoint3 array + @param src SkPoint3 array to transform + @param count items in SkPoint3 array to transform + + example: https://fiddle.skia.org/c/@Matrix_mapHomogeneousPoints + */ + void mapHomogeneousPoints(SkPoint3 dst[], const SkPoint3 src[], int count) const; + + /** + * Returns homogeneous points, starting with 2D src points (with implied w = 1). + */ + void mapHomogeneousPoints(SkPoint3 dst[], const SkPoint src[], int count) const; + + /** Returns SkPoint pt multiplied by SkMatrix. Given: + + | A B C | | x | + Matrix = | D E F |, pt = | y | + | G H I | | 1 | + + result is computed as: + + |A B C| |x| Ax+By+C Dx+Ey+F + Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + @param p SkPoint to map + @return mapped SkPoint + */ + SkPoint mapPoint(SkPoint pt) const { + SkPoint result; + this->mapXY(pt.x(), pt.y(), &result); + return result; + } + + /** Maps SkPoint (x, y) to result. SkPoint is mapped by multiplying by SkMatrix. Given: + + | A B C | | x | + Matrix = | D E F |, pt = | y | + | G H I | | 1 | + + result is computed as: + + |A B C| |x| Ax+By+C Dx+Ey+F + Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + @param x x-axis value of SkPoint to map + @param y y-axis value of SkPoint to map + @param result storage for mapped SkPoint + + example: https://fiddle.skia.org/c/@Matrix_mapXY + */ + void mapXY(SkScalar x, SkScalar y, SkPoint* result) const; + + /** Returns SkPoint (x, y) multiplied by SkMatrix. Given: + + | A B C | | x | + Matrix = | D E F |, pt = | y | + | G H I | | 1 | + + result is computed as: + + |A B C| |x| Ax+By+C Dx+Ey+F + Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + @param x x-axis value of SkPoint to map + @param y y-axis value of SkPoint to map + @return mapped SkPoint + */ + SkPoint mapXY(SkScalar x, SkScalar y) const { + SkPoint result; + this->mapXY(x,y, &result); + return result; + } + + + /** Returns (0, 0) multiplied by SkMatrix. 
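+
+        For example (illustrative sketch, not part of the upstream header):
+
+            SkPoint origin = SkMatrix::Translate(15, -4).mapOrigin();   // {15, -4}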
Given: + + | A B C | | 0 | + Matrix = | D E F |, pt = | 0 | + | G H I | | 1 | + + result is computed as: + + |A B C| |0| C F + Matrix * pt = |D E F| |0| = |C F I| = - , - + |G H I| |1| I I + + @return mapped (0, 0) + */ + SkPoint mapOrigin() const { + SkScalar x = this->getTranslateX(), + y = this->getTranslateY(); + if (this->hasPerspective()) { + SkScalar w = fMat[kMPersp2]; + if (w) { w = 1 / w; } + x *= w; + y *= w; + } + return {x, y}; + } + + /** Maps src vector array of length count to vector SkPoint array of equal or greater + length. Vectors are mapped by multiplying each vector by SkMatrix, treating + SkMatrix translation as zero. Given: + + | A B 0 | | x | + Matrix = | D E 0 |, src = | y | + | G H I | | 1 | + + where + + for (i = 0; i < count; ++i) { + x = src[i].fX + y = src[i].fY + } + + each dst vector is computed as: + + |A B 0| |x| Ax+By Dx+Ey + Matrix * src = |D E 0| |y| = |Ax+By Dx+Ey Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + src and dst may point to the same storage. + + @param dst storage for mapped vectors + @param src vectors to transform + @param count number of vectors to transform + + example: https://fiddle.skia.org/c/@Matrix_mapVectors + */ + void mapVectors(SkVector dst[], const SkVector src[], int count) const; + + /** Maps vecs vector array of length count in place, multiplying each vector by + SkMatrix, treating SkMatrix translation as zero. Given: + + | A B 0 | | x | + Matrix = | D E 0 |, vec = | y | + | G H I | | 1 | + + where + + for (i = 0; i < count; ++i) { + x = vecs[i].fX + y = vecs[i].fY + } + + each result vector is computed as: + + |A B 0| |x| Ax+By Dx+Ey + Matrix * vec = |D E 0| |y| = |Ax+By Dx+Ey Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + @param vecs vectors to transform, and storage for mapped vectors + @param count number of vectors to transform + */ + void mapVectors(SkVector vecs[], int count) const { + this->mapVectors(vecs, vecs, count); + } + + /** Maps vector (dx, dy) to result. Vector is mapped by multiplying by SkMatrix, + treating SkMatrix translation as zero. Given: + + | A B 0 | | dx | + Matrix = | D E 0 |, vec = | dy | + | G H I | | 1 | + + each result vector is computed as: + + |A B 0| |dx| A*dx+B*dy D*dx+E*dy + Matrix * vec = |D E 0| |dy| = |A*dx+B*dy D*dx+E*dy G*dx+H*dy+I| = ----------- , ----------- + |G H I| | 1| G*dx+H*dy+I G*dx+*dHy+I + + @param dx x-axis value of vector to map + @param dy y-axis value of vector to map + @param result storage for mapped vector + */ + void mapVector(SkScalar dx, SkScalar dy, SkVector* result) const { + SkVector vec = { dx, dy }; + this->mapVectors(result, &vec, 1); + } + + /** Returns vector (dx, dy) multiplied by SkMatrix, treating SkMatrix translation as zero. + Given: + + | A B 0 | | dx | + Matrix = | D E 0 |, vec = | dy | + | G H I | | 1 | + + each result vector is computed as: + + |A B 0| |dx| A*dx+B*dy D*dx+E*dy + Matrix * vec = |D E 0| |dy| = |A*dx+B*dy D*dx+E*dy G*dx+H*dy+I| = ----------- , ----------- + |G H I| | 1| G*dx+H*dy+I G*dx+*dHy+I + + @param dx x-axis value of vector to map + @param dy y-axis value of vector to map + @return mapped vector + */ + SkVector mapVector(SkScalar dx, SkScalar dy) const { + SkVector vec = { dx, dy }; + this->mapVectors(&vec, &vec, 1); + return vec; + } + + /** Sets dst to bounds of src corners mapped by SkMatrix. + Returns true if mapped corners are dst corners. + + Returned value is the same as calling rectStaysRect(). 
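+
+        For example (illustrative sketch, not part of the upstream header):
+
+            SkRect bounds;
+            bool axisAligned = SkMatrix::RotateDeg(45).mapRect(&bounds,
+                                                               SkRect::MakeWH(10, 20));
+            // axisAligned is false; bounds is the axis-aligned bounding box of the
+            // rotated corners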
+ + @param dst storage for bounds of mapped SkPoint + @param src SkRect to map + @param pc whether to apply perspective clipping + @return true if dst is equivalent to mapped src + + example: https://fiddle.skia.org/c/@Matrix_mapRect + */ + bool mapRect(SkRect* dst, const SkRect& src, + SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const; + + /** Sets rect to bounds of rect corners mapped by SkMatrix. + Returns true if mapped corners are computed rect corners. + + Returned value is the same as calling rectStaysRect(). + + @param rect rectangle to map, and storage for bounds of mapped corners + @param pc whether to apply perspective clipping + @return true if result is equivalent to mapped rect + */ + bool mapRect(SkRect* rect, SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const { + return this->mapRect(rect, *rect, pc); + } + + /** Returns bounds of src corners mapped by SkMatrix. + + @param src rectangle to map + @return mapped bounds + */ + SkRect mapRect(const SkRect& src, + SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const { + SkRect dst; + (void)this->mapRect(&dst, src, pc); + return dst; + } + + /** Maps four corners of rect to dst. SkPoint are mapped by multiplying each + rect corner by SkMatrix. rect corner is processed in this order: + (rect.fLeft, rect.fTop), (rect.fRight, rect.fTop), (rect.fRight, rect.fBottom), + (rect.fLeft, rect.fBottom). + + rect may be empty: rect.fLeft may be greater than or equal to rect.fRight; + rect.fTop may be greater than or equal to rect.fBottom. + + Given: + + | A B C | | x | + Matrix = | D E F |, pt = | y | + | G H I | | 1 | + + where pt is initialized from each of (rect.fLeft, rect.fTop), + (rect.fRight, rect.fTop), (rect.fRight, rect.fBottom), (rect.fLeft, rect.fBottom), + each dst SkPoint is computed as: + + |A B C| |x| Ax+By+C Dx+Ey+F + Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- + |G H I| |1| Gx+Hy+I Gx+Hy+I + + @param dst storage for mapped corner SkPoint + @param rect SkRect to map + + Note: this does not perform perspective clipping (as that might result in more than + 4 points, so results are suspect if the matrix contains perspective. + */ + void mapRectToQuad(SkPoint dst[4], const SkRect& rect) const { + // This could potentially be faster if we only transformed each x and y of the rect once. + rect.toQuad(dst); + this->mapPoints(dst, 4); + } + + /** Sets dst to bounds of src corners mapped by SkMatrix. If matrix contains + elements other than scale or translate: asserts if SK_DEBUG is defined; + otherwise, results are undefined. + + @param dst storage for bounds of mapped SkPoint + @param src SkRect to map + + example: https://fiddle.skia.org/c/@Matrix_mapRectScaleTranslate + */ + void mapRectScaleTranslate(SkRect* dst, const SkRect& src) const; + + /** Returns geometric mean radius of ellipse formed by constructing circle of + size radius, and mapping constructed circle with SkMatrix. The result squared is + equal to the major axis length times the minor axis length. + Result is not meaningful if SkMatrix contains perspective elements. + + @param radius circle size to map + @return average mapped radius + + example: https://fiddle.skia.org/c/@Matrix_mapRadius + */ + SkScalar mapRadius(SkScalar radius) const; + + /** Compares a and b; returns true if a and b are numerically equal. Returns true + even if sign of zero values are different. Returns false if either SkMatrix + contains NaN, even if the other SkMatrix also contains NaN. 
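+
+        An illustrative mapRect() sketch (values here are hypothetical):
+
+            SkMatrix m;
+            m.setRotate(45);
+            SkRect bounds = m.mapRect(SkRect::MakeWH(10, 10));
+            // bounds is the axis-aligned box around the rotated corners;
+            // m.rectStaysRect() is false here, so bounds covers more than the
+            // mapped quad itself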
+ + @param a SkMatrix to compare + @param b SkMatrix to compare + @return true if SkMatrix a and SkMatrix b are numerically equal + */ + friend SK_API bool operator==(const SkMatrix& a, const SkMatrix& b); + + /** Compares a and b; returns true if a and b are not numerically equal. Returns false + even if sign of zero values are different. Returns true if either SkMatrix + contains NaN, even if the other SkMatrix also contains NaN. + + @param a SkMatrix to compare + @param b SkMatrix to compare + @return true if SkMatrix a and SkMatrix b are numerically not equal + */ + friend SK_API bool operator!=(const SkMatrix& a, const SkMatrix& b) { + return !(a == b); + } + + /** Writes text representation of SkMatrix to standard output. Floating point values + are written with limited precision; it may not be possible to reconstruct + original SkMatrix from output. + + example: https://fiddle.skia.org/c/@Matrix_dump + */ + void dump() const; + + /** Returns the minimum scaling factor of SkMatrix by decomposing the scaling and + skewing elements. + Returns -1 if scale factor overflows or SkMatrix contains perspective. + + @return minimum scale factor + + example: https://fiddle.skia.org/c/@Matrix_getMinScale + */ + SkScalar getMinScale() const; + + /** Returns the maximum scaling factor of SkMatrix by decomposing the scaling and + skewing elements. + Returns -1 if scale factor overflows or SkMatrix contains perspective. + + @return maximum scale factor + + example: https://fiddle.skia.org/c/@Matrix_getMaxScale + */ + SkScalar getMaxScale() const; + + /** Sets scaleFactors[0] to the minimum scaling factor, and scaleFactors[1] to the + maximum scaling factor. Scaling factors are computed by decomposing + the SkMatrix scaling and skewing elements. + + Returns true if scaleFactors are found; otherwise, returns false and sets + scaleFactors to undefined values. + + @param scaleFactors storage for minimum and maximum scale factors + @return true if scale factors were computed correctly + */ + bool SK_WARN_UNUSED_RESULT getMinMaxScales(SkScalar scaleFactors[2]) const; + + /** Decomposes SkMatrix into scale components and whatever remains. Returns false if + SkMatrix could not be decomposed. + + Sets scale to portion of SkMatrix that scale axes. Sets remaining to SkMatrix + with scaling factored out. remaining may be passed as nullptr + to determine if SkMatrix can be decomposed without computing remainder. + + Returns true if scale components are found. scale and remaining are + unchanged if SkMatrix contains perspective; scale factors are not finite, or + are nearly zero. + + On success: Matrix = Remaining * scale. + + @param scale axes scaling factors; may be nullptr + @param remaining SkMatrix without scaling; may be nullptr + @return true if scale can be computed + + example: https://fiddle.skia.org/c/@Matrix_decomposeScale + */ + bool decomposeScale(SkSize* scale, SkMatrix* remaining = nullptr) const; + + /** Returns reference to const identity SkMatrix. Returned SkMatrix is set to: + + | 1 0 0 | + | 0 1 0 | + | 0 0 1 | + + @return const identity SkMatrix + + example: https://fiddle.skia.org/c/@Matrix_I + */ + static const SkMatrix& I(); + + /** Returns reference to a const SkMatrix with invalid values. 
Returned SkMatrix is set + to: + + | SK_ScalarMax SK_ScalarMax SK_ScalarMax | + | SK_ScalarMax SK_ScalarMax SK_ScalarMax | + | SK_ScalarMax SK_ScalarMax SK_ScalarMax | + + @return const invalid SkMatrix + + example: https://fiddle.skia.org/c/@Matrix_InvalidMatrix + */ + static const SkMatrix& InvalidMatrix(); + + /** Returns SkMatrix a multiplied by SkMatrix b. + + Given: + + | A B C | | J K L | + a = | D E F |, b = | M N O | + | G H I | | P Q R | + + sets SkMatrix to: + + | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | + a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | + | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | + + @param a SkMatrix on left side of multiply expression + @param b SkMatrix on right side of multiply expression + @return SkMatrix computed from a times b + */ + static SkMatrix Concat(const SkMatrix& a, const SkMatrix& b) { + SkMatrix result; + result.setConcat(a, b); + return result; + } + + friend SkMatrix operator*(const SkMatrix& a, const SkMatrix& b) { + return Concat(a, b); + } + + /** Sets internal cache to unknown state. Use to force update after repeated + modifications to SkMatrix element reference returned by operator[](int index). + */ + void dirtyMatrixTypeCache() { + this->setTypeMask(kUnknown_Mask); + } + + /** Initializes SkMatrix with scale and translate elements. + + | sx 0 tx | + | 0 sy ty | + | 0 0 1 | + + @param sx horizontal scale factor to store + @param sy vertical scale factor to store + @param tx horizontal translation to store + @param ty vertical translation to store + */ + void setScaleTranslate(SkScalar sx, SkScalar sy, SkScalar tx, SkScalar ty) { + fMat[kMScaleX] = sx; + fMat[kMSkewX] = 0; + fMat[kMTransX] = tx; + + fMat[kMSkewY] = 0; + fMat[kMScaleY] = sy; + fMat[kMTransY] = ty; + + fMat[kMPersp0] = 0; + fMat[kMPersp1] = 0; + fMat[kMPersp2] = 1; + + int mask = 0; + if (sx != 1 || sy != 1) { + mask |= kScale_Mask; + } + if (tx != 0.0f || ty != 0.0f) { + mask |= kTranslate_Mask; + } + this->setTypeMask(mask | kRectStaysRect_Mask); + } + + /** Returns true if all elements of the matrix are finite. Returns false if any + element is infinity, or NaN. + + @return true if matrix has only finite elements + */ + bool isFinite() const { return SkScalarsAreFinite(fMat, 9); } + +private: + /** Set if the matrix will map a rectangle to another rectangle. This + can be true if the matrix is scale-only, or rotates a multiple of + 90 degrees. + + This bit will be set on identity matrices + */ + static constexpr int kRectStaysRect_Mask = 0x10; + + /** Set if the perspective bit is valid even though the rest of + the matrix is Unknown. 
+ */ + static constexpr int kOnlyPerspectiveValid_Mask = 0x40; + + static constexpr int kUnknown_Mask = 0x80; + + static constexpr int kORableMasks = kTranslate_Mask | + kScale_Mask | + kAffine_Mask | + kPerspective_Mask; + + static constexpr int kAllMasks = kTranslate_Mask | + kScale_Mask | + kAffine_Mask | + kPerspective_Mask | + kRectStaysRect_Mask; + + SkScalar fMat[9]; + mutable int32_t fTypeMask; + + constexpr SkMatrix(SkScalar sx, SkScalar kx, SkScalar tx, + SkScalar ky, SkScalar sy, SkScalar ty, + SkScalar p0, SkScalar p1, SkScalar p2, int typeMask) + : fMat{sx, kx, tx, + ky, sy, ty, + p0, p1, p2} + , fTypeMask(typeMask) {} + + static void ComputeInv(SkScalar dst[9], const SkScalar src[9], double invDet, bool isPersp); + + uint8_t computeTypeMask() const; + uint8_t computePerspectiveTypeMask() const; + + void setTypeMask(int mask) { + // allow kUnknown or a valid mask + SkASSERT(kUnknown_Mask == mask || (mask & kAllMasks) == mask || + ((kUnknown_Mask | kOnlyPerspectiveValid_Mask) & mask) + == (kUnknown_Mask | kOnlyPerspectiveValid_Mask)); + fTypeMask = mask; + } + + void orTypeMask(int mask) { + SkASSERT((mask & kORableMasks) == mask); + fTypeMask |= mask; + } + + void clearTypeMask(int mask) { + // only allow a valid mask + SkASSERT((mask & kAllMasks) == mask); + fTypeMask &= ~mask; + } + + TypeMask getPerspectiveTypeMaskOnly() const { + if ((fTypeMask & kUnknown_Mask) && + !(fTypeMask & kOnlyPerspectiveValid_Mask)) { + fTypeMask = this->computePerspectiveTypeMask(); + } + return (TypeMask)(fTypeMask & 0xF); + } + + /** Returns true if we already know that the matrix is identity; + false otherwise. + */ + bool isTriviallyIdentity() const { + if (fTypeMask & kUnknown_Mask) { + return false; + } + return ((fTypeMask & 0xF) == 0); + } + + inline void updateTranslateMask() { + if ((fMat[kMTransX] != 0) | (fMat[kMTransY] != 0)) { + fTypeMask |= kTranslate_Mask; + } else { + fTypeMask &= ~kTranslate_Mask; + } + } + + typedef void (*MapXYProc)(const SkMatrix& mat, SkScalar x, SkScalar y, + SkPoint* result); + + static MapXYProc GetMapXYProc(TypeMask mask) { + SkASSERT((mask & ~kAllMasks) == 0); + return gMapXYProcs[mask & kAllMasks]; + } + + MapXYProc getMapXYProc() const { + return GetMapXYProc(this->getType()); + } + + typedef void (*MapPtsProc)(const SkMatrix& mat, SkPoint dst[], + const SkPoint src[], int count); + + static MapPtsProc GetMapPtsProc(TypeMask mask) { + SkASSERT((mask & ~kAllMasks) == 0); + return gMapPtsProcs[mask & kAllMasks]; + } + + MapPtsProc getMapPtsProc() const { + return GetMapPtsProc(this->getType()); + } + + bool SK_WARN_UNUSED_RESULT invertNonIdentity(SkMatrix* inverse) const; + + static bool Poly2Proc(const SkPoint[], SkMatrix*); + static bool Poly3Proc(const SkPoint[], SkMatrix*); + static bool Poly4Proc(const SkPoint[], SkMatrix*); + + static void Identity_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*); + static void Trans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*); + static void Scale_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*); + static void ScaleTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*); + static void Rot_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*); + static void RotTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*); + static void Persp_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*); + + static const MapXYProc gMapXYProcs[]; + + static void Identity_pts(const SkMatrix&, SkPoint[], const SkPoint[], int); + static void Trans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int); + static void 
Scale_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int); + static void ScaleTrans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], + int count); + static void Persp_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int); + + static void Affine_vpts(const SkMatrix&, SkPoint dst[], const SkPoint[], int); + + static const MapPtsProc gMapPtsProcs[]; + + // return the number of bytes written, whether or not buffer is null + size_t writeToMemory(void* buffer) const; + /** + * Reads data from the buffer parameter + * + * @param buffer Memory to read from + * @param length Amount of memory available in the buffer + * @return number of bytes read (must be a multiple of 4) or + * 0 if there was not enough memory available + */ + size_t readFromMemory(const void* buffer, size_t length); + + // legacy method -- still needed? why not just postScale(1/divx, ...)? + bool postIDiv(int divx, int divy); + void doNormalizePerspective(); + + friend class SkPerspIter; + friend class SkMatrixPriv; + friend class SerializationTest; +}; +SK_END_REQUIRE_DENSE + +#endif diff --git a/src/deps/skia/include/core/SkMilestone.h b/src/deps/skia/include/core/SkMilestone.h new file mode 100644 index 000000000..8f036e3f3 --- /dev/null +++ b/src/deps/skia/include/core/SkMilestone.h @@ -0,0 +1,9 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef SK_MILESTONE +#define SK_MILESTONE 99 +#endif diff --git a/src/deps/skia/include/core/SkOverdrawCanvas.h b/src/deps/skia/include/core/SkOverdrawCanvas.h new file mode 100644 index 000000000..1be26c6fa --- /dev/null +++ b/src/deps/skia/include/core/SkOverdrawCanvas.h @@ -0,0 +1,68 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkOverdrawCanvas_DEFINED +#define SkOverdrawCanvas_DEFINED + +#include "include/core/SkCanvasVirtualEnforcer.h" +#include "include/utils/SkNWayCanvas.h" + +/** + * Captures all drawing commands. Rather than draw the actual content, this device + * increments the alpha channel of each pixel every time it would have been touched + * by a draw call. This is useful for detecting overdraw. 
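+ *
+ * An illustrative sketch (assumes counts is an SkCanvas* for an alpha-only
+ * surface used to accumulate per-pixel draw counts):
+ *
+ *   SkOverdrawCanvas overdraw(counts);   // does not take ownership of counts
+ *   overdraw.drawRect(SkRect::MakeWH(50, 50), SkPaint());
+ *   overdraw.drawRect(SkRect::MakeWH(50, 50), SkPaint());
+ *   // pixels covered by both rects have had their alpha incremented twice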
+ */ +class SK_API SkOverdrawCanvas : public SkCanvasVirtualEnforcer<SkNWayCanvas> { +public: + /* Does not take ownership of canvas */ + SkOverdrawCanvas(SkCanvas*); + + void onDrawTextBlob(const SkTextBlob*, SkScalar, SkScalar, const SkPaint&) override; + void onDrawGlyphRunList(const SkGlyphRunList& glyphRunList, const SkPaint& paint) override; + void onDrawPatch(const SkPoint[12], const SkColor[4], const SkPoint[4], SkBlendMode, + const SkPaint&) override; + void onDrawPaint(const SkPaint&) override; + void onDrawBehind(const SkPaint& paint) override; + void onDrawRect(const SkRect&, const SkPaint&) override; + void onDrawRegion(const SkRegion&, const SkPaint&) override; + void onDrawOval(const SkRect&, const SkPaint&) override; + void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override; + void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override; + void onDrawRRect(const SkRRect&, const SkPaint&) override; + void onDrawPoints(PointMode, size_t, const SkPoint[], const SkPaint&) override; + void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override; + void onDrawPath(const SkPath&, const SkPaint&) override; + + void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&, + const SkPaint*) override; + void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&, + const SkPaint*, SrcRectConstraint) override; + void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode, + const SkPaint*) override; + void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int, + SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override; + + void onDrawDrawable(SkDrawable*, const SkMatrix*) override; + void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override; + + void onDrawAnnotation(const SkRect&, const char key[], SkData* value) override; + void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override; + + void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], SkCanvas::QuadAAFlags, const SkColor4f&, + SkBlendMode) override; + void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[], + const SkSamplingOptions&,const SkPaint*, SrcRectConstraint) override; + +private: + inline SkPaint overdrawPaint(const SkPaint& paint); + + SkPaint fPaint; + + using INHERITED = SkCanvasVirtualEnforcer<SkNWayCanvas>; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPaint.h b/src/deps/skia/include/core/SkPaint.h new file mode 100644 index 000000000..e3cc0391d --- /dev/null +++ b/src/deps/skia/include/core/SkPaint.h @@ -0,0 +1,720 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPaint_DEFINED +#define SkPaint_DEFINED + +#include "include/core/SkBlendMode.h" +#include "include/core/SkColor.h" +#include "include/core/SkRefCnt.h" +#include "include/private/SkTOptional.h" +#include "include/private/SkTo.h" + +class SkBlender; +class SkColorFilter; +class SkColorSpace; +struct SkRect; +class SkImageFilter; +class SkMaskFilter; +class SkMatrix; +class SkPath; +class SkPathEffect; +class SkShader; + +/** \class SkPaint + SkPaint controls options applied when drawing. SkPaint collects all + options outside of the SkCanvas clip and SkCanvas matrix. + + Various options apply to strokes and fills, and images. 
+ + SkPaint collects effects and filters that describe single-pass and multiple-pass + algorithms that alter the drawing geometry, color, and transparency. For instance, + SkPaint does not directly implement dashing or blur, but contains the objects that do so. +*/ +class SK_API SkPaint { +public: + + /** Constructs SkPaint with default values. + + @return default initialized SkPaint + + example: https://fiddle.skia.org/c/@Paint_empty_constructor + */ + SkPaint(); + + /** Constructs SkPaint with default values and the given color. + + Sets alpha and RGB used when stroking and filling. The color is four floating + point values, unpremultiplied. The color values are interpreted as being in + the colorSpace. If colorSpace is nullptr, then color is assumed to be in the + sRGB color space. + + @param color unpremultiplied RGBA + @param colorSpace SkColorSpace describing the encoding of color + @return SkPaint with the given color + */ + explicit SkPaint(const SkColor4f& color, SkColorSpace* colorSpace = nullptr); + + /** Makes a shallow copy of SkPaint. SkPathEffect, SkShader, + SkMaskFilter, SkColorFilter, and SkImageFilter are shared + between the original paint and the copy. Objects containing SkRefCnt increment + their references by one. + + The referenced objects SkPathEffect, SkShader, SkMaskFilter, SkColorFilter, + and SkImageFilter cannot be modified after they are created. + This prevents objects with SkRefCnt from being modified once SkPaint refers to them. + + @param paint original to copy + @return shallow copy of paint + + example: https://fiddle.skia.org/c/@Paint_copy_const_SkPaint + */ + SkPaint(const SkPaint& paint); + + /** Implements a move constructor to avoid increasing the reference counts + of objects referenced by the paint. + + After the call, paint is undefined, and can be safely destructed. + + @param paint original to move + @return content of paint + + example: https://fiddle.skia.org/c/@Paint_move_SkPaint + */ + SkPaint(SkPaint&& paint); + + /** Decreases SkPaint SkRefCnt of owned objects: SkPathEffect, SkShader, + SkMaskFilter, SkColorFilter, and SkImageFilter. If the + objects containing SkRefCnt go to zero, they are deleted. + */ + ~SkPaint(); + + /** Makes a shallow copy of SkPaint. SkPathEffect, SkShader, + SkMaskFilter, SkColorFilter, and SkImageFilter are shared + between the original paint and the copy. Objects containing SkRefCnt in the + prior destination are decreased by one, and the referenced objects are deleted if the + resulting count is zero. Objects containing SkRefCnt in the parameter paint + are increased by one. paint is unmodified. + + @param paint original to copy + @return content of paint + + example: https://fiddle.skia.org/c/@Paint_copy_operator + */ + SkPaint& operator=(const SkPaint& paint); + + /** Moves the paint to avoid increasing the reference counts + of objects referenced by the paint parameter. Objects containing SkRefCnt in the + prior destination are decreased by one; those objects are deleted if the resulting count + is zero. + + After the call, paint is undefined, and can be safely destructed. + + @param paint original to move + @return content of paint + + example: https://fiddle.skia.org/c/@Paint_move_operator + */ + SkPaint& operator=(SkPaint&& paint); + + /** Compares a and b, and returns true if a and b are equivalent. May return false + if SkPathEffect, SkShader, SkMaskFilter, SkColorFilter, + or SkImageFilter have identical contents but different pointers. 
+ + @param a SkPaint to compare + @param b SkPaint to compare + @return true if SkPaint pair are equivalent + */ + SK_API friend bool operator==(const SkPaint& a, const SkPaint& b); + + /** Compares a and b, and returns true if a and b are not equivalent. May return true + if SkPathEffect, SkShader, SkMaskFilter, SkColorFilter, + or SkImageFilter have identical contents but different pointers. + + @param a SkPaint to compare + @param b SkPaint to compare + @return true if SkPaint pair are not equivalent + */ + friend bool operator!=(const SkPaint& a, const SkPaint& b) { + return !(a == b); + } + + /** Sets all SkPaint contents to their initial values. This is equivalent to replacing + SkPaint with the result of SkPaint(). + + example: https://fiddle.skia.org/c/@Paint_reset + */ + void reset(); + + /** Returns true if pixels on the active edges of SkPath may be drawn with partial transparency. + @return antialiasing state + */ + bool isAntiAlias() const { + return SkToBool(fBitfields.fAntiAlias); + } + + /** Requests, but does not require, that edge pixels draw opaque or with + partial transparency. + @param aa setting for antialiasing + */ + void setAntiAlias(bool aa) { fBitfields.fAntiAlias = static_cast<unsigned>(aa); } + + /** Returns true if color error may be distributed to smooth color transition. + @return dithering state + */ + bool isDither() const { + return SkToBool(fBitfields.fDither); + } + + /** Requests, but does not require, to distribute color error. + @param dither setting for ditering + */ + void setDither(bool dither) { fBitfields.fDither = static_cast<unsigned>(dither); } + + /** \enum SkPaint::Style + Set Style to fill, stroke, or both fill and stroke geometry. + The stroke and fill + share all paint attributes; for instance, they are drawn with the same color. + + Use kStrokeAndFill_Style to avoid hitting the same pixels twice with a stroke draw and + a fill draw. + */ + enum Style : uint8_t { + kFill_Style, //!< set to fill geometry + kStroke_Style, //!< set to stroke geometry + kStrokeAndFill_Style, //!< sets to stroke and fill geometry + }; + + /** May be used to verify that SkPaint::Style is a legal value. + */ + static constexpr int kStyleCount = kStrokeAndFill_Style + 1; + + /** Returns whether the geometry is filled, stroked, or filled and stroked. + */ + Style getStyle() const { return (Style)fBitfields.fStyle; } + + /** Sets whether the geometry is filled, stroked, or filled and stroked. + Has no effect if style is not a legal SkPaint::Style value. + + example: https://fiddle.skia.org/c/@Paint_setStyle + example: https://fiddle.skia.org/c/@Stroke_Width + */ + void setStyle(Style style); + + /** + * Set paint's style to kStroke if true, or kFill if false. + */ + void setStroke(bool); + + /** Retrieves alpha and RGB, unpremultiplied, packed into 32 bits. + Use helpers SkColorGetA(), SkColorGetR(), SkColorGetG(), and SkColorGetB() to extract + a color component. + + @return unpremultiplied ARGB + */ + SkColor getColor() const { return fColor4f.toSkColor(); } + + /** Retrieves alpha and RGB, unpremultiplied, as four floating point values. RGB are + extended sRGB values (sRGB gamut, and encoded with the sRGB transfer function). + + @return unpremultiplied RGBA + */ + SkColor4f getColor4f() const { return fColor4f; } + + /** Sets alpha and RGB used when stroking and filling. The color is a 32-bit value, + unpremultiplied, packing 8-bit components for alpha, red, blue, and green. 
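+
+        An illustrative sketch of the two color setters:
+
+            SkPaint paint;
+            paint.setColor(SK_ColorRED);                  // packed ARGB, opaque red
+            paint.setColor4f({1, 0, 0, 0.5f}, nullptr);   // same red at 50% alpha, sRGB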
+ + @param color unpremultiplied ARGB + + example: https://fiddle.skia.org/c/@Paint_setColor + */ + void setColor(SkColor color); + + /** Sets alpha and RGB used when stroking and filling. The color is four floating + point values, unpremultiplied. The color values are interpreted as being in + the colorSpace. If colorSpace is nullptr, then color is assumed to be in the + sRGB color space. + + @param color unpremultiplied RGBA + @param colorSpace SkColorSpace describing the encoding of color + */ + void setColor(const SkColor4f& color, SkColorSpace* colorSpace = nullptr); + + void setColor4f(const SkColor4f& color, SkColorSpace* colorSpace = nullptr) { + this->setColor(color, colorSpace); + } + + /** Retrieves alpha from the color used when stroking and filling. + + @return alpha ranging from zero, fully transparent, to 255, fully opaque + */ + float getAlphaf() const { return fColor4f.fA; } + + // Helper that scales the alpha by 255. + uint8_t getAlpha() const { return sk_float_round2int(this->getAlphaf() * 255); } + + /** Replaces alpha, leaving RGB + unchanged. An out of range value triggers an assert in the debug + build. a is a value from 0.0 to 1.0. + a set to zero makes color fully transparent; a set to 1.0 makes color + fully opaque. + + @param a alpha component of color + */ + void setAlphaf(float a); + + // Helper that accepts an int between 0 and 255, and divides it by 255.0 + void setAlpha(U8CPU a) { + this->setAlphaf(a * (1.0f / 255)); + } + + /** Sets color used when drawing solid fills. The color components range from 0 to 255. + The color is unpremultiplied; alpha sets the transparency independent of RGB. + + @param a amount of alpha, from fully transparent (0) to fully opaque (255) + @param r amount of red, from no red (0) to full red (255) + @param g amount of green, from no green (0) to full green (255) + @param b amount of blue, from no blue (0) to full blue (255) + + example: https://fiddle.skia.org/c/@Paint_setARGB + */ + void setARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b); + + /** Returns the thickness of the pen used by SkPaint to + outline the shape. + + @return zero for hairline, greater than zero for pen thickness + */ + SkScalar getStrokeWidth() const { return fWidth; } + + /** Sets the thickness of the pen used by the paint to outline the shape. + A stroke-width of zero is treated as "hairline" width. Hairlines are always exactly one + pixel wide in device space (their thickness does not change as the canvas is scaled). + Negative stroke-widths are invalid; setting a negative width will have no effect. + + @param width zero thickness for hairline; greater than zero for pen thickness + + example: https://fiddle.skia.org/c/@Miter_Limit + example: https://fiddle.skia.org/c/@Paint_setStrokeWidth + */ + void setStrokeWidth(SkScalar width); + + /** Returns the limit at which a sharp corner is drawn beveled. + + @return zero and greater miter limit + */ + SkScalar getStrokeMiter() const { return fMiterLimit; } + + /** Sets the limit at which a sharp corner is drawn beveled. + Valid values are zero and greater. + Has no effect if miter is less than zero. + + @param miter zero and greater miter limit + + example: https://fiddle.skia.org/c/@Paint_setStrokeMiter + */ + void setStrokeMiter(SkScalar miter); + + /** \enum SkPaint::Cap + Cap draws at the beginning and end of an open path contour. 
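+
+        An illustrative stroking sketch:
+
+            SkPaint stroke;
+            stroke.setStyle(SkPaint::kStroke_Style);
+            stroke.setStrokeWidth(4);
+            stroke.setStrokeCap(SkPaint::kRound_Cap);    // round off the open ends
+            stroke.setStrokeJoin(SkPaint::kRound_Join);  // and the corners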
+ */ + enum Cap { + kButt_Cap, //!< no stroke extension + kRound_Cap, //!< adds circle + kSquare_Cap, //!< adds square + kLast_Cap = kSquare_Cap, //!< largest Cap value + kDefault_Cap = kButt_Cap, //!< equivalent to kButt_Cap + }; + + /** May be used to verify that SkPaint::Cap is a legal value. + */ + static constexpr int kCapCount = kLast_Cap + 1; + + /** \enum SkPaint::Join + Join specifies how corners are drawn when a shape is stroked. Join + affects the four corners of a stroked rectangle, and the connected segments in a + stroked path. + + Choose miter join to draw sharp corners. Choose round join to draw a circle with a + radius equal to the stroke width on top of the corner. Choose bevel join to minimally + connect the thick strokes. + + The fill path constructed to describe the stroked path respects the join setting but may + not contain the actual join. For instance, a fill path constructed with round joins does + not necessarily include circles at each connected segment. + */ + enum Join : uint8_t { + kMiter_Join, //!< extends to miter limit + kRound_Join, //!< adds circle + kBevel_Join, //!< connects outside edges + kLast_Join = kBevel_Join, //!< equivalent to the largest value for Join + kDefault_Join = kMiter_Join, //!< equivalent to kMiter_Join + }; + + /** May be used to verify that SkPaint::Join is a legal value. + */ + static constexpr int kJoinCount = kLast_Join + 1; + + /** Returns the geometry drawn at the beginning and end of strokes. + */ + Cap getStrokeCap() const { return (Cap)fBitfields.fCapType; } + + /** Sets the geometry drawn at the beginning and end of strokes. + + example: https://fiddle.skia.org/c/@Paint_setStrokeCap_a + example: https://fiddle.skia.org/c/@Paint_setStrokeCap_b + */ + void setStrokeCap(Cap cap); + + /** Returns the geometry drawn at the corners of strokes. + */ + Join getStrokeJoin() const { return (Join)fBitfields.fJoinType; } + + /** Sets the geometry drawn at the corners of strokes. + + example: https://fiddle.skia.org/c/@Paint_setStrokeJoin + */ + void setStrokeJoin(Join join); + + /** Returns the filled equivalent of the stroked path. + + @param src SkPath read to create a filled version + @param dst resulting SkPath; may be the same as src, but may not be nullptr + @param cullRect optional limit passed to SkPathEffect + @param resScale if > 1, increase precision, else if (0 < resScale < 1) reduce precision + to favor speed and size + @return true if the path represents style fill, or false if it represents hairline + */ + bool getFillPath(const SkPath& src, SkPath* dst, const SkRect* cullRect, + SkScalar resScale = 1) const; + + bool getFillPath(const SkPath& src, SkPath* dst, const SkRect* cullRect, + const SkMatrix& ctm) const; + + /** Returns the filled equivalent of the stroked path. + + Replaces dst with the src path modified by SkPathEffect and style stroke. + SkPathEffect, if any, is not culled. stroke width is created with default precision. + + @param src SkPath read to create a filled version + @param dst resulting SkPath dst may be the same as src, but may not be nullptr + @return true if the path represents style fill, or false if it represents hairline + */ + bool getFillPath(const SkPath& src, SkPath* dst) const { + return this->getFillPath(src, dst, nullptr, 1); + } + + /** Returns optional colors used when filling a path, such as a gradient. + + Does not alter SkShader SkRefCnt. 
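+
+        An illustrative getFillPath() sketch for the overloads documented above:
+
+            SkPaint stroke;
+            stroke.setStyle(SkPaint::kStroke_Style);
+            stroke.setStrokeWidth(10);
+            SkPath outline;
+            if (stroke.getFillPath(SkPath::Circle(50, 50, 25), &outline, nullptr)) {
+                // outline is the ring that filling would produce for the stroked circle
+            }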
+ + @return SkShader if previously set, nullptr otherwise + */ + SkShader* getShader() const { return fShader.get(); } + + /** Returns optional colors used when filling a path, such as a gradient. + + Increases SkShader SkRefCnt by one. + + @return SkShader if previously set, nullptr otherwise + + example: https://fiddle.skia.org/c/@Paint_refShader + */ + sk_sp<SkShader> refShader() const; + + /** Sets optional colors used when filling a path, such as a gradient. + + Sets SkShader to shader, decreasing SkRefCnt of the previous SkShader. + Increments shader SkRefCnt by one. + + @param shader how geometry is filled with color; if nullptr, color is used instead + + example: https://fiddle.skia.org/c/@Color_Filter_Methods + example: https://fiddle.skia.org/c/@Paint_setShader + */ + void setShader(sk_sp<SkShader> shader); + + /** Returns SkColorFilter if set, or nullptr. + Does not alter SkColorFilter SkRefCnt. + + @return SkColorFilter if previously set, nullptr otherwise + */ + SkColorFilter* getColorFilter() const { return fColorFilter.get(); } + + /** Returns SkColorFilter if set, or nullptr. + Increases SkColorFilter SkRefCnt by one. + + @return SkColorFilter if set, or nullptr + + example: https://fiddle.skia.org/c/@Paint_refColorFilter + */ + sk_sp<SkColorFilter> refColorFilter() const; + + /** Sets SkColorFilter to filter, decreasing SkRefCnt of the previous + SkColorFilter. Pass nullptr to clear SkColorFilter. + + Increments filter SkRefCnt by one. + + @param colorFilter SkColorFilter to apply to subsequent draw + + example: https://fiddle.skia.org/c/@Blend_Mode_Methods + example: https://fiddle.skia.org/c/@Paint_setColorFilter + */ + void setColorFilter(sk_sp<SkColorFilter> colorFilter); + + /** If the current blender can be represented as a SkBlendMode enum, this returns that + * enum in the optional's value(). If it cannot, then the returned optional does not + * contain a value. + */ + skstd::optional<SkBlendMode> asBlendMode() const; + + /** + * Queries the blender, and if it can be represented as a SkBlendMode, return that mode, + * else return the defaultMode provided. + */ + SkBlendMode getBlendMode_or(SkBlendMode defaultMode) const; + + /** Returns true iff the current blender claims to be equivalent to SkBlendMode::kSrcOver. + * + * Also returns true of the current blender is nullptr. + */ + bool isSrcOver() const; + + /** Helper method for calling setBlender(). + * + * This sets a blender that implements the specified blendmode enum. + */ + void setBlendMode(SkBlendMode mode); + + /** Returns the user-supplied blend function, if one has been set. + * Does not alter SkBlender's SkRefCnt. + * + * A nullptr blender signifies the default SrcOver behavior. + * + * @return the SkBlender assigned to this paint, otherwise nullptr + */ + SkBlender* getBlender() const { return fBlender.get(); } + + /** Returns the user-supplied blend function, if one has been set. + * Increments the SkBlender's SkRefCnt by one. + * + * A nullptr blender signifies the default SrcOver behavior. + * + * @return the SkBlender assigned to this paint, otherwise nullptr + */ + sk_sp<SkBlender> refBlender() const; + + /** Sets the current blender, increasing its refcnt, and if a blender is already + * present, decreasing that object's refcnt. + * + * A nullptr blender signifies the default SrcOver behavior. + * + * For convenience, you can call setBlendMode() if the blend effect can be expressed + * as one of those values. 
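+ *
+ * An illustrative sketch:
+ *
+ *   SkPaint p;
+ *   p.setBlendMode(SkBlendMode::kMultiply);
+ *   if (skstd::optional<SkBlendMode> mode = p.asBlendMode()) {
+ *       // *mode == SkBlendMode::kMultiply
+ *   }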
+ */ + void setBlender(sk_sp<SkBlender> blender); + + /** Returns SkPathEffect if set, or nullptr. + Does not alter SkPathEffect SkRefCnt. + + @return SkPathEffect if previously set, nullptr otherwise + */ + SkPathEffect* getPathEffect() const { return fPathEffect.get(); } + + /** Returns SkPathEffect if set, or nullptr. + Increases SkPathEffect SkRefCnt by one. + + @return SkPathEffect if previously set, nullptr otherwise + + example: https://fiddle.skia.org/c/@Paint_refPathEffect + */ + sk_sp<SkPathEffect> refPathEffect() const; + + /** Sets SkPathEffect to pathEffect, decreasing SkRefCnt of the previous + SkPathEffect. Pass nullptr to leave the path geometry unaltered. + + Increments pathEffect SkRefCnt by one. + + @param pathEffect replace SkPath with a modification when drawn + + example: https://fiddle.skia.org/c/@Mask_Filter_Methods + example: https://fiddle.skia.org/c/@Paint_setPathEffect + */ + void setPathEffect(sk_sp<SkPathEffect> pathEffect); + + /** Returns SkMaskFilter if set, or nullptr. + Does not alter SkMaskFilter SkRefCnt. + + @return SkMaskFilter if previously set, nullptr otherwise + */ + SkMaskFilter* getMaskFilter() const { return fMaskFilter.get(); } + + /** Returns SkMaskFilter if set, or nullptr. + + Increases SkMaskFilter SkRefCnt by one. + + @return SkMaskFilter if previously set, nullptr otherwise + + example: https://fiddle.skia.org/c/@Paint_refMaskFilter + */ + sk_sp<SkMaskFilter> refMaskFilter() const; + + /** Sets SkMaskFilter to maskFilter, decreasing SkRefCnt of the previous + SkMaskFilter. Pass nullptr to clear SkMaskFilter and leave SkMaskFilter effect on + mask alpha unaltered. + + Increments maskFilter SkRefCnt by one. + + @param maskFilter modifies clipping mask generated from drawn geometry + + example: https://fiddle.skia.org/c/@Paint_setMaskFilter + example: https://fiddle.skia.org/c/@Typeface_Methods + */ + void setMaskFilter(sk_sp<SkMaskFilter> maskFilter); + + /** Returns SkImageFilter if set, or nullptr. + Does not alter SkImageFilter SkRefCnt. + + @return SkImageFilter if previously set, nullptr otherwise + */ + SkImageFilter* getImageFilter() const { return fImageFilter.get(); } + + /** Returns SkImageFilter if set, or nullptr. + Increases SkImageFilter SkRefCnt by one. + + @return SkImageFilter if previously set, nullptr otherwise + + example: https://fiddle.skia.org/c/@Paint_refImageFilter + */ + sk_sp<SkImageFilter> refImageFilter() const; + + /** Sets SkImageFilter to imageFilter, decreasing SkRefCnt of the previous + SkImageFilter. Pass nullptr to clear SkImageFilter, and remove SkImageFilter effect + on drawing. + + Increments imageFilter SkRefCnt by one. + + @param imageFilter how SkImage is sampled when transformed + + example: https://fiddle.skia.org/c/@Paint_setImageFilter + */ + void setImageFilter(sk_sp<SkImageFilter> imageFilter); + + /** Returns true if SkPaint prevents all drawing; + otherwise, the SkPaint may or may not allow drawing. + + Returns true if, for example, SkBlendMode combined with alpha computes a + new alpha of zero. + + @return true if SkPaint prevents all drawing + + example: https://fiddle.skia.org/c/@Paint_nothingToDraw + */ + bool nothingToDraw() const; + + /** (to be made private) + Returns true if SkPaint does not include elements requiring extensive computation + to compute SkBaseDevice bounds of drawn geometry. For instance, SkPaint with SkPathEffect + always returns false. 
+ + @return true if SkPaint allows for fast computation of bounds + */ + bool canComputeFastBounds() const; + + /** (to be made private) + Only call this if canComputeFastBounds() returned true. This takes a + raw rectangle (the raw bounds of a shape), and adjusts it for stylistic + effects in the paint (e.g. stroking). If needed, it uses the storage + parameter. It returns the adjusted bounds that can then be used + for SkCanvas::quickReject tests. + + The returned SkRect will either be orig or storage, thus the caller + should not rely on storage being set to the result, but should always + use the returned value. It is legal for orig and storage to be the same + SkRect. + For example: + if (!path.isInverseFillType() && paint.canComputeFastBounds()) { + SkRect storage; + if (canvas->quickReject(paint.computeFastBounds(path.getBounds(), &storage))) { + return; // do not draw the path + } + } + // draw the path + + @param orig geometry modified by SkPaint when drawn + @param storage computed bounds of geometry; may not be nullptr + @return fast computed bounds + */ + const SkRect& computeFastBounds(const SkRect& orig, SkRect* storage) const { + // Things like stroking, etc... will do math on the bounds rect, assuming that it's sorted. + SkASSERT(orig.isSorted()); + SkPaint::Style style = this->getStyle(); + // ultra fast-case: filling with no effects that affect geometry + if (kFill_Style == style) { + uintptr_t effects = 0; + effects |= reinterpret_cast<uintptr_t>(this->getMaskFilter()); + effects |= reinterpret_cast<uintptr_t>(this->getPathEffect()); + effects |= reinterpret_cast<uintptr_t>(this->getImageFilter()); + if (!effects) { + return orig; + } + } + + return this->doComputeFastBounds(orig, storage, style); + } + + /** (to be made private) + + @param orig geometry modified by SkPaint when drawn + @param storage computed bounds of geometry + @return fast computed bounds + */ + const SkRect& computeFastStrokeBounds(const SkRect& orig, + SkRect* storage) const { + return this->doComputeFastBounds(orig, storage, kStroke_Style); + } + + /** (to be made private) + Computes the bounds, overriding the SkPaint SkPaint::Style. This can be used to + account for additional width required by stroking orig, without + altering SkPaint::Style set to fill. + + @param orig geometry modified by SkPaint when drawn + @param storage computed bounds of geometry + @param style overrides SkPaint::Style + @return fast computed bounds + */ + const SkRect& doComputeFastBounds(const SkRect& orig, SkRect* storage, + Style style) const; + +private: + sk_sp<SkPathEffect> fPathEffect; + sk_sp<SkShader> fShader; + sk_sp<SkMaskFilter> fMaskFilter; + sk_sp<SkColorFilter> fColorFilter; + sk_sp<SkImageFilter> fImageFilter; + sk_sp<SkBlender> fBlender; + + SkColor4f fColor4f; + SkScalar fWidth; + SkScalar fMiterLimit; + union { + struct { + unsigned fAntiAlias : 1; + unsigned fDither : 1; + unsigned fCapType : 2; + unsigned fJoinType : 2; + unsigned fStyle : 2; + unsigned fPadding : 24; // 24 == 32 -1-1-2-2-2 + } fBitfields; + uint32_t fBitfieldsUInt; + }; + + friend class SkPaintPriv; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPath.h b/src/deps/skia/include/core/SkPath.h new file mode 100644 index 000000000..178e4d22f --- /dev/null +++ b/src/deps/skia/include/core/SkPath.h @@ -0,0 +1,1891 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkPath_DEFINED +#define SkPath_DEFINED + +#include "include/core/SkMatrix.h" +#include "include/core/SkPathTypes.h" +#include "include/private/SkPathRef.h" +#include "include/private/SkTo.h" + +#include <initializer_list> + +class SkAutoPathBoundsUpdate; +class SkData; +class SkRRect; +class SkWStream; + +// WIP -- define this locally, and fix call-sites to use SkPathBuilder (skbug.com/9000) +//#define SK_HIDE_PATH_EDIT_METHODS + +/** \class SkPath + SkPath contain geometry. SkPath may be empty, or contain one or more verbs that + outline a figure. SkPath always starts with a move verb to a Cartesian coordinate, + and may be followed by additional verbs that add lines or curves. + Adding a close verb makes the geometry into a continuous loop, a closed contour. + SkPath may contain any number of contours, each beginning with a move verb. + + SkPath contours may contain only a move verb, or may also contain lines, + quadratic beziers, conics, and cubic beziers. SkPath contours may be open or + closed. + + When used to draw a filled area, SkPath describes whether the fill is inside or + outside the geometry. SkPath also describes the winding rule used to fill + overlapping contours. + + Internally, SkPath lazily computes metrics likes bounds and convexity. Call + SkPath::updateBoundsCache to make SkPath thread safe. +*/ +class SK_API SkPath { +public: + /** + * Create a new path with the specified segments. + * + * The points and weights arrays are read in order, based on the sequence of verbs. + * + * Move 1 point + * Line 1 point + * Quad 2 points + * Conic 2 points and 1 weight + * Cubic 3 points + * Close 0 points + * + * If an illegal sequence of verbs is encountered, or the specified number of points + * or weights is not sufficient given the verbs, an empty Path is returned. + * + * A legal sequence of verbs consists of any number of Contours. A contour always begins + * with a Move verb, followed by 0 or more segments: Line, Quad, Conic, Cubic, followed + * by an optional Close. + */ + static SkPath Make(const SkPoint[], int pointCount, + const uint8_t[], int verbCount, + const SkScalar[], int conicWeightCount, + SkPathFillType, bool isVolatile = false); + + static SkPath Rect(const SkRect&, SkPathDirection = SkPathDirection::kCW, + unsigned startIndex = 0); + static SkPath Oval(const SkRect&, SkPathDirection = SkPathDirection::kCW); + static SkPath Oval(const SkRect&, SkPathDirection, unsigned startIndex); + static SkPath Circle(SkScalar center_x, SkScalar center_y, SkScalar radius, + SkPathDirection dir = SkPathDirection::kCW); + static SkPath RRect(const SkRRect&, SkPathDirection dir = SkPathDirection::kCW); + static SkPath RRect(const SkRRect&, SkPathDirection, unsigned startIndex); + static SkPath RRect(const SkRect& bounds, SkScalar rx, SkScalar ry, + SkPathDirection dir = SkPathDirection::kCW); + + static SkPath Polygon(const SkPoint pts[], int count, bool isClosed, + SkPathFillType = SkPathFillType::kWinding, + bool isVolatile = false); + + static SkPath Polygon(const std::initializer_list<SkPoint>& list, bool isClosed, + SkPathFillType fillType = SkPathFillType::kWinding, + bool isVolatile = false) { + return Polygon(list.begin(), SkToInt(list.size()), isClosed, fillType, isVolatile); + } + + static SkPath Line(const SkPoint a, const SkPoint b) { + return Polygon({a, b}, false); + } + + /** Constructs an empty SkPath. By default, SkPath has no verbs, no SkPoint, and no weights. + FillType is set to kWinding. 
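+
+        An illustrative sketch of the factory helpers above:
+
+            SkPath circle   = SkPath::Circle(50, 50, 25);
+            SkPath triangle = SkPath::Polygon({{0, 0}, {40, 0}, {20, 30}}, true);
+            SkPath segment  = SkPath::Line({0, 0}, {100, 100});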
+ + @return empty SkPath + + example: https://fiddle.skia.org/c/@Path_empty_constructor + */ + SkPath(); + + /** Constructs a copy of an existing path. + Copy constructor makes two paths identical by value. Internally, path and + the returned result share pointer values. The underlying verb array, SkPoint array + and weights are copied when modified. + + Creating a SkPath copy is very efficient and never allocates memory. + SkPath are always copied by value from the interface; the underlying shared + pointers are not exposed. + + @param path SkPath to copy by value + @return copy of SkPath + + example: https://fiddle.skia.org/c/@Path_copy_const_SkPath + */ + SkPath(const SkPath& path); + + /** Releases ownership of any shared data and deletes data if SkPath is sole owner. + + example: https://fiddle.skia.org/c/@Path_destructor + */ + ~SkPath(); + + /** Constructs a copy of an existing path. + SkPath assignment makes two paths identical by value. Internally, assignment + shares pointer values. The underlying verb array, SkPoint array and weights + are copied when modified. + + Copying SkPath by assignment is very efficient and never allocates memory. + SkPath are always copied by value from the interface; the underlying shared + pointers are not exposed. + + @param path verb array, SkPoint array, weights, and SkPath::FillType to copy + @return SkPath copied by value + + example: https://fiddle.skia.org/c/@Path_copy_operator + */ + SkPath& operator=(const SkPath& path); + + /** Compares a and b; returns true if SkPath::FillType, verb array, SkPoint array, and weights + are equivalent. + + @param a SkPath to compare + @param b SkPath to compare + @return true if SkPath pair are equivalent + */ + friend SK_API bool operator==(const SkPath& a, const SkPath& b); + + /** Compares a and b; returns true if SkPath::FillType, verb array, SkPoint array, and weights + are not equivalent. + + @param a SkPath to compare + @param b SkPath to compare + @return true if SkPath pair are not equivalent + */ + friend bool operator!=(const SkPath& a, const SkPath& b) { + return !(a == b); + } + + /** Returns true if SkPath contain equal verbs and equal weights. + If SkPath contain one or more conics, the weights must match. + + conicTo() may add different verbs depending on conic weight, so it is not + trivial to interpolate a pair of SkPath containing conics with different + conic weight values. + + @param compare SkPath to compare + @return true if SkPath verb array and weights are equivalent + + example: https://fiddle.skia.org/c/@Path_isInterpolatable + */ + bool isInterpolatable(const SkPath& compare) const; + + /** Interpolates between SkPath with SkPoint array of equal size. + Copy verb array and weights to out, and set out SkPoint array to a weighted + average of this SkPoint array and ending SkPoint array, using the formula: + (Path Point * weight) + ending Point * (1 - weight). + + weight is most useful when between zero (ending SkPoint array) and + one (this Point_Array); will work with values outside of this + range. + + interpolate() returns false and leaves out unchanged if SkPoint array is not + the same size as ending SkPoint array. Call isInterpolatable() to check SkPath + compatibility prior to calling interpolate(). 
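+
+        An illustrative sketch (paths here are hypothetical):
+
+            SkPath start = SkPath::Line({0, 0}, {100, 0});
+            SkPath end   = SkPath::Line({0, 0}, {100, 50});
+            SkPath mid;
+            if (start.isInterpolatable(end) && start.interpolate(end, 0.5f, &mid)) {
+                // mid ends at (100, 25), half-way between the two end points
+            }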
+ + @param ending SkPoint array averaged with this SkPoint array + @param weight contribution of this SkPoint array, and + one minus contribution of ending SkPoint array + @param out SkPath replaced by interpolated averages + @return true if SkPath contain same number of SkPoint + + example: https://fiddle.skia.org/c/@Path_interpolate + */ + bool interpolate(const SkPath& ending, SkScalar weight, SkPath* out) const; + + /** Returns SkPathFillType, the rule used to fill SkPath. + + @return current SkPathFillType setting + */ + SkPathFillType getFillType() const { return (SkPathFillType)fFillType; } + + /** Sets FillType, the rule used to fill SkPath. While there is no check + that ft is legal, values outside of FillType are not supported. + */ + void setFillType(SkPathFillType ft) { + fFillType = SkToU8(ft); + } + + /** Returns if FillType describes area outside SkPath geometry. The inverse fill area + extends indefinitely. + + @return true if FillType is kInverseWinding or kInverseEvenOdd + */ + bool isInverseFillType() const { return SkPathFillType_IsInverse(this->getFillType()); } + + /** Replaces FillType with its inverse. The inverse of FillType describes the area + unmodified by the original FillType. + */ + void toggleInverseFillType() { + fFillType ^= 2; + } + + /** Returns true if the path is convex. If necessary, it will first compute the convexity. + */ + bool isConvex() const { + return SkPathConvexity::kConvex == this->getConvexity(); + } + + /** Returns true if this path is recognized as an oval or circle. + + bounds receives bounds of oval. + + bounds is unmodified if oval is not found. + + @param bounds storage for bounding SkRect of oval; may be nullptr + @return true if SkPath is recognized as an oval or circle + + example: https://fiddle.skia.org/c/@Path_isOval + */ + bool isOval(SkRect* bounds) const; + + /** Returns true if path is representable as SkRRect. + Returns false if path is representable as oval, circle, or SkRect. + + rrect receives bounds of SkRRect. + + rrect is unmodified if SkRRect is not found. + + @param rrect storage for bounding SkRect of SkRRect; may be nullptr + @return true if SkPath contains only SkRRect + + example: https://fiddle.skia.org/c/@Path_isRRect + */ + bool isRRect(SkRRect* rrect) const; + + /** Sets SkPath to its initial state. + Removes verb array, SkPoint array, and weights, and sets FillType to kWinding. + Internal storage associated with SkPath is released. + + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_reset + */ + SkPath& reset(); + + /** Sets SkPath to its initial state, preserving internal storage. + Removes verb array, SkPoint array, and weights, and sets FillType to kWinding. + Internal storage associated with SkPath is retained. + + Use rewind() instead of reset() if SkPath storage will be reused and performance + is critical. + + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_rewind + */ + SkPath& rewind(); + + /** Returns if SkPath is empty. + Empty SkPath may have FillType but has no SkPoint, SkPath::Verb, or conic weight. + SkPath() constructs empty SkPath; reset() and rewind() make SkPath empty. + + @return true if the path contains no SkPath::Verb array + */ + bool isEmpty() const { + SkDEBUGCODE(this->validate();) + return 0 == fPathRef->countVerbs(); + } + + /** Returns if contour is closed. + Contour is closed if SkPath SkPath::Verb array was last modified by close(). 
When stroked, + closed contour draws SkPaint::Join instead of SkPaint::Cap at first and last SkPoint. + + @return true if the last contour ends with a kClose_Verb + + example: https://fiddle.skia.org/c/@Path_isLastContourClosed + */ + bool isLastContourClosed() const; + + /** Returns true for finite SkPoint array values between negative SK_ScalarMax and + positive SK_ScalarMax. Returns false for any SkPoint array value of + SK_ScalarInfinity, SK_ScalarNegativeInfinity, or SK_ScalarNaN. + + @return true if all SkPoint values are finite + */ + bool isFinite() const { + SkDEBUGCODE(this->validate();) + return fPathRef->isFinite(); + } + + /** Returns true if the path is volatile; it will not be altered or discarded + by the caller after it is drawn. SkPath by default have volatile set false, allowing + SkSurface to attach a cache of data which speeds repeated drawing. If true, SkSurface + may not speed repeated drawing. + + @return true if caller will alter SkPath after drawing + */ + bool isVolatile() const { + return SkToBool(fIsVolatile); + } + + /** Specifies whether SkPath is volatile; whether it will be altered or discarded + by the caller after it is drawn. SkPath by default have volatile set false, allowing + SkBaseDevice to attach a cache of data which speeds repeated drawing. + + Mark temporary paths, discarded or modified after use, as volatile + to inform SkBaseDevice that the path need not be cached. + + Mark animating SkPath volatile to improve performance. + Mark unchanging SkPath non-volatile to improve repeated rendering. + + raster surface SkPath draws are affected by volatile for some shadows. + GPU surface SkPath draws are affected by volatile for some shadows and concave geometries. + + @param isVolatile true if caller will alter SkPath after drawing + @return reference to SkPath + */ + SkPath& setIsVolatile(bool isVolatile) { + fIsVolatile = isVolatile; + return *this; + } + + /** Tests if line between SkPoint pair is degenerate. + Line with no length or that moves a very short distance is degenerate; it is + treated as a point. + + exact changes the equality test. If true, returns true only if p1 equals p2. + If false, returns true if p1 equals or nearly equals p2. + + @param p1 line start point + @param p2 line end point + @param exact if false, allow nearly equals + @return true if line is degenerate; its length is effectively zero + + example: https://fiddle.skia.org/c/@Path_IsLineDegenerate + */ + static bool IsLineDegenerate(const SkPoint& p1, const SkPoint& p2, bool exact); + + /** Tests if quad is degenerate. + Quad with no length or that moves a very short distance is degenerate; it is + treated as a point. + + @param p1 quad start point + @param p2 quad control point + @param p3 quad end point + @param exact if true, returns true only if p1, p2, and p3 are equal; + if false, returns true if p1, p2, and p3 are equal or nearly equal + @return true if quad is degenerate; its length is effectively zero + */ + static bool IsQuadDegenerate(const SkPoint& p1, const SkPoint& p2, + const SkPoint& p3, bool exact); + + /** Tests if cubic is degenerate. + Cubic with no length or that moves a very short distance is degenerate; it is + treated as a point. 
+ + @param p1 cubic start point + @param p2 cubic control point 1 + @param p3 cubic control point 2 + @param p4 cubic end point + @param exact if true, returns true only if p1, p2, p3, and p4 are equal; + if false, returns true if p1, p2, p3, and p4 are equal or nearly equal + @return true if cubic is degenerate; its length is effectively zero + */ + static bool IsCubicDegenerate(const SkPoint& p1, const SkPoint& p2, + const SkPoint& p3, const SkPoint& p4, bool exact); + + /** Returns true if SkPath contains only one line; + SkPath::Verb array has two entries: kMove_Verb, kLine_Verb. + If SkPath contains one line and line is not nullptr, line is set to + line start point and line end point. + Returns false if SkPath is not one line; line is unaltered. + + @param line storage for line. May be nullptr + @return true if SkPath contains exactly one line + + example: https://fiddle.skia.org/c/@Path_isLine + */ + bool isLine(SkPoint line[2]) const; + + /** Returns the number of points in SkPath. + SkPoint count is initially zero. + + @return SkPath SkPoint array length + + example: https://fiddle.skia.org/c/@Path_countPoints + */ + int countPoints() const; + + /** Returns SkPoint at index in SkPoint array. Valid range for index is + 0 to countPoints() - 1. + Returns (0, 0) if index is out of range. + + @param index SkPoint array element selector + @return SkPoint array value or (0, 0) + + example: https://fiddle.skia.org/c/@Path_getPoint + */ + SkPoint getPoint(int index) const; + + /** Returns number of points in SkPath. Up to max points are copied. + points may be nullptr; then, max must be zero. + If max is greater than number of points, excess points storage is unaltered. + + @param points storage for SkPath SkPoint array. May be nullptr + @param max maximum to copy; must be greater than or equal to zero + @return SkPath SkPoint array length + + example: https://fiddle.skia.org/c/@Path_getPoints + */ + int getPoints(SkPoint points[], int max) const; + + /** Returns the number of verbs: kMove_Verb, kLine_Verb, kQuad_Verb, kConic_Verb, + kCubic_Verb, and kClose_Verb; added to SkPath. + + @return length of verb array + + example: https://fiddle.skia.org/c/@Path_countVerbs + */ + int countVerbs() const; + + /** Returns the number of verbs in the path. Up to max verbs are copied. The + verbs are copied as one byte per verb. + + @param verbs storage for verbs, may be nullptr + @param max maximum number to copy into verbs + @return the actual number of verbs in the path + + example: https://fiddle.skia.org/c/@Path_getVerbs + */ + int getVerbs(uint8_t verbs[], int max) const; + + /** Returns the approximate byte size of the SkPath in memory. + + @return approximate size + */ + size_t approximateBytesUsed() const; + + /** Exchanges the verb array, SkPoint array, weights, and SkPath::FillType with other. + Cached state is also exchanged. swap() internally exchanges pointers, so + it is lightweight and does not allocate memory. + + swap() usage has largely been replaced by operator=(const SkPath& path). + SkPath do not copy their content on assignment until they are written to, + making assignment as efficient as swap(). + + @param other SkPath exchanged by value + + example: https://fiddle.skia.org/c/@Path_swap + */ + void swap(SkPath& other); + + /** Returns minimum and maximum axes values of SkPoint array. + Returns (0, 0, 0, 0) if SkPath contains no points. Returned bounds width and height may + be larger or smaller than area affected when SkPath is drawn. 
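+
+        An illustrative sketch of reading the point array back with the accessors
+        above (the buffer size here is hypothetical):
+
+            SkPath path = SkPath::Circle(50, 50, 25);
+            SkPoint pts[16];
+            int total = path.getPoints(pts, 16);  // returns the full point count,
+                                                  // copying at most 16 points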
+ + SkRect returned includes all SkPoint added to SkPath, including SkPoint associated with + kMove_Verb that define empty contours. + + @return bounds of all SkPoint in SkPoint array + */ + const SkRect& getBounds() const { + return fPathRef->getBounds(); + } + + /** Updates internal bounds so that subsequent calls to getBounds() are instantaneous. + Unaltered copies of SkPath may also access cached bounds through getBounds(). + + For now, identical to calling getBounds() and ignoring the returned value. + + Call to prepare SkPath subsequently drawn from multiple threads, + to avoid a race condition where each draw separately computes the bounds. + */ + void updateBoundsCache() const { + // for now, just calling getBounds() is sufficient + this->getBounds(); + } + + /** Returns minimum and maximum axes values of the lines and curves in SkPath. + Returns (0, 0, 0, 0) if SkPath contains no points. + Returned bounds width and height may be larger or smaller than area affected + when SkPath is drawn. + + Includes SkPoint associated with kMove_Verb that define empty + contours. + + Behaves identically to getBounds() when SkPath contains + only lines. If SkPath contains curves, computed bounds includes + the maximum extent of the quad, conic, or cubic; is slower than getBounds(); + and unlike getBounds(), does not cache the result. + + @return tight bounds of curves in SkPath + + example: https://fiddle.skia.org/c/@Path_computeTightBounds + */ + SkRect computeTightBounds() const; + + /** Returns true if rect is contained by SkPath. + May return false when rect is contained by SkPath. + + For now, only returns true if SkPath has one contour and is convex. + rect may share points and edges with SkPath and be contained. + Returns true if rect is empty, that is, it has zero width or height; and + the SkPoint or line described by rect is contained by SkPath. + + @param rect SkRect, line, or SkPoint checked for containment + @return true if rect is contained + + example: https://fiddle.skia.org/c/@Path_conservativelyContainsRect + */ + bool conservativelyContainsRect(const SkRect& rect) const; + + /** Grows SkPath verb array and SkPoint array to contain extraPtCount additional SkPoint. + May improve performance and use less memory by + reducing the number and size of allocations when creating SkPath. + + @param extraPtCount number of additional SkPoint to allocate + + example: https://fiddle.skia.org/c/@Path_incReserve + */ + void incReserve(int extraPtCount); + +#ifdef SK_HIDE_PATH_EDIT_METHODS +private: +#endif + + /** Adds beginning of contour at SkPoint (x, y). + + @param x x-axis value of contour start + @param y y-axis value of contour start + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_moveTo + */ + SkPath& moveTo(SkScalar x, SkScalar y); + + /** Adds beginning of contour at SkPoint p. + + @param p contour start + @return reference to SkPath + */ + SkPath& moveTo(const SkPoint& p) { + return this->moveTo(p.fX, p.fY); + } + + /** Adds beginning of contour relative to last point. + If SkPath is empty, starts contour at (dx, dy). + Otherwise, start contour at last point offset by (dx, dy). + Function name stands for "relative move to". + + @param dx offset from last point to contour start on x-axis + @param dy offset from last point to contour start on y-axis + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_rMoveTo + */ + SkPath& rMoveTo(SkScalar dx, SkScalar dy); + + /** Adds line from last point to (x, y). 
If SkPath is empty, or last SkPath::Verb is + kClose_Verb, last point is set to (0, 0) before adding line. + + lineTo() appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed. + lineTo() then appends kLine_Verb to verb array and (x, y) to SkPoint array. + + @param x end of added line on x-axis + @param y end of added line on y-axis + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_lineTo + */ + SkPath& lineTo(SkScalar x, SkScalar y); + + /** Adds line from last point to SkPoint p. If SkPath is empty, or last SkPath::Verb is + kClose_Verb, last point is set to (0, 0) before adding line. + + lineTo() first appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed. + lineTo() then appends kLine_Verb to verb array and SkPoint p to SkPoint array. + + @param p end SkPoint of added line + @return reference to SkPath + */ + SkPath& lineTo(const SkPoint& p) { + return this->lineTo(p.fX, p.fY); + } + + /** Adds line from last point to vector (dx, dy). If SkPath is empty, or last SkPath::Verb is + kClose_Verb, last point is set to (0, 0) before adding line. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed; + then appends kLine_Verb to verb array and line end to SkPoint array. + Line end is last point plus vector (dx, dy). + Function name stands for "relative line to". + + @param dx offset from last point to line end on x-axis + @param dy offset from last point to line end on y-axis + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_rLineTo + example: https://fiddle.skia.org/c/@Quad_a + example: https://fiddle.skia.org/c/@Quad_b + */ + SkPath& rLineTo(SkScalar dx, SkScalar dy); + + /** Adds quad from last point towards (x1, y1), to (x2, y2). + If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0) + before adding quad. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed; + then appends kQuad_Verb to verb array; and (x1, y1), (x2, y2) + to SkPoint array. + + @param x1 control SkPoint of quad on x-axis + @param y1 control SkPoint of quad on y-axis + @param x2 end SkPoint of quad on x-axis + @param y2 end SkPoint of quad on y-axis + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_quadTo + */ + SkPath& quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2); + + /** Adds quad from last point towards SkPoint p1, to SkPoint p2. + If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0) + before adding quad. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed; + then appends kQuad_Verb to verb array; and SkPoint p1, p2 + to SkPoint array. + + @param p1 control SkPoint of added quad + @param p2 end SkPoint of added quad + @return reference to SkPath + */ + SkPath& quadTo(const SkPoint& p1, const SkPoint& p2) { + return this->quadTo(p1.fX, p1.fY, p2.fX, p2.fY); + } + + /** Adds quad from last point towards vector (dx1, dy1), to vector (dx2, dy2). + If SkPath is empty, or last SkPath::Verb + is kClose_Verb, last point is set to (0, 0) before adding quad. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, + if needed; then appends kQuad_Verb to verb array; and appends quad + control and quad end to SkPoint array. + Quad control is last point plus vector (dx1, dy1). + Quad end is last point plus vector (dx2, dy2). + Function name stands for "relative quad to". 
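+
+        Illustrative sketch (an assumed example, not from the upstream docs): the two
+        paths below describe the same curve.
+
+            SkPath a, b;
+            a.moveTo(10, 10).quadTo(30, 50, 50, 10);  // absolute control and end points
+            b.moveTo(10, 10).rQuadTo(20, 40, 40, 0);  // same points, relative to (10, 10)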
+ + @param dx1 offset from last point to quad control on x-axis + @param dy1 offset from last point to quad control on y-axis + @param dx2 offset from last point to quad end on x-axis + @param dy2 offset from last point to quad end on y-axis + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Conic_Weight_a + example: https://fiddle.skia.org/c/@Conic_Weight_b + example: https://fiddle.skia.org/c/@Conic_Weight_c + example: https://fiddle.skia.org/c/@Path_rQuadTo + */ + SkPath& rQuadTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2); + + /** Adds conic from last point towards (x1, y1), to (x2, y2), weighted by w. + If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0) + before adding conic. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed. + + If w is finite and not one, appends kConic_Verb to verb array; + and (x1, y1), (x2, y2) to SkPoint array; and w to conic weights. + + If w is one, appends kQuad_Verb to verb array, and + (x1, y1), (x2, y2) to SkPoint array. + + If w is not finite, appends kLine_Verb twice to verb array, and + (x1, y1), (x2, y2) to SkPoint array. + + @param x1 control SkPoint of conic on x-axis + @param y1 control SkPoint of conic on y-axis + @param x2 end SkPoint of conic on x-axis + @param y2 end SkPoint of conic on y-axis + @param w weight of added conic + @return reference to SkPath + */ + SkPath& conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, + SkScalar w); + + /** Adds conic from last point towards SkPoint p1, to SkPoint p2, weighted by w. + If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0) + before adding conic. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed. + + If w is finite and not one, appends kConic_Verb to verb array; + and SkPoint p1, p2 to SkPoint array; and w to conic weights. + + If w is one, appends kQuad_Verb to verb array, and SkPoint p1, p2 + to SkPoint array. + + If w is not finite, appends kLine_Verb twice to verb array, and + SkPoint p1, p2 to SkPoint array. + + @param p1 control SkPoint of added conic + @param p2 end SkPoint of added conic + @param w weight of added conic + @return reference to SkPath + */ + SkPath& conicTo(const SkPoint& p1, const SkPoint& p2, SkScalar w) { + return this->conicTo(p1.fX, p1.fY, p2.fX, p2.fY, w); + } + + /** Adds conic from last point towards vector (dx1, dy1), to vector (dx2, dy2), + weighted by w. If SkPath is empty, or last SkPath::Verb + is kClose_Verb, last point is set to (0, 0) before adding conic. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, + if needed. + + If w is finite and not one, next appends kConic_Verb to verb array, + and w is recorded as conic weight; otherwise, if w is one, appends + kQuad_Verb to verb array; or if w is not finite, appends kLine_Verb + twice to verb array. + + In all cases appends SkPoint control and end to SkPoint array. + control is last point plus vector (dx1, dy1). + end is last point plus vector (dx2, dy2). + + Function name stands for "relative conic to". 
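+
+        Illustrative sketch (an assumed example): a conic with weight SK_ScalarRoot2Over2
+        (about 0.7071) traces a quarter of a circle.
+
+            SkPath p;
+            p.moveTo(100, 0);
+            p.rConicTo(0, 100, -100, 100, SK_ScalarRoot2Over2);  // quarter circle ending at (0, 100)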
+ + @param dx1 offset from last point to conic control on x-axis + @param dy1 offset from last point to conic control on y-axis + @param dx2 offset from last point to conic end on x-axis + @param dy2 offset from last point to conic end on y-axis + @param w weight of added conic + @return reference to SkPath + */ + SkPath& rConicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2, + SkScalar w); + + /** Adds cubic from last point towards (x1, y1), then towards (x2, y2), ending at + (x3, y3). If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to + (0, 0) before adding cubic. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed; + then appends kCubic_Verb to verb array; and (x1, y1), (x2, y2), (x3, y3) + to SkPoint array. + + @param x1 first control SkPoint of cubic on x-axis + @param y1 first control SkPoint of cubic on y-axis + @param x2 second control SkPoint of cubic on x-axis + @param y2 second control SkPoint of cubic on y-axis + @param x3 end SkPoint of cubic on x-axis + @param y3 end SkPoint of cubic on y-axis + @return reference to SkPath + */ + SkPath& cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, + SkScalar x3, SkScalar y3); + + /** Adds cubic from last point towards SkPoint p1, then towards SkPoint p2, ending at + SkPoint p3. If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to + (0, 0) before adding cubic. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed; + then appends kCubic_Verb to verb array; and SkPoint p1, p2, p3 + to SkPoint array. + + @param p1 first control SkPoint of cubic + @param p2 second control SkPoint of cubic + @param p3 end SkPoint of cubic + @return reference to SkPath + */ + SkPath& cubicTo(const SkPoint& p1, const SkPoint& p2, const SkPoint& p3) { + return this->cubicTo(p1.fX, p1.fY, p2.fX, p2.fY, p3.fX, p3.fY); + } + + /** Adds cubic from last point towards vector (dx1, dy1), then towards + vector (dx2, dy2), to vector (dx3, dy3). + If SkPath is empty, or last SkPath::Verb + is kClose_Verb, last point is set to (0, 0) before adding cubic. + + Appends kMove_Verb to verb array and (0, 0) to SkPoint array, + if needed; then appends kCubic_Verb to verb array; and appends cubic + control and cubic end to SkPoint array. + Cubic control is last point plus vector (dx1, dy1). + Cubic end is last point plus vector (dx2, dy2). + Function name stands for "relative cubic to". + + @param dx1 offset from last point to first cubic control on x-axis + @param dy1 offset from last point to first cubic control on y-axis + @param dx2 offset from last point to second cubic control on x-axis + @param dy2 offset from last point to second cubic control on y-axis + @param dx3 offset from last point to cubic end on x-axis + @param dy3 offset from last point to cubic end on y-axis + @return reference to SkPath + */ + SkPath& rCubicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2, + SkScalar dx3, SkScalar dy3); + + /** Appends arc to SkPath. Arc added is part of ellipse + bounded by oval, from startAngle through sweepAngle. Both startAngle and + sweepAngle are measured in degrees, where zero degrees is aligned with the + positive x-axis, and positive sweeps extends arc clockwise. + + arcTo() adds line connecting SkPath last SkPoint to initial arc SkPoint if forceMoveTo + is false and SkPath is not empty. Otherwise, added contour begins with first point + of arc. Angles greater than -360 and less than 360 are treated modulo 360. 
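+
+        Illustrative sketch (an assumed example): append a 90 degree sweep of an ellipse,
+        connected to the current point by a line because forceMoveTo is false.
+
+            SkPath p;
+            p.moveTo(20, 120);
+            p.arcTo(SkRect::MakeWH(100, 100), 0, 90, false);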
+ + @param oval bounds of ellipse containing arc + @param startAngle starting angle of arc in degrees + @param sweepAngle sweep, in degrees. Positive is clockwise; treated modulo 360 + @param forceMoveTo true to start a new contour with arc + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_arcTo + */ + SkPath& arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle, bool forceMoveTo); + + /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic + weighted to describe part of circle. Arc is contained by tangent from + last SkPath point to (x1, y1), and tangent from (x1, y1) to (x2, y2). Arc + is part of circle sized to radius, positioned so it touches both tangent lines. + + If last Path Point does not start Arc, arcTo appends connecting Line to Path. + The length of Vector from (x1, y1) to (x2, y2) does not affect Arc. + + Arc sweep is always less than 180 degrees. If radius is zero, or if + tangents are nearly parallel, arcTo appends Line from last Path Point to (x1, y1). + + arcTo appends at most one Line and one conic. + arcTo implements the functionality of PostScript arct and HTML Canvas arcTo. + + @param x1 x-axis value common to pair of tangents + @param y1 y-axis value common to pair of tangents + @param x2 x-axis value end of second tangent + @param y2 y-axis value end of second tangent + @param radius distance from arc to circle center + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_arcTo_2_a + example: https://fiddle.skia.org/c/@Path_arcTo_2_b + example: https://fiddle.skia.org/c/@Path_arcTo_2_c + */ + SkPath& arcTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar radius); + + /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic + weighted to describe part of circle. Arc is contained by tangent from + last SkPath point to p1, and tangent from p1 to p2. Arc + is part of circle sized to radius, positioned so it touches both tangent lines. + + If last SkPath SkPoint does not start arc, arcTo() appends connecting line to SkPath. + The length of vector from p1 to p2 does not affect arc. + + Arc sweep is always less than 180 degrees. If radius is zero, or if + tangents are nearly parallel, arcTo() appends line from last SkPath SkPoint to p1. + + arcTo() appends at most one line and one conic. + arcTo() implements the functionality of PostScript arct and HTML Canvas arcTo. + + @param p1 SkPoint common to pair of tangents + @param p2 end of second tangent + @param radius distance from arc to circle center + @return reference to SkPath + */ + SkPath& arcTo(const SkPoint p1, const SkPoint p2, SkScalar radius) { + return this->arcTo(p1.fX, p1.fY, p2.fX, p2.fY, radius); + } + + /** \enum SkPath::ArcSize + Four oval parts with radii (rx, ry) start at last SkPath SkPoint and ends at (x, y). + ArcSize and Direction select one of the four oval parts. + */ + enum ArcSize { + kSmall_ArcSize, //!< smaller of arc pair + kLarge_ArcSize, //!< larger of arc pair + }; + + /** Appends arc to SkPath. Arc is implemented by one or more conics weighted to + describe part of oval with radii (rx, ry) rotated by xAxisRotate degrees. Arc + curves from last SkPath SkPoint to (x, y), choosing one of four possible routes: + clockwise or counterclockwise, and smaller or larger. + + Arc sweep is always less than 360 degrees. arcTo() appends line to (x, y) if + either radii are zero, or if last SkPath SkPoint equals (x, y). 
arcTo() scales radii + (rx, ry) to fit last SkPath SkPoint and (x, y) if both are greater than zero but + too small. + + arcTo() appends up to four conic curves. + arcTo() implements the functionality of SVG arc, although SVG sweep-flag value + is opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise, + while kCW_Direction cast to int is zero. + + @param rx radius on x-axis before x-axis rotation + @param ry radius on y-axis before x-axis rotation + @param xAxisRotate x-axis rotation in degrees; positive values are clockwise + @param largeArc chooses smaller or larger arc + @param sweep chooses clockwise or counterclockwise arc + @param x end of arc + @param y end of arc + @return reference to SkPath + */ + SkPath& arcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc, + SkPathDirection sweep, SkScalar x, SkScalar y); + + /** Appends arc to SkPath. Arc is implemented by one or more conic weighted to describe + part of oval with radii (r.fX, r.fY) rotated by xAxisRotate degrees. Arc curves + from last SkPath SkPoint to (xy.fX, xy.fY), choosing one of four possible routes: + clockwise or counterclockwise, + and smaller or larger. + + Arc sweep is always less than 360 degrees. arcTo() appends line to xy if either + radii are zero, or if last SkPath SkPoint equals (xy.fX, xy.fY). arcTo() scales radii r to + fit last SkPath SkPoint and xy if both are greater than zero but too small to describe + an arc. + + arcTo() appends up to four conic curves. + arcTo() implements the functionality of SVG arc, although SVG sweep-flag value is + opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise, while + kCW_Direction cast to int is zero. + + @param r radii on axes before x-axis rotation + @param xAxisRotate x-axis rotation in degrees; positive values are clockwise + @param largeArc chooses smaller or larger arc + @param sweep chooses clockwise or counterclockwise arc + @param xy end of arc + @return reference to SkPath + */ + SkPath& arcTo(const SkPoint r, SkScalar xAxisRotate, ArcSize largeArc, SkPathDirection sweep, + const SkPoint xy) { + return this->arcTo(r.fX, r.fY, xAxisRotate, largeArc, sweep, xy.fX, xy.fY); + } + + /** Appends arc to SkPath, relative to last SkPath SkPoint. Arc is implemented by one or + more conic, weighted to describe part of oval with radii (rx, ry) rotated by + xAxisRotate degrees. Arc curves from last SkPath SkPoint to relative end SkPoint: + (dx, dy), choosing one of four possible routes: clockwise or + counterclockwise, and smaller or larger. If SkPath is empty, the start arc SkPoint + is (0, 0). + + Arc sweep is always less than 360 degrees. arcTo() appends line to end SkPoint + if either radii are zero, or if last SkPath SkPoint equals end SkPoint. + arcTo() scales radii (rx, ry) to fit last SkPath SkPoint and end SkPoint if both are + greater than zero but too small to describe an arc. + + arcTo() appends up to four conic curves. + arcTo() implements the functionality of svg arc, although SVG "sweep-flag" value is + opposite the integer value of sweep; SVG "sweep-flag" uses 1 for clockwise, while + kCW_Direction cast to int is zero. 
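+
+        Illustrative sketch (an assumed example) mirroring the SVG path command
+        "a 50 50 0 0 1 100 0":
+
+            SkPath p;
+            p.moveTo(50, 100);
+            p.rArcTo(50, 50, 0, SkPath::kSmall_ArcSize, SkPathDirection::kCW, 100, 0);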
+ + @param rx radius before x-axis rotation + @param ry radius before x-axis rotation + @param xAxisRotate x-axis rotation in degrees; positive values are clockwise + @param largeArc chooses smaller or larger arc + @param sweep chooses clockwise or counterclockwise arc + @param dx x-axis offset end of arc from last SkPath SkPoint + @param dy y-axis offset end of arc from last SkPath SkPoint + @return reference to SkPath + */ + SkPath& rArcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc, + SkPathDirection sweep, SkScalar dx, SkScalar dy); + + /** Appends kClose_Verb to SkPath. A closed contour connects the first and last SkPoint + with line, forming a continuous loop. Open and closed contour draw the same + with SkPaint::kFill_Style. With SkPaint::kStroke_Style, open contour draws + SkPaint::Cap at contour start and end; closed contour draws + SkPaint::Join at contour start and end. + + close() has no effect if SkPath is empty or last SkPath SkPath::Verb is kClose_Verb. + + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_close + */ + SkPath& close(); + +#ifdef SK_HIDE_PATH_EDIT_METHODS +public: +#endif + + /** Approximates conic with quad array. Conic is constructed from start SkPoint p0, + control SkPoint p1, end SkPoint p2, and weight w. + Quad array is stored in pts; this storage is supplied by caller. + Maximum quad count is 2 to the pow2. + Every third point in array shares last SkPoint of previous quad and first SkPoint of + next quad. Maximum pts storage size is given by: + (1 + 2 * (1 << pow2)) * sizeof(SkPoint). + + Returns quad count used the approximation, which may be smaller + than the number requested. + + conic weight determines the amount of influence conic control point has on the curve. + w less than one represents an elliptical section. w greater than one represents + a hyperbolic section. w equal to one represents a parabolic section. + + Two quad curves are sufficient to approximate an elliptical conic with a sweep + of up to 90 degrees; in this case, set pow2 to one. + + @param p0 conic start SkPoint + @param p1 conic control SkPoint + @param p2 conic end SkPoint + @param w conic weight + @param pts storage for quad array + @param pow2 quad count, as power of two, normally 0 to 5 (1 to 32 quad curves) + @return number of quad curves written to pts + */ + static int ConvertConicToQuads(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, + SkScalar w, SkPoint pts[], int pow2); + + /** Returns true if SkPath is equivalent to SkRect when filled. + If false: rect, isClosed, and direction are unchanged. + If true: rect, isClosed, and direction are written to if not nullptr. + + rect may be smaller than the SkPath bounds. SkPath bounds may include kMove_Verb points + that do not alter the area drawn by the returned rect. + + @param rect storage for bounds of SkRect; may be nullptr + @param isClosed storage set to true if SkPath is closed; may be nullptr + @param direction storage set to SkRect direction; may be nullptr + @return true if SkPath contains SkRect + + example: https://fiddle.skia.org/c/@Path_isRect + */ + bool isRect(SkRect* rect, bool* isClosed = nullptr, SkPathDirection* direction = nullptr) const; + +#ifdef SK_HIDE_PATH_EDIT_METHODS +private: +#endif + + /** Adds a new contour to the path, defined by the rect, and wound in the + specified direction. 
The verbs added to the path will be: + + kMove, kLine, kLine, kLine, kClose + + start specifies which corner to begin the contour: + 0: upper-left corner + 1: upper-right corner + 2: lower-right corner + 3: lower-left corner + + This start point also acts as the implied beginning of the subsequent, + contour, if it does not have an explicit moveTo(). e.g. + + path.addRect(...) + // if we don't say moveTo() here, we will use the rect's start point + path.lineTo(...) + + @param rect SkRect to add as a closed contour + @param dir SkPath::Direction to orient the new contour + @param start initial corner of SkRect to add + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_addRect_2 + */ + SkPath& addRect(const SkRect& rect, SkPathDirection dir, unsigned start); + + SkPath& addRect(const SkRect& rect, SkPathDirection dir = SkPathDirection::kCW) { + return this->addRect(rect, dir, 0); + } + + SkPath& addRect(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom, + SkPathDirection dir = SkPathDirection::kCW) { + return this->addRect({left, top, right, bottom}, dir, 0); + } + + /** Adds oval to path, appending kMove_Verb, four kConic_Verb, and kClose_Verb. + Oval is upright ellipse bounded by SkRect oval with radii equal to half oval width + and half oval height. Oval begins at (oval.fRight, oval.centerY()) and continues + clockwise if dir is kCW_Direction, counterclockwise if dir is kCCW_Direction. + + @param oval bounds of ellipse added + @param dir SkPath::Direction to wind ellipse + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_addOval + */ + SkPath& addOval(const SkRect& oval, SkPathDirection dir = SkPathDirection::kCW); + + /** Adds oval to SkPath, appending kMove_Verb, four kConic_Verb, and kClose_Verb. + Oval is upright ellipse bounded by SkRect oval with radii equal to half oval width + and half oval height. Oval begins at start and continues + clockwise if dir is kCW_Direction, counterclockwise if dir is kCCW_Direction. + + @param oval bounds of ellipse added + @param dir SkPath::Direction to wind ellipse + @param start index of initial point of ellipse + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_addOval_2 + */ + SkPath& addOval(const SkRect& oval, SkPathDirection dir, unsigned start); + + /** Adds circle centered at (x, y) of size radius to SkPath, appending kMove_Verb, + four kConic_Verb, and kClose_Verb. Circle begins at: (x + radius, y), continuing + clockwise if dir is kCW_Direction, and counterclockwise if dir is kCCW_Direction. + + Has no effect if radius is zero or negative. + + @param x center of circle + @param y center of circle + @param radius distance from center to edge + @param dir SkPath::Direction to wind circle + @return reference to SkPath + */ + SkPath& addCircle(SkScalar x, SkScalar y, SkScalar radius, + SkPathDirection dir = SkPathDirection::kCW); + + /** Appends arc to SkPath, as the start of new contour. Arc added is part of ellipse + bounded by oval, from startAngle through sweepAngle. Both startAngle and + sweepAngle are measured in degrees, where zero degrees is aligned with the + positive x-axis, and positive sweeps extends arc clockwise. + + If sweepAngle <= -360, or sweepAngle >= 360; and startAngle modulo 90 is nearly + zero, append oval instead of arc. Otherwise, sweepAngle values are treated + modulo 360, and arc may or may not draw depending on numeric rounding. 
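+
+        Illustrative sketch (an assumed example): start a new contour covering half of
+        a circle; because the y-axis grows downward, the positive sweep passes through
+        the bottom of the oval.
+
+            SkPath p;
+            p.addArc(SkRect::MakeWH(100, 100), 0, 180);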
+ + @param oval bounds of ellipse containing arc + @param startAngle starting angle of arc in degrees + @param sweepAngle sweep, in degrees. Positive is clockwise; treated modulo 360 + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_addArc + */ + SkPath& addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle); + + /** Appends SkRRect to SkPath, creating a new closed contour. SkRRect has bounds + equal to rect; each corner is 90 degrees of an ellipse with radii (rx, ry). If + dir is kCW_Direction, SkRRect starts at top-left of the lower-left corner and + winds clockwise. If dir is kCCW_Direction, SkRRect starts at the bottom-left + of the upper-left corner and winds counterclockwise. + + If either rx or ry is too large, rx and ry are scaled uniformly until the + corners fit. If rx or ry is less than or equal to zero, addRoundRect() appends + SkRect rect to SkPath. + + After appending, SkPath may be empty, or may contain: SkRect, oval, or SkRRect. + + @param rect bounds of SkRRect + @param rx x-axis radius of rounded corners on the SkRRect + @param ry y-axis radius of rounded corners on the SkRRect + @param dir SkPath::Direction to wind SkRRect + @return reference to SkPath + */ + SkPath& addRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry, + SkPathDirection dir = SkPathDirection::kCW); + + /** Appends SkRRect to SkPath, creating a new closed contour. SkRRect has bounds + equal to rect; each corner is 90 degrees of an ellipse with radii from the + array. + + @param rect bounds of SkRRect + @param radii array of 8 SkScalar values, a radius pair for each corner + @param dir SkPath::Direction to wind SkRRect + @return reference to SkPath + */ + SkPath& addRoundRect(const SkRect& rect, const SkScalar radii[], + SkPathDirection dir = SkPathDirection::kCW); + + /** Adds rrect to SkPath, creating a new closed contour. If + dir is kCW_Direction, rrect starts at top-left of the lower-left corner and + winds clockwise. If dir is kCCW_Direction, rrect starts at the bottom-left + of the upper-left corner and winds counterclockwise. + + After appending, SkPath may be empty, or may contain: SkRect, oval, or SkRRect. + + @param rrect bounds and radii of rounded rectangle + @param dir SkPath::Direction to wind SkRRect + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_addRRect + */ + SkPath& addRRect(const SkRRect& rrect, SkPathDirection dir = SkPathDirection::kCW); + + /** Adds rrect to SkPath, creating a new closed contour. If dir is kCW_Direction, rrect + winds clockwise; if dir is kCCW_Direction, rrect winds counterclockwise. + start determines the first point of rrect to add. + + @param rrect bounds and radii of rounded rectangle + @param dir SkPath::Direction to wind SkRRect + @param start index of initial point of SkRRect + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_addRRect_2 + */ + SkPath& addRRect(const SkRRect& rrect, SkPathDirection dir, unsigned start); + + /** Adds contour created from line array, adding (count - 1) line segments. + Contour added starts at pts[0], then adds a line for every additional SkPoint + in pts array. If close is true, appends kClose_Verb to SkPath, connecting + pts[count - 1] and pts[0]. + + If count is zero, append kMove_Verb to path. + Has no effect if count is less than one. 
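+
+        Illustrative sketch (an assumed example) using the initializer_list overload
+        declared below to add a closed triangle:
+
+            SkPath p;
+            p.addPoly({{0, 0}, {80, 0}, {40, 60}}, /*close=*/true);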
+ + @param pts array of line sharing end and start SkPoint + @param count length of SkPoint array + @param close true to add line connecting contour end and start + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_addPoly + */ + SkPath& addPoly(const SkPoint pts[], int count, bool close); + + /** Adds contour created from list. Contour added starts at list[0], then adds a line + for every additional SkPoint in list. If close is true, appends kClose_Verb to SkPath, + connecting last and first SkPoint in list. + + If list is empty, append kMove_Verb to path. + + @param list array of SkPoint + @param close true to add line connecting contour end and start + @return reference to SkPath + */ + SkPath& addPoly(const std::initializer_list<SkPoint>& list, bool close) { + return this->addPoly(list.begin(), SkToInt(list.size()), close); + } + +#ifdef SK_HIDE_PATH_EDIT_METHODS +public: +#endif + + /** \enum SkPath::AddPathMode + AddPathMode chooses how addPath() appends. Adding one SkPath to another can extend + the last contour or start a new contour. + */ + enum AddPathMode { + kAppend_AddPathMode, //!< appended to destination unaltered + kExtend_AddPathMode, //!< add line if prior contour is not closed + }; + + /** Appends src to SkPath, offset by (dx, dy). + + If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are + added unaltered. If mode is kExtend_AddPathMode, add line before appending + verbs, SkPoint, and conic weights. + + @param src SkPath verbs, SkPoint, and conic weights to add + @param dx offset added to src SkPoint array x-axis coordinates + @param dy offset added to src SkPoint array y-axis coordinates + @param mode kAppend_AddPathMode or kExtend_AddPathMode + @return reference to SkPath + */ + SkPath& addPath(const SkPath& src, SkScalar dx, SkScalar dy, + AddPathMode mode = kAppend_AddPathMode); + + /** Appends src to SkPath. + + If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are + added unaltered. If mode is kExtend_AddPathMode, add line before appending + verbs, SkPoint, and conic weights. + + @param src SkPath verbs, SkPoint, and conic weights to add + @param mode kAppend_AddPathMode or kExtend_AddPathMode + @return reference to SkPath + */ + SkPath& addPath(const SkPath& src, AddPathMode mode = kAppend_AddPathMode) { + SkMatrix m; + m.reset(); + return this->addPath(src, m, mode); + } + + /** Appends src to SkPath, transformed by matrix. Transformed curves may have different + verbs, SkPoint, and conic weights. + + If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are + added unaltered. If mode is kExtend_AddPathMode, add line before appending + verbs, SkPoint, and conic weights. + + @param src SkPath verbs, SkPoint, and conic weights to add + @param matrix transform applied to src + @param mode kAppend_AddPathMode or kExtend_AddPathMode + @return reference to SkPath + */ + SkPath& addPath(const SkPath& src, const SkMatrix& matrix, + AddPathMode mode = kAppend_AddPathMode); + + /** Appends src to SkPath, from back to front. + Reversed src always appends a new contour to SkPath. + + @param src SkPath verbs, SkPoint, and conic weights to add + @return reference to SkPath + + example: https://fiddle.skia.org/c/@Path_reverseAddPath + */ + SkPath& reverseAddPath(const SkPath& src); + + /** Offsets SkPoint array by (dx, dy). Offset SkPath replaces dst. + If dst is nullptr, SkPath is replaced by offset data. 
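+
+        Illustrative sketch (an assumed example): build a translated copy without
+        modifying the original path.
+
+            SkPath src, shifted;
+            src.addCircle(50, 50, 25);
+            src.offset(10, 0, &shifted);  // shifted is src moved 10 units to the right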
+ + @param dx offset added to SkPoint array x-axis coordinates + @param dy offset added to SkPoint array y-axis coordinates + @param dst overwritten, translated copy of SkPath; may be nullptr + + example: https://fiddle.skia.org/c/@Path_offset + */ + void offset(SkScalar dx, SkScalar dy, SkPath* dst) const; + + /** Offsets SkPoint array by (dx, dy). SkPath is replaced by offset data. + + @param dx offset added to SkPoint array x-axis coordinates + @param dy offset added to SkPoint array y-axis coordinates + */ + void offset(SkScalar dx, SkScalar dy) { + this->offset(dx, dy, this); + } + + /** Transforms verb array, SkPoint array, and weight by matrix. + transform may change verbs and increase their number. + Transformed SkPath replaces dst; if dst is nullptr, original data + is replaced. + + @param matrix SkMatrix to apply to SkPath + @param dst overwritten, transformed copy of SkPath; may be nullptr + @param pc whether to apply perspective clipping + + example: https://fiddle.skia.org/c/@Path_transform + */ + void transform(const SkMatrix& matrix, SkPath* dst, + SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const; + + /** Transforms verb array, SkPoint array, and weight by matrix. + transform may change verbs and increase their number. + SkPath is replaced by transformed data. + + @param matrix SkMatrix to apply to SkPath + @param pc whether to apply perspective clipping + */ + void transform(const SkMatrix& matrix, + SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) { + this->transform(matrix, this, pc); + } + + SkPath makeTransform(const SkMatrix& m, + SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const { + SkPath dst; + this->transform(m, &dst, pc); + return dst; + } + + SkPath makeScale(SkScalar sx, SkScalar sy) { + return this->makeTransform(SkMatrix::Scale(sx, sy), SkApplyPerspectiveClip::kNo); + } + + /** Returns last point on SkPath in lastPt. Returns false if SkPoint array is empty, + storing (0, 0) if lastPt is not nullptr. + + @param lastPt storage for final SkPoint in SkPoint array; may be nullptr + @return true if SkPoint array contains one or more SkPoint + + example: https://fiddle.skia.org/c/@Path_getLastPt + */ + bool getLastPt(SkPoint* lastPt) const; + + /** Sets last point to (x, y). If SkPoint array is empty, append kMove_Verb to + verb array and append (x, y) to SkPoint array. + + @param x set x-axis value of last point + @param y set y-axis value of last point + + example: https://fiddle.skia.org/c/@Path_setLastPt + */ + void setLastPt(SkScalar x, SkScalar y); + + /** Sets the last point on the path. If SkPoint array is empty, append kMove_Verb to + verb array and append p to SkPoint array. + + @param p set value of last point + */ + void setLastPt(const SkPoint& p) { + this->setLastPt(p.fX, p.fY); + } + + /** \enum SkPath::SegmentMask + SegmentMask constants correspond to each drawing Verb type in SkPath; for + instance, if SkPath only contains lines, only the kLine_SegmentMask bit is set. + */ + enum SegmentMask { + kLine_SegmentMask = kLine_SkPathSegmentMask, + kQuad_SegmentMask = kQuad_SkPathSegmentMask, + kConic_SegmentMask = kConic_SkPathSegmentMask, + kCubic_SegmentMask = kCubic_SkPathSegmentMask, + }; + + /** Returns a mask, where each set bit corresponds to a SegmentMask constant + if SkPath contains one or more verbs of that type. + Returns zero if SkPath contains no lines, or curves: quads, conics, or cubics. + + getSegmentMasks() returns a cached result; it is very fast. 
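+
+        Illustrative sketch (an assumed example; "path" is a hypothetical SkPath):
+        test whether a path is made of straight edges only.
+
+            bool onlyLines = (path.getSegmentMasks() & ~SkPath::kLine_SegmentMask) == 0;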
+ + @return SegmentMask bits or zero + */ + uint32_t getSegmentMasks() const { return fPathRef->getSegmentMasks(); } + + /** \enum SkPath::Verb + Verb instructs SkPath how to interpret one or more SkPoint and optional conic weight; + manage contour, and terminate SkPath. + */ + enum Verb { + kMove_Verb = static_cast<int>(SkPathVerb::kMove), + kLine_Verb = static_cast<int>(SkPathVerb::kLine), + kQuad_Verb = static_cast<int>(SkPathVerb::kQuad), + kConic_Verb = static_cast<int>(SkPathVerb::kConic), + kCubic_Verb = static_cast<int>(SkPathVerb::kCubic), + kClose_Verb = static_cast<int>(SkPathVerb::kClose), + kDone_Verb = kClose_Verb + 1 + }; + + /** \class SkPath::Iter + Iterates through verb array, and associated SkPoint array and conic weight. + Provides options to treat open contours as closed, and to ignore + degenerate data. + */ + class SK_API Iter { + public: + + /** Initializes SkPath::Iter with an empty SkPath. next() on SkPath::Iter returns + kDone_Verb. + Call setPath to initialize SkPath::Iter at a later time. + + @return SkPath::Iter of empty SkPath + + example: https://fiddle.skia.org/c/@Path_Iter_Iter + */ + Iter(); + + /** Sets SkPath::Iter to return elements of verb array, SkPoint array, and conic weight in + path. If forceClose is true, SkPath::Iter will add kLine_Verb and kClose_Verb after each + open contour. path is not altered. + + @param path SkPath to iterate + @param forceClose true if open contours generate kClose_Verb + @return SkPath::Iter of path + + example: https://fiddle.skia.org/c/@Path_Iter_const_SkPath + */ + Iter(const SkPath& path, bool forceClose); + + /** Sets SkPath::Iter to return elements of verb array, SkPoint array, and conic weight in + path. If forceClose is true, SkPath::Iter will add kLine_Verb and kClose_Verb after each + open contour. path is not altered. + + @param path SkPath to iterate + @param forceClose true if open contours generate kClose_Verb + + example: https://fiddle.skia.org/c/@Path_Iter_setPath + */ + void setPath(const SkPath& path, bool forceClose); + + /** Returns next SkPath::Verb in verb array, and advances SkPath::Iter. + When verb array is exhausted, returns kDone_Verb. + + Zero to four SkPoint are stored in pts, depending on the returned SkPath::Verb. + + @param pts storage for SkPoint data describing returned SkPath::Verb + @return next SkPath::Verb from verb array + + example: https://fiddle.skia.org/c/@Path_RawIter_next + */ + Verb next(SkPoint pts[4]); + + /** Returns conic weight if next() returned kConic_Verb. + + If next() has not been called, or next() did not return kConic_Verb, + result is undefined. + + @return conic weight for conic SkPoint returned by next() + */ + SkScalar conicWeight() const { return *fConicWeights; } + + /** Returns true if last kLine_Verb returned by next() was generated + by kClose_Verb. When true, the end point returned by next() is + also the start point of contour. + + If next() has not been called, or next() did not return kLine_Verb, + result is undefined. + + @return true if last kLine_Verb was generated by kClose_Verb + */ + bool isCloseLine() const { return SkToBool(fCloseLine); } + + /** Returns true if subsequent calls to next() return kClose_Verb before returning + kMove_Verb. if true, contour SkPath::Iter is processing may end with kClose_Verb, or + SkPath::Iter may have been initialized with force close set to true. 
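+
+        Illustrative sketch (an assumed example; "path" is a hypothetical SkPath)
+        showing the iteration loop this method is typically used with:
+
+            SkPath::Iter iter(path, /*forceClose=*/false);
+            SkPoint pts[4];
+            for (SkPath::Verb v = iter.next(pts); v != SkPath::kDone_Verb; v = iter.next(pts)) {
+                // pts holds zero to four points, depending on v
+            }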
+ + @return true if contour is closed + + example: https://fiddle.skia.org/c/@Path_Iter_isClosedContour + */ + bool isClosedContour() const; + + private: + const SkPoint* fPts; + const uint8_t* fVerbs; + const uint8_t* fVerbStop; + const SkScalar* fConicWeights; + SkPoint fMoveTo; + SkPoint fLastPt; + bool fForceClose; + bool fNeedClose; + bool fCloseLine; + + Verb autoClose(SkPoint pts[2]); + }; + +private: + /** \class SkPath::RangeIter + Iterates through a raw range of path verbs, points, and conics. All values are returned + unaltered. + + NOTE: This class will be moved into SkPathPriv once RangeIter is removed. + */ + class RangeIter { + public: + RangeIter() = default; + RangeIter(const uint8_t* verbs, const SkPoint* points, const SkScalar* weights) + : fVerb(verbs), fPoints(points), fWeights(weights) { + SkDEBUGCODE(fInitialPoints = fPoints;) + } + bool operator!=(const RangeIter& that) const { + return fVerb != that.fVerb; + } + bool operator==(const RangeIter& that) const { + return fVerb == that.fVerb; + } + RangeIter& operator++() { + auto verb = static_cast<SkPathVerb>(*fVerb++); + fPoints += pts_advance_after_verb(verb); + if (verb == SkPathVerb::kConic) { + ++fWeights; + } + return *this; + } + RangeIter operator++(int) { + RangeIter copy = *this; + this->operator++(); + return copy; + } + SkPathVerb peekVerb() const { + return static_cast<SkPathVerb>(*fVerb); + } + std::tuple<SkPathVerb, const SkPoint*, const SkScalar*> operator*() const { + SkPathVerb verb = this->peekVerb(); + // We provide the starting point for beziers by peeking backwards from the current + // point, which works fine as long as there is always a kMove before any geometry. + // (SkPath::injectMoveToIfNeeded should have guaranteed this to be the case.) + int backset = pts_backset_for_verb(verb); + SkASSERT(fPoints + backset >= fInitialPoints); + return {verb, fPoints + backset, fWeights}; + } + private: + constexpr static int pts_advance_after_verb(SkPathVerb verb) { + switch (verb) { + case SkPathVerb::kMove: return 1; + case SkPathVerb::kLine: return 1; + case SkPathVerb::kQuad: return 2; + case SkPathVerb::kConic: return 2; + case SkPathVerb::kCubic: return 3; + case SkPathVerb::kClose: return 0; + } + SkUNREACHABLE; + } + constexpr static int pts_backset_for_verb(SkPathVerb verb) { + switch (verb) { + case SkPathVerb::kMove: return 0; + case SkPathVerb::kLine: return -1; + case SkPathVerb::kQuad: return -1; + case SkPathVerb::kConic: return -1; + case SkPathVerb::kCubic: return -1; + case SkPathVerb::kClose: return -1; + } + SkUNREACHABLE; + } + const uint8_t* fVerb = nullptr; + const SkPoint* fPoints = nullptr; + const SkScalar* fWeights = nullptr; + SkDEBUGCODE(const SkPoint* fInitialPoints = nullptr;) + }; +public: + + /** \class SkPath::RawIter + Use Iter instead. This class will soon be removed and RangeIter will be made private. + */ + class SK_API RawIter { + public: + + /** Initializes RawIter with an empty SkPath. next() on RawIter returns kDone_Verb. + Call setPath to initialize SkPath::Iter at a later time. + + @return RawIter of empty SkPath + */ + RawIter() {} + + /** Sets RawIter to return elements of verb array, SkPoint array, and conic weight in path. + + @param path SkPath to iterate + @return RawIter of path + */ + RawIter(const SkPath& path) { + setPath(path); + } + + /** Sets SkPath::Iter to return elements of verb array, SkPoint array, and conic weight in + path. 
+
+            @param path  SkPath to iterate
+        */
+        void setPath(const SkPath&);
+
+        /** Returns next SkPath::Verb in verb array, and advances RawIter.
+            When verb array is exhausted, returns kDone_Verb.
+            Zero to four SkPoint are stored in pts, depending on the returned SkPath::Verb.
+
+            @param pts  storage for SkPoint data describing returned SkPath::Verb
+            @return     next SkPath::Verb from verb array
+        */
+        Verb next(SkPoint[4]);
+
+        /** Returns next SkPath::Verb, but does not advance RawIter.
+
+            @return  next SkPath::Verb from verb array
+        */
+        Verb peek() const {
+            return (fIter != fEnd) ? static_cast<Verb>(std::get<0>(*fIter)) : kDone_Verb;
+        }
+
+        /** Returns conic weight if next() returned kConic_Verb.
+
+            If next() has not been called, or next() did not return kConic_Verb,
+            result is undefined.
+
+            @return  conic weight for conic SkPoint returned by next()
+        */
+        SkScalar conicWeight() const {
+            return fConicWeight;
+        }
+
+    private:
+        RangeIter fIter;
+        RangeIter fEnd;
+        SkScalar fConicWeight = 0;
+        friend class SkPath;
+
+    };
+
+    /** Returns true if the point (x, y) is contained by SkPath, taking into
+        account FillType.
+
+        @param x  x-axis value of containment test
+        @param y  y-axis value of containment test
+        @return   true if SkPoint is in SkPath
+
+        example: https://fiddle.skia.org/c/@Path_contains
+    */
+    bool contains(SkScalar x, SkScalar y) const;
+
+    /** Writes text representation of SkPath to stream. If stream is nullptr, writes to
+        standard output. Set dumpAsHex true to generate exact binary representations
+        of floating point numbers used in SkPoint array and conic weights.
+
+        @param stream     writable SkWStream receiving SkPath text representation; may be nullptr
+        @param dumpAsHex  true if SkScalar values are written as hexadecimal
+
+        example: https://fiddle.skia.org/c/@Path_dump
+    */
+    void dump(SkWStream* stream, bool dumpAsHex) const;
+
+    void dump() const { this->dump(nullptr, false); }
+    void dumpHex() const { this->dump(nullptr, true); }
+
+    // Like dump(), but outputs for the SkPath::Make() factory
+    void dumpArrays(SkWStream* stream, bool dumpAsHex) const;
+    void dumpArrays() const { this->dumpArrays(nullptr, false); }
+
+    /** Writes SkPath to buffer, returning the number of bytes written.
+        Pass nullptr to obtain the storage size.
+
+        Writes SkPath::FillType, verb array, SkPoint array, conic weight, and
+        additionally writes computed information like SkPath::Convexity and bounds.
+
+        Should only be used in concert with readFromMemory();
+        the format used for SkPath in memory is not guaranteed.
+
+        @param buffer  storage for SkPath; may be nullptr
+        @return        size of storage required for SkPath; always a multiple of 4
+
+        example: https://fiddle.skia.org/c/@Path_writeToMemory
+    */
+    size_t writeToMemory(void* buffer) const;
+
+    /** Writes SkPath to buffer, returning the buffer written to, wrapped in SkData.
+
+        serialize() writes SkPath::FillType, verb array, SkPoint array, conic weight, and
+        additionally writes computed information like SkPath::Convexity and bounds.
+
+        serialize() should only be used in concert with readFromMemory().
+        The format used for SkPath in memory is not guaranteed.
+
+        @return  SkPath data wrapped in SkData buffer
+
+        example: https://fiddle.skia.org/c/@Path_serialize
+    */
+    sk_sp<SkData> serialize() const;
+
+    /** Initializes SkPath from buffer of size length. Returns zero if the buffer
+        data is inconsistent, or the length is too small.
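+
+        Illustrative round-trip sketch (an assumed example; "path" is a hypothetical
+        SkPath) pairing readFromMemory() with serialize():
+
+            sk_sp<SkData> data = path.serialize();
+            SkPath copy;
+            size_t read = copy.readFromMemory(data->data(), data->size());  // 0 on failure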
+ + Reads SkPath::FillType, verb array, SkPoint array, conic weight, and + additionally reads computed information like SkPath::Convexity and bounds. + + Used only in concert with writeToMemory(); + the format used for SkPath in memory is not guaranteed. + + @param buffer storage for SkPath + @param length buffer size in bytes; must be multiple of 4 + @return number of bytes read, or zero on failure + + example: https://fiddle.skia.org/c/@Path_readFromMemory + */ + size_t readFromMemory(const void* buffer, size_t length); + + /** (See Skia bug 1762.) + Returns a non-zero, globally unique value. A different value is returned + if verb array, SkPoint array, or conic weight changes. + + Setting SkPath::FillType does not change generation identifier. + + Each time the path is modified, a different generation identifier will be returned. + SkPath::FillType does affect generation identifier on Android framework. + + @return non-zero, globally unique value + + example: https://fiddle.skia.org/c/@Path_getGenerationID + */ + uint32_t getGenerationID() const; + + /** Returns if SkPath data is consistent. Corrupt SkPath data is detected if + internal values are out of range or internal storage does not match + array dimensions. + + @return true if SkPath data is consistent + */ + bool isValid() const { return this->isValidImpl() && fPathRef->isValid(); } + +private: + SkPath(sk_sp<SkPathRef>, SkPathFillType, bool isVolatile, SkPathConvexity, + SkPathFirstDirection firstDirection); + + sk_sp<SkPathRef> fPathRef; + int fLastMoveToIndex; + mutable std::atomic<uint8_t> fConvexity; // SkPathConvexity + mutable std::atomic<uint8_t> fFirstDirection; // SkPathFirstDirection + uint8_t fFillType : 2; + uint8_t fIsVolatile : 1; + + /** Resets all fields other than fPathRef to their initial 'empty' values. + * Assumes the caller has already emptied fPathRef. + * On Android increments fGenerationID without reseting it. + */ + void resetFields(); + + /** Sets all fields other than fPathRef to the values in 'that'. + * Assumes the caller has already set fPathRef. + * Doesn't change fGenerationID or fSourcePath on Android. + */ + void copyFields(const SkPath& that); + + size_t writeToMemoryAsRRect(void* buffer) const; + size_t readAsRRect(const void*, size_t); + size_t readFromMemory_EQ4Or5(const void*, size_t); + + friend class Iter; + friend class SkPathPriv; + friend class SkPathStroker; + + /* Append, in reverse order, the first contour of path, ignoring path's + last point. If no moveTo() call has been made for this contour, the + first point is automatically set to (0,0). + */ + SkPath& reversePathTo(const SkPath&); + + // called before we add points for lineTo, quadTo, cubicTo, checking to see + // if we need to inject a leading moveTo first + // + // SkPath path; path.lineTo(...); <--- need a leading moveTo(0, 0) + // SkPath path; ... path.close(); path.lineTo(...) <-- need a moveTo(previous moveTo) + // + inline void injectMoveToIfNeeded(); + + inline bool hasOnlyMoveTos() const; + + SkPathConvexity computeConvexity() const; + + /** Asserts if SkPath data is inconsistent. + Debugging check intended for internal use only. 
+ */ + SkDEBUGCODE(void validate() const { SkASSERT(this->isValidImpl()); } ) + bool isValidImpl() const; + SkDEBUGCODE(void validateRef() const { fPathRef->validate(); } ) + + // called by stroker to see if all points (in the last contour) are equal and worthy of a cap + bool isZeroLengthSincePoint(int startPtIndex) const; + + /** Returns if the path can return a bound at no cost (true) or will have to + perform some computation (false). + */ + bool hasComputedBounds() const { + SkDEBUGCODE(this->validate();) + return fPathRef->hasComputedBounds(); + } + + + // 'rect' needs to be sorted + void setBounds(const SkRect& rect) { + SkPathRef::Editor ed(&fPathRef); + + ed.setBounds(rect); + } + + void setPt(int index, SkScalar x, SkScalar y); + + SkPath& dirtyAfterEdit(); + + // Bottlenecks for working with fConvexity and fFirstDirection. + // Notice the setters are const... these are mutable atomic fields. + void setConvexity(SkPathConvexity) const; + + void setFirstDirection(SkPathFirstDirection) const; + SkPathFirstDirection getFirstDirection() const; + + /** Returns the comvexity type, computing if needed. Never returns kUnknown. + @return path's convexity type (convex or concave) + */ + SkPathConvexity getConvexity() const; + + SkPathConvexity getConvexityOrUnknown() const { + return (SkPathConvexity)fConvexity.load(std::memory_order_relaxed); + } + + // Compares the cached value with a freshly computed one (computeConvexity()) + bool isConvexityAccurate() const; + + /** Stores a convexity type for this path. This is what will be returned if + * getConvexityOrUnknown() is called. If you pass kUnknown, then if getContexityType() + * is called, the real convexity will be computed. + * + * example: https://fiddle.skia.org/c/@Path_setConvexity + */ + void setConvexity(SkPathConvexity convexity); + + /** Shrinks SkPath verb array and SkPoint array storage to discard unused capacity. + * May reduce the heap overhead for SkPath known to be fully constructed. + * + * NOTE: This may relocate the underlying buffers, and thus any Iterators referencing + * this path should be discarded after calling shrinkToFit(). + */ + void shrinkToFit(); + + friend class SkAutoPathBoundsUpdate; + friend class SkAutoDisableOvalCheck; + friend class SkAutoDisableDirectionCheck; + friend class SkPathBuilder; + friend class SkPathEdgeIter; + friend class SkPathWriter; + friend class SkOpBuilder; + friend class SkBench_AddPathTest; // perf test reversePathTo + friend class PathTest_Private; // unit test reversePathTo + friend class ForceIsRRect_Private; // unit test isRRect + friend class FuzzPath; // for legacy access to validateRef +}; + +#endif diff --git a/src/deps/skia/include/core/SkPathBuilder.h b/src/deps/skia/include/core/SkPathBuilder.h new file mode 100644 index 000000000..ec10de87e --- /dev/null +++ b/src/deps/skia/include/core/SkPathBuilder.h @@ -0,0 +1,268 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkPathBuilder_DEFINED +#define SkPathBuilder_DEFINED + +#include "include/core/SkMatrix.h" +#include "include/core/SkPath.h" +#include "include/core/SkPathTypes.h" +#include "include/private/SkTDArray.h" + +class SK_API SkPathBuilder { +public: + SkPathBuilder(); + SkPathBuilder(SkPathFillType); + SkPathBuilder(const SkPath&); + SkPathBuilder(const SkPathBuilder&) = default; + ~SkPathBuilder(); + + SkPathBuilder& operator=(const SkPath&); + SkPathBuilder& operator=(const SkPathBuilder&) = default; + + SkPathFillType fillType() const { return fFillType; } + SkRect computeBounds() const; + + SkPath snapshot() const; // the builder is unchanged after returning this path + SkPath detach(); // the builder is reset to empty after returning this path + + SkPathBuilder& setFillType(SkPathFillType ft) { fFillType = ft; return *this; } + SkPathBuilder& setIsVolatile(bool isVolatile) { fIsVolatile = isVolatile; return *this; } + + SkPathBuilder& reset(); + + SkPathBuilder& moveTo(SkPoint pt); + SkPathBuilder& moveTo(SkScalar x, SkScalar y) { return this->moveTo(SkPoint::Make(x, y)); } + + SkPathBuilder& lineTo(SkPoint pt); + SkPathBuilder& lineTo(SkScalar x, SkScalar y) { return this->lineTo(SkPoint::Make(x, y)); } + + SkPathBuilder& quadTo(SkPoint pt1, SkPoint pt2); + SkPathBuilder& quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) { + return this->quadTo(SkPoint::Make(x1, y1), SkPoint::Make(x2, y2)); + } + SkPathBuilder& quadTo(const SkPoint pts[2]) { return this->quadTo(pts[0], pts[1]); } + + SkPathBuilder& conicTo(SkPoint pt1, SkPoint pt2, SkScalar w); + SkPathBuilder& conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar w) { + return this->conicTo(SkPoint::Make(x1, y1), SkPoint::Make(x2, y2), w); + } + SkPathBuilder& conicTo(const SkPoint pts[2], SkScalar w) { + return this->conicTo(pts[0], pts[1], w); + } + + SkPathBuilder& cubicTo(SkPoint pt1, SkPoint pt2, SkPoint pt3); + SkPathBuilder& cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar x3, SkScalar y3) { + return this->cubicTo(SkPoint::Make(x1, y1), SkPoint::Make(x2, y2), SkPoint::Make(x3, y3)); + } + SkPathBuilder& cubicTo(const SkPoint pts[3]) { + return this->cubicTo(pts[0], pts[1], pts[2]); + } + + SkPathBuilder& close(); + + // Append a series of lineTo(...) + SkPathBuilder& polylineTo(const SkPoint pts[], int count); + SkPathBuilder& polylineTo(const std::initializer_list<SkPoint>& list) { + return this->polylineTo(list.begin(), SkToInt(list.size())); + } + + // Relative versions of segments, relative to the previous position. + + SkPathBuilder& rLineTo(SkPoint pt); + SkPathBuilder& rLineTo(SkScalar x, SkScalar y) { return this->rLineTo({x, y}); } + SkPathBuilder& rQuadTo(SkPoint pt1, SkPoint pt2); + SkPathBuilder& rQuadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) { + return this->rQuadTo({x1, y1}, {x2, y2}); + } + SkPathBuilder& rConicTo(SkPoint p1, SkPoint p2, SkScalar w); + SkPathBuilder& rConicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar w) { + return this->rConicTo({x1, y1}, {x2, y2}, w); + } + SkPathBuilder& rCubicTo(SkPoint pt1, SkPoint pt2, SkPoint pt3); + SkPathBuilder& rCubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar x3, SkScalar y3) { + return this->rCubicTo({x1, y1}, {x2, y2}, {x3, y3}); + } + + // Arcs + + /** Appends arc to the builder. Arc added is part of ellipse + bounded by oval, from startAngle through sweepAngle. 
Both startAngle and + sweepAngle are measured in degrees, where zero degrees is aligned with the + positive x-axis, and positive sweeps extends arc clockwise. + + arcTo() adds line connecting the builder's last point to initial arc point if forceMoveTo + is false and the builder is not empty. Otherwise, added contour begins with first point + of arc. Angles greater than -360 and less than 360 are treated modulo 360. + + @param oval bounds of ellipse containing arc + @param startAngleDeg starting angle of arc in degrees + @param sweepAngleDeg sweep, in degrees. Positive is clockwise; treated modulo 360 + @param forceMoveTo true to start a new contour with arc + @return reference to the builder + */ + SkPathBuilder& arcTo(const SkRect& oval, SkScalar startAngleDeg, SkScalar sweepAngleDeg, + bool forceMoveTo); + + /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic + weighted to describe part of circle. Arc is contained by tangent from + last SkPath point to p1, and tangent from p1 to p2. Arc + is part of circle sized to radius, positioned so it touches both tangent lines. + + If last SkPath SkPoint does not start arc, arcTo() appends connecting line to SkPath. + The length of vector from p1 to p2 does not affect arc. + + Arc sweep is always less than 180 degrees. If radius is zero, or if + tangents are nearly parallel, arcTo() appends line from last SkPath SkPoint to p1. + + arcTo() appends at most one line and one conic. + arcTo() implements the functionality of PostScript arct and HTML Canvas arcTo. + + @param p1 SkPoint common to pair of tangents + @param p2 end of second tangent + @param radius distance from arc to circle center + @return reference to SkPath + */ + SkPathBuilder& arcTo(SkPoint p1, SkPoint p2, SkScalar radius); + + enum ArcSize { + kSmall_ArcSize, //!< smaller of arc pair + kLarge_ArcSize, //!< larger of arc pair + }; + + /** Appends arc to SkPath. Arc is implemented by one or more conic weighted to describe + part of oval with radii (r.fX, r.fY) rotated by xAxisRotate degrees. Arc curves + from last SkPath SkPoint to (xy.fX, xy.fY), choosing one of four possible routes: + clockwise or counterclockwise, + and smaller or larger. + + Arc sweep is always less than 360 degrees. arcTo() appends line to xy if either + radii are zero, or if last SkPath SkPoint equals (xy.fX, xy.fY). arcTo() scales radii r to + fit last SkPath SkPoint and xy if both are greater than zero but too small to describe + an arc. + + arcTo() appends up to four conic curves. + arcTo() implements the functionality of SVG arc, although SVG sweep-flag value is + opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise, while + kCW_Direction cast to int is zero. + + @param r radii on axes before x-axis rotation + @param xAxisRotate x-axis rotation in degrees; positive values are clockwise + @param largeArc chooses smaller or larger arc + @param sweep chooses clockwise or counterclockwise arc + @param xy end of arc + @return reference to SkPath + */ + SkPathBuilder& arcTo(SkPoint r, SkScalar xAxisRotate, ArcSize largeArc, SkPathDirection sweep, + SkPoint xy); + + /** Appends arc to the builder, as the start of new contour. Arc added is part of ellipse + bounded by oval, from startAngle through sweepAngle. Both startAngle and + sweepAngle are measured in degrees, where zero degrees is aligned with the + positive x-axis, and positive sweeps extends arc clockwise. 
+ + If sweepAngle <= -360, or sweepAngle >= 360; and startAngle modulo 90 is nearly + zero, append oval instead of arc. Otherwise, sweepAngle values are treated + modulo 360, and arc may or may not draw depending on numeric rounding. + + @param oval bounds of ellipse containing arc + @param startAngleDeg starting angle of arc in degrees + @param sweepAngleDeg sweep, in degrees. Positive is clockwise; treated modulo 360 + @return reference to this builder + */ + SkPathBuilder& addArc(const SkRect& oval, SkScalar startAngleDeg, SkScalar sweepAngleDeg); + + // Add a new contour + + SkPathBuilder& addRect(const SkRect&, SkPathDirection, unsigned startIndex); + SkPathBuilder& addOval(const SkRect&, SkPathDirection, unsigned startIndex); + SkPathBuilder& addRRect(const SkRRect&, SkPathDirection, unsigned startIndex); + + SkPathBuilder& addRect(const SkRect& rect, SkPathDirection dir = SkPathDirection::kCW) { + return this->addRect(rect, dir, 0); + } + SkPathBuilder& addOval(const SkRect& rect, SkPathDirection dir = SkPathDirection::kCW) { + // legacy start index: 1 + return this->addOval(rect, dir, 1); + } + SkPathBuilder& addRRect(const SkRRect& rrect, SkPathDirection dir = SkPathDirection::kCW) { + // legacy start indices: 6 (CW) and 7 (CCW) + return this->addRRect(rrect, dir, dir == SkPathDirection::kCW ? 6 : 7); + } + + SkPathBuilder& addCircle(SkScalar center_x, SkScalar center_y, SkScalar radius, + SkPathDirection dir = SkPathDirection::kCW); + + SkPathBuilder& addPolygon(const SkPoint pts[], int count, bool isClosed); + SkPathBuilder& addPolygon(const std::initializer_list<SkPoint>& list, bool isClosed) { + return this->addPolygon(list.begin(), SkToInt(list.size()), isClosed); + } + + SkPathBuilder& addPath(const SkPath&); + + // Performance hint, to reserve extra storage for subsequent calls to lineTo, quadTo, etc. 
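// [Editorial aside: an illustrative sketch, not part of the vendored Skia header
// or of this commit's diff. It shows one way the SkPathBuilder API declared above
// might be used; the helper name is made up, and it assumes the vendored Skia
// headers are on the include path.]
#include "include/core/SkPath.h"
#include "include/core/SkPathBuilder.h"

static SkPath makeTriangle() {
    SkPathBuilder b;
    b.setFillType(SkPathFillType::kWinding)
     .moveTo(10, 10)
     .lineTo(90, 10)
     .lineTo(50, 80)
     .close();
    // detach() hands back the accumulated path and resets the builder;
    // snapshot() would return the same path while leaving the builder intact.
    return b.detach();
}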
+ + void incReserve(int extraPtCount, int extraVerbCount); + void incReserve(int extraPtCount) { + this->incReserve(extraPtCount, extraPtCount); + } + + SkPathBuilder& offset(SkScalar dx, SkScalar dy); + + SkPathBuilder& toggleInverseFillType() { + fFillType = (SkPathFillType)((unsigned)fFillType ^ 2); + return *this; + } + +private: + SkTDArray<SkPoint> fPts; + SkTDArray<uint8_t> fVerbs; + SkTDArray<SkScalar> fConicWeights; + + SkPathFillType fFillType; + bool fIsVolatile; + + unsigned fSegmentMask; + SkPoint fLastMovePoint; + int fLastMoveIndex; // only needed until SkPath is immutable + bool fNeedsMoveVerb; + + enum IsA { + kIsA_JustMoves, // we only have 0 or more moves + kIsA_MoreThanMoves, // we have verbs other than just move + kIsA_Oval, // we are 0 or more moves followed by an oval + kIsA_RRect, // we are 0 or more moves followed by a rrect + }; + IsA fIsA = kIsA_JustMoves; + int fIsAStart = -1; // tracks direction iff fIsA is not unknown + bool fIsACCW = false; // tracks direction iff fIsA is not unknown + + // for testing + SkPathConvexity fOverrideConvexity = SkPathConvexity::kUnknown; + + int countVerbs() const { return fVerbs.count(); } + + // called right before we add a (non-move) verb + void ensureMove() { + fIsA = kIsA_MoreThanMoves; + if (fNeedsMoveVerb) { + this->moveTo(fLastMovePoint); + } + } + + SkPath make(sk_sp<SkPathRef>) const; + + SkPathBuilder& privateReverseAddPath(const SkPath&); + + // For testing + void privateSetConvexity(SkPathConvexity c) { fOverrideConvexity = c; } + + friend class SkPathPriv; +}; + +#endif + diff --git a/src/deps/skia/include/core/SkPathEffect.h b/src/deps/skia/include/core/SkPathEffect.h new file mode 100644 index 000000000..abb370c52 --- /dev/null +++ b/src/deps/skia/include/core/SkPathEffect.h @@ -0,0 +1,106 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPathEffect_DEFINED +#define SkPathEffect_DEFINED + +#include "include/core/SkFlattenable.h" +#include "include/core/SkScalar.h" +// not needed, but some of our clients need it (they don't IWYU) +#include "include/core/SkPath.h" + +class SkPath; +struct SkRect; +class SkStrokeRec; + +/** \class SkPathEffect + + SkPathEffect is the base class for objects in the SkPaint that affect + the geometry of a drawing primitive before it is transformed by the + canvas' matrix and drawn. + + Dashing is implemented as a subclass of SkPathEffect. +*/ +class SK_API SkPathEffect : public SkFlattenable { +public: + /** + * Returns a patheffect that apples each effect (first and second) to the original path, + * and returns a path with the sum of these. + * + * result = first(path) + second(path) + * + */ + static sk_sp<SkPathEffect> MakeSum(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second); + + /** + * Returns a patheffect that applies the inner effect to the path, and then applies the + * outer effect to the result of the inner's. + * + * result = outer(inner(path)) + */ + static sk_sp<SkPathEffect> MakeCompose(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner); + + static SkFlattenable::Type GetFlattenableType() { + return kSkPathEffect_Type; + } + + // move to base? 
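// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It shows MakeCompose()/MakeSum() from the class above combining two
// concrete effects; SkDashPathEffect and SkCornerPathEffect live in Skia's
// include/effects headers, which are outside this excerpt.]
#include "include/core/SkPaint.h"
#include "include/effects/SkCornerPathEffect.h"
#include "include/effects/SkDashPathEffect.h"

static void setDashedRoundedStroke(SkPaint* paint) {
    const SkScalar intervals[] = {10, 5};  // 10 units on, 5 units off
    sk_sp<SkPathEffect> dash = SkDashPathEffect::Make(intervals, 2, /*phase=*/0);
    sk_sp<SkPathEffect> corners = SkCornerPathEffect::Make(/*radius=*/4);
    // outer(inner(path)): round the corners first, then dash the rounded outline.
    paint->setPathEffect(SkPathEffect::MakeCompose(dash, corners));
    paint->setStyle(SkPaint::kStroke_Style);
}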
+ + enum DashType { + kNone_DashType, //!< ignores the info parameter + kDash_DashType, //!< fills in all of the info parameter + }; + + struct DashInfo { + DashInfo() : fIntervals(nullptr), fCount(0), fPhase(0) {} + DashInfo(SkScalar* intervals, int32_t count, SkScalar phase) + : fIntervals(intervals), fCount(count), fPhase(phase) {} + + SkScalar* fIntervals; //!< Length of on/off intervals for dashed lines + // Even values represent ons, and odds offs + int32_t fCount; //!< Number of intervals in the dash. Should be even number + SkScalar fPhase; //!< Offset into the dashed interval pattern + // mod the sum of all intervals + }; + + DashType asADash(DashInfo* info) const; + + /** + * Given a src path (input) and a stroke-rec (input and output), apply + * this effect to the src path, returning the new path in dst, and return + * true. If this effect cannot be applied, return false and ignore dst + * and stroke-rec. + * + * The stroke-rec specifies the initial request for stroking (if any). + * The effect can treat this as input only, or it can choose to change + * the rec as well. For example, the effect can decide to change the + * stroke's width or join, or the effect can change the rec from stroke + * to fill (or fill to stroke) in addition to returning a new (dst) path. + * + * If this method returns true, the caller will apply (as needed) the + * resulting stroke-rec to dst and then draw. + */ + bool filterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect* cullR) const; + + /** Version of filterPath that can be called when the CTM is known. */ + bool filterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect* cullR, + const SkMatrix& ctm) const; + + /** True if this path effect requires a valid CTM */ + bool needsCTM() const; + + static sk_sp<SkPathEffect> Deserialize(const void* data, size_t size, + const SkDeserialProcs* procs = nullptr); + +private: + SkPathEffect() = default; + friend class SkPathEffectBase; + + using INHERITED = SkFlattenable; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPathMeasure.h b/src/deps/skia/include/core/SkPathMeasure.h new file mode 100644 index 000000000..2335c7c23 --- /dev/null +++ b/src/deps/skia/include/core/SkPathMeasure.h @@ -0,0 +1,88 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPathMeasure_DEFINED +#define SkPathMeasure_DEFINED + +#include "include/core/SkContourMeasure.h" +#include "include/core/SkPath.h" +#include "include/private/SkTDArray.h" + +class SK_API SkPathMeasure { +public: + SkPathMeasure(); + /** Initialize the pathmeasure with the specified path. The parts of the path that are needed + * are copied, so the client is free to modify/delete the path after this call. + * + * resScale controls the precision of the measure. values > 1 increase the + * precision (and possibly slow down the computation). + */ + SkPathMeasure(const SkPath& path, bool forceClosed, SkScalar resScale = 1); + ~SkPathMeasure(); + + /** Reset the pathmeasure with the specified path. The parts of the path that are needed + * are copied, so the client is free to modify/delete the path after this call.. + */ + void setPath(const SkPath*, bool forceClosed); + + /** Return the total length of the current contour, or 0 if no path + is associated (e.g. 
resetPath(null)) + */ + SkScalar getLength(); + + /** Pins distance to 0 <= distance <= getLength(), and then computes + the corresponding position and tangent. + Returns false if there is no path, or a zero-length path was specified, in which case + position and tangent are unchanged. + */ + bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position, + SkVector* tangent); + + enum MatrixFlags { + kGetPosition_MatrixFlag = 0x01, + kGetTangent_MatrixFlag = 0x02, + kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag + }; + + /** Pins distance to 0 <= distance <= getLength(), and then computes + the corresponding matrix (by calling getPosTan). + Returns false if there is no path, or a zero-length path was specified, in which case + matrix is unchanged. + */ + bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix, + MatrixFlags flags = kGetPosAndTan_MatrixFlag); + + /** Given a start and stop distance, return in dst the intervening segment(s). + If the segment is zero-length, return false, else return true. + startD and stopD are pinned to legal values (0..getLength()). If startD > stopD + then return false (and leave dst untouched). + Begin the segment with a moveTo if startWithMoveTo is true + */ + bool getSegment(SkScalar startD, SkScalar stopD, SkPath* dst, bool startWithMoveTo); + + /** Return true if the current contour is closed() + */ + bool isClosed(); + + /** Move to the next contour in the path. Return true if one exists, or false if + we're done with the path. + */ + bool nextContour(); + +#ifdef SK_DEBUG + void dump(); +#endif + +private: + SkContourMeasureIter fIter; + sk_sp<SkContourMeasure> fContour; + + SkPathMeasure(const SkPathMeasure&) = delete; + SkPathMeasure& operator=(const SkPathMeasure&) = delete; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPathTypes.h b/src/deps/skia/include/core/SkPathTypes.h new file mode 100644 index 000000000..f589ea46c --- /dev/null +++ b/src/deps/skia/include/core/SkPathTypes.h @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
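// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It walks a contour with the SkPathMeasure API declared above; the
// helper name and the eight-step sampling are arbitrary choices.]
#include "include/core/SkPath.h"
#include "include/core/SkPathMeasure.h"

static void samplePath(const SkPath& path) {
    SkPathMeasure measure(path, /*forceClosed=*/false);
    const SkScalar length = measure.getLength();
    for (SkScalar d = 0; d < length; d += length / 8) {
        SkPoint pos;
        SkVector tan;
        if (!measure.getPosTan(d, &pos, &tan)) {
            break;  // no path, or a zero-length contour
        }
        // pos is the point at arc length d along the contour; tan is the unit tangent there.
    }
}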
+ */ + +#ifndef SkPathTypes_DEFINED +#define SkPathTypes_DEFINED + +#include "include/core/SkTypes.h" + +enum class SkPathFillType { + /** Specifies that "inside" is computed by a non-zero sum of signed edge crossings */ + kWinding, + /** Specifies that "inside" is computed by an odd number of edge crossings */ + kEvenOdd, + /** Same as Winding, but draws outside of the path, rather than inside */ + kInverseWinding, + /** Same as EvenOdd, but draws outside of the path, rather than inside */ + kInverseEvenOdd +}; + +static inline bool SkPathFillType_IsEvenOdd(SkPathFillType ft) { + return (static_cast<int>(ft) & 1) != 0; +} + +static inline bool SkPathFillType_IsInverse(SkPathFillType ft) { + return (static_cast<int>(ft) & 2) != 0; +} + +static inline SkPathFillType SkPathFillType_ConvertToNonInverse(SkPathFillType ft) { + return static_cast<SkPathFillType>(static_cast<int>(ft) & 1); +} + +enum class SkPathDirection { + /** clockwise direction for adding closed contours */ + kCW, + /** counter-clockwise direction for adding closed contours */ + kCCW, +}; + +enum SkPathSegmentMask { + kLine_SkPathSegmentMask = 1 << 0, + kQuad_SkPathSegmentMask = 1 << 1, + kConic_SkPathSegmentMask = 1 << 2, + kCubic_SkPathSegmentMask = 1 << 3, +}; + +enum class SkPathVerb { + kMove, //!< SkPath::RawIter returns 1 point + kLine, //!< SkPath::RawIter returns 2 points + kQuad, //!< SkPath::RawIter returns 3 points + kConic, //!< SkPath::RawIter returns 3 points + 1 weight + kCubic, //!< SkPath::RawIter returns 4 points + kClose //!< SkPath::RawIter returns 0 points +}; + +#endif diff --git a/src/deps/skia/include/core/SkPicture.h b/src/deps/skia/include/core/SkPicture.h new file mode 100644 index 000000000..d05f13fc2 --- /dev/null +++ b/src/deps/skia/include/core/SkPicture.h @@ -0,0 +1,280 @@ +/* + * Copyright 2007 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPicture_DEFINED +#define SkPicture_DEFINED + +#include "include/core/SkRect.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkSamplingOptions.h" +#include "include/core/SkShader.h" +#include "include/core/SkTileMode.h" +#include "include/core/SkTypes.h" + +class SkCanvas; +class SkData; +struct SkDeserialProcs; +class SkImage; +class SkMatrix; +struct SkSerialProcs; +class SkStream; +class SkWStream; + +/** \class SkPicture + SkPicture records drawing commands made to SkCanvas. The command stream may be + played in whole or in part at a later time. + + SkPicture is an abstract class. SkPicture may be generated by SkPictureRecorder + or SkDrawable, or from SkPicture previously saved to SkData or SkStream. + + SkPicture may contain any SkCanvas drawing command, as well as one or more + SkCanvas matrix or SkCanvas clip. SkPicture has a cull SkRect, which is used as + a bounding box hint. To limit SkPicture bounds, use SkCanvas clip when + recording or drawing SkPicture. +*/ +class SK_API SkPicture : public SkRefCnt { +public: + ~SkPicture() override; + + /** Recreates SkPicture that was serialized into a stream. Returns constructed SkPicture + if successful; otherwise, returns nullptr. Fails if data does not permit + constructing valid SkPicture. + + procs->fPictureProc permits supplying a custom function to decode SkPicture. + If procs->fPictureProc is nullptr, default decoding is used. 
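// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It round-trips a picture through serialize() and MakeFromData(), both
// declared in this header, using the default encoders/decoders described here.]
#include "include/core/SkData.h"
#include "include/core/SkPicture.h"

static sk_sp<SkPicture> roundTrip(const sk_sp<SkPicture>& picture) {
    sk_sp<SkData> bytes = picture->serialize();    // default encoders, no SkSerialProcs
    return SkPicture::MakeFromData(bytes.get());   // returns nullptr on malformed data
}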
procs->fPictureCtx + may be used to provide user context to procs->fPictureProc; procs->fPictureProc + is called with a pointer to data, data byte length, and user context. + + @param stream container for serial data + @param procs custom serial data decoders; may be nullptr + @return SkPicture constructed from stream data + */ + static sk_sp<SkPicture> MakeFromStream(SkStream* stream, + const SkDeserialProcs* procs = nullptr); + + /** Recreates SkPicture that was serialized into data. Returns constructed SkPicture + if successful; otherwise, returns nullptr. Fails if data does not permit + constructing valid SkPicture. + + procs->fPictureProc permits supplying a custom function to decode SkPicture. + If procs->fPictureProc is nullptr, default decoding is used. procs->fPictureCtx + may be used to provide user context to procs->fPictureProc; procs->fPictureProc + is called with a pointer to data, data byte length, and user context. + + @param data container for serial data + @param procs custom serial data decoders; may be nullptr + @return SkPicture constructed from data + */ + static sk_sp<SkPicture> MakeFromData(const SkData* data, + const SkDeserialProcs* procs = nullptr); + + /** + + @param data pointer to serial data + @param size size of data + @param procs custom serial data decoders; may be nullptr + @return SkPicture constructed from data + */ + static sk_sp<SkPicture> MakeFromData(const void* data, size_t size, + const SkDeserialProcs* procs = nullptr); + + /** \class SkPicture::AbortCallback + AbortCallback is an abstract class. An implementation of AbortCallback may + passed as a parameter to SkPicture::playback, to stop it before all drawing + commands have been processed. + + If AbortCallback::abort returns true, SkPicture::playback is interrupted. + */ + class SK_API AbortCallback { + public: + /** Has no effect. + */ + virtual ~AbortCallback() = default; + + /** Stops SkPicture playback when some condition is met. A subclass of + AbortCallback provides an override for abort() that can stop SkPicture::playback. + + The part of SkPicture drawn when aborted is undefined. SkPicture instantiations are + free to stop drawing at different points during playback. + + If the abort happens inside one or more calls to SkCanvas::save(), stack + of SkCanvas matrix and SkCanvas clip values is restored to its state before + SkPicture::playback was called. + + @return true to stop playback + + example: https://fiddle.skia.org/c/@Picture_AbortCallback_abort + */ + virtual bool abort() = 0; + + protected: + AbortCallback() = default; + AbortCallback(const AbortCallback&) = delete; + AbortCallback& operator=(const AbortCallback&) = delete; + }; + + /** Replays the drawing commands on the specified canvas. In the case that the + commands are recorded, each command in the SkPicture is sent separately to canvas. + + To add a single command to draw SkPicture to recording canvas, call + SkCanvas::drawPicture instead. + + @param canvas receiver of drawing commands + @param callback allows interruption of playback + + example: https://fiddle.skia.org/c/@Picture_playback + */ + virtual void playback(SkCanvas* canvas, AbortCallback* callback = nullptr) const = 0; + + /** Returns cull SkRect for this picture, passed in when SkPicture was created. + Returned SkRect does not specify clipping SkRect for SkPicture; cull is hint + of SkPicture bounds. + + SkPicture is free to discard recorded drawing commands that fall outside + cull. 
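// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It subclasses the AbortCallback declared above to stop playback after
// a deadline; the class name and the std::chrono clock are this example's choices.]
#include <chrono>
#include "include/core/SkPicture.h"

class DeadlineAbort : public SkPicture::AbortCallback {
public:
    explicit DeadlineAbort(std::chrono::steady_clock::time_point deadline)
        : fDeadline(deadline) {}
    // Returning true interrupts SkPicture::playback(); as documented above,
    // whatever has been drawn up to that point is undefined.
    bool abort() override { return std::chrono::steady_clock::now() > fDeadline; }
private:
    const std::chrono::steady_clock::time_point fDeadline;
};
// usage: picture->playback(canvas, &deadlineAbort);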
+ + @return bounds passed when SkPicture was created + + example: https://fiddle.skia.org/c/@Picture_cullRect + */ + virtual SkRect cullRect() const = 0; + + /** Returns a non-zero value unique among SkPicture in Skia process. + + @return identifier for SkPicture + */ + uint32_t uniqueID() const { return fUniqueID; } + + /** Returns storage containing SkData describing SkPicture, using optional custom + encoders. + + procs->fPictureProc permits supplying a custom function to encode SkPicture. + If procs->fPictureProc is nullptr, default encoding is used. procs->fPictureCtx + may be used to provide user context to procs->fPictureProc; procs->fPictureProc + is called with a pointer to SkPicture and user context. + + @param procs custom serial data encoders; may be nullptr + @return storage containing serialized SkPicture + + example: https://fiddle.skia.org/c/@Picture_serialize + */ + sk_sp<SkData> serialize(const SkSerialProcs* procs = nullptr) const; + + /** Writes picture to stream, using optional custom encoders. + + procs->fPictureProc permits supplying a custom function to encode SkPicture. + If procs->fPictureProc is nullptr, default encoding is used. procs->fPictureCtx + may be used to provide user context to procs->fPictureProc; procs->fPictureProc + is called with a pointer to SkPicture and user context. + + @param stream writable serial data stream + @param procs custom serial data encoders; may be nullptr + + example: https://fiddle.skia.org/c/@Picture_serialize_2 + */ + void serialize(SkWStream* stream, const SkSerialProcs* procs = nullptr) const; + + /** Returns a placeholder SkPicture. Result does not draw, and contains only + cull SkRect, a hint of its bounds. Result is immutable; it cannot be changed + later. Result identifier is unique. + + Returned placeholder can be intercepted during playback to insert other + commands into SkCanvas draw stream. + + @param cull placeholder dimensions + @return placeholder with unique identifier + + example: https://fiddle.skia.org/c/@Picture_MakePlaceholder + */ + static sk_sp<SkPicture> MakePlaceholder(SkRect cull); + + /** Returns the approximate number of operations in SkPicture. Returned value + may be greater or less than the number of SkCanvas calls + recorded: some calls may be recorded as more than one operation, other + calls may be optimized away. + + @param nested if true, include the op-counts of nested pictures as well, else + just return count the ops in the top-level picture. + @return approximate operation count + + example: https://fiddle.skia.org/c/@Picture_approximateOpCount + */ + virtual int approximateOpCount(bool nested = false) const = 0; + + /** Returns the approximate byte size of SkPicture. Does not include large objects + referenced by SkPicture. + + @return approximate size + + example: https://fiddle.skia.org/c/@Picture_approximateBytesUsed + */ + virtual size_t approximateBytesUsed() const = 0; + + /** Return a new shader that will draw with this picture. + * + * @param tmx The tiling mode to use when sampling in the x-direction. + * @param tmy The tiling mode to use when sampling in the y-direction. + * @param mode How to filter the tiles + * @param localMatrix Optional matrix used when sampling + * @param tile The tile rectangle in picture coordinates: this represents the subset + * (or superset) of the picture used when building a tile. It is not + * affected by localMatrix and does not imply scaling (only translation + * and cropping). 
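// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It feeds makeShader(), declared just below, into an SkPaint so the
// picture tiles in both directions; the helper name is made up.]
#include "include/core/SkPaint.h"
#include "include/core/SkPicture.h"

static SkPaint tiledPicturePaint(const sk_sp<SkPicture>& picture) {
    SkPaint paint;
    // With no localMatrix and no tile rect, the picture's cull rect is used as
    // the tile, per the parameter notes above.
    paint.setShader(picture->makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat,
                                        SkFilterMode::kLinear));
    return paint;
}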
If null, the tile rect is considered equal to the picture + * bounds. + * @return Returns a new shader object. Note: this function never returns null. + */ + sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, SkFilterMode mode, + const SkMatrix* localMatrix, const SkRect* tileRect) const; + + sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, SkFilterMode mode) const { + return this->makeShader(tmx, tmy, mode, nullptr, nullptr); + } + +private: + // Allowed subclasses. + SkPicture(); + friend class SkBigPicture; + friend class SkEmptyPicture; + friend class SkPicturePriv; + template <typename> friend class SkMiniPicture; + + void serialize(SkWStream*, const SkSerialProcs*, class SkRefCntSet* typefaces, + bool textBlobsOnly=false) const; + static sk_sp<SkPicture> MakeFromStream(SkStream*, const SkDeserialProcs*, + class SkTypefacePlayback*); + friend class SkPictureData; + + /** Return true if the SkStream/Buffer represents a serialized picture, and + fills out SkPictInfo. After this function returns, the data source is not + rewound so it will have to be manually reset before passing to + MakeFromStream or MakeFromBuffer. Note, MakeFromStream and + MakeFromBuffer perform this check internally so these entry points are + intended for stand alone tools. + If false is returned, SkPictInfo is unmodified. + */ + static bool StreamIsSKP(SkStream*, struct SkPictInfo*); + static bool BufferIsSKP(class SkReadBuffer*, struct SkPictInfo*); + friend bool SkPicture_StreamIsSKP(SkStream*, struct SkPictInfo*); + + // Returns NULL if this is not an SkBigPicture. + virtual const class SkBigPicture* asSkBigPicture() const { return nullptr; } + + friend struct SkPathCounter; + + static bool IsValidPictInfo(const struct SkPictInfo& info); + static sk_sp<SkPicture> Forwardport(const struct SkPictInfo&, + const class SkPictureData*, + class SkReadBuffer* buffer); + + struct SkPictInfo createHeader() const; + class SkPictureData* backport() const; + + uint32_t fUniqueID; + mutable std::atomic<bool> fAddedToCache{false}; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPictureRecorder.h b/src/deps/skia/include/core/SkPictureRecorder.h new file mode 100644 index 000000000..9bc5d1aa1 --- /dev/null +++ b/src/deps/skia/include/core/SkPictureRecorder.h @@ -0,0 +1,115 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPictureRecorder_DEFINED +#define SkPictureRecorder_DEFINED + +#include "include/core/SkBBHFactory.h" +#include "include/core/SkPicture.h" +#include "include/core/SkRefCnt.h" + +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK +namespace android { + class Picture; +}; +#endif + +class SkCanvas; +class SkDrawable; +class SkMiniRecorder; +class SkPictureRecord; +class SkRecord; +class SkRecorder; + +class SK_API SkPictureRecorder { +public: + SkPictureRecorder(); + ~SkPictureRecorder(); + + enum FinishFlags { + }; + + /** Returns the canvas that records the drawing commands. + @param bounds the cull rect used when recording this picture. Any drawing the falls outside + of this rect is undefined, and may be drawn or it may not. + @param bbh optional acceleration structure + @param recordFlags optional flags that control recording. + @return the canvas. 
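// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It shows the record-and-finish cycle described above; the 256x256
// bounds, the red circle, and the helper name are arbitrary.]
#include "include/core/SkCanvas.h"
#include "include/core/SkPaint.h"
#include "include/core/SkPictureRecorder.h"

static sk_sp<SkPicture> recordCircle() {
    SkPictureRecorder recorder;
    SkCanvas* canvas = recorder.beginRecording(SkRect::MakeWH(256, 256));
    SkPaint paint;
    paint.setColor(SK_ColorRED);
    canvas->drawCircle(128, 128, 64, paint);
    // Invalidates `canvas`; the returned picture is immutable.
    return recorder.finishRecordingAsPicture();
}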
+ */ + SkCanvas* beginRecording(const SkRect& bounds, sk_sp<SkBBoxHierarchy> bbh); + + SkCanvas* beginRecording(const SkRect& bounds, SkBBHFactory* bbhFactory = nullptr); + + SkCanvas* beginRecording(SkScalar width, SkScalar height, + SkBBHFactory* bbhFactory = nullptr) { + return this->beginRecording(SkRect::MakeWH(width, height), bbhFactory); + } + + /** Returns the recording canvas if one is active, or NULL if recording is + not active. This does not alter the refcnt on the canvas (if present). + */ + SkCanvas* getRecordingCanvas(); + + /** + * Signal that the caller is done recording. This invalidates the canvas returned by + * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who + * must call unref() when they are done using it. + * + * The returned picture is immutable. If during recording drawables were added to the canvas, + * these will have been "drawn" into a recording canvas, so that this resulting picture will + * reflect their current state, but will not contain a live reference to the drawables + * themselves. + */ + sk_sp<SkPicture> finishRecordingAsPicture(); + + /** + * Signal that the caller is done recording, and update the cull rect to use for bounding + * box hierarchy (BBH) generation. The behavior is the same as calling + * finishRecordingAsPicture(), except that this method updates the cull rect initially passed + * into beginRecording. + * @param cullRect the new culling rectangle to use as the overall bound for BBH generation + * and subsequent culling operations. + * @return the picture containing the recorded content. + */ + sk_sp<SkPicture> finishRecordingAsPictureWithCull(const SkRect& cullRect); + + /** + * Signal that the caller is done recording. This invalidates the canvas returned by + * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who + * must call unref() when they are done using it. + * + * Unlike finishRecordingAsPicture(), which returns an immutable picture, the returned drawable + * may contain live references to other drawables (if they were added to the recording canvas) + * and therefore this drawable will reflect the current state of those nested drawables anytime + * it is drawn or a new picture is snapped from it (by calling drawable->newPictureSnapshot()). + */ + sk_sp<SkDrawable> finishRecordingAsDrawable(); + +private: + void reset(); + + /** Replay the current (partially recorded) operation stream into + canvas. This call doesn't close the current recording. + */ +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + friend class android::Picture; +#endif + friend class SkPictureRecorderReplayTester; // for unit testing + void partialReplay(SkCanvas* canvas) const; + + bool fActivelyRecording; + SkRect fCullRect; + sk_sp<SkBBoxHierarchy> fBBH; + std::unique_ptr<SkRecorder> fRecorder; + sk_sp<SkRecord> fRecord; + std::unique_ptr<SkMiniRecorder> fMiniRecorder; + + SkPictureRecorder(SkPictureRecorder&&) = delete; + SkPictureRecorder& operator=(SkPictureRecorder&&) = delete; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPixelRef.h b/src/deps/skia/include/core/SkPixelRef.h new file mode 100644 index 000000000..0a55ac339 --- /dev/null +++ b/src/deps/skia/include/core/SkPixelRef.h @@ -0,0 +1,123 @@ +/* + * Copyright 2008 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkPixelRef_DEFINED +#define SkPixelRef_DEFINED + +#include "include/core/SkBitmap.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkPixmap.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkSize.h" +#include "include/private/SkIDChangeListener.h" +#include "include/private/SkMutex.h" +#include "include/private/SkTDArray.h" + +#include <atomic> + +struct SkIRect; + +class GrTexture; +class SkDiscardableMemory; + +/** \class SkPixelRef + + This class is the smart container for pixel memory, and is used with SkBitmap. + This class can be shared/accessed between multiple threads. +*/ +class SK_API SkPixelRef : public SkRefCnt { +public: + SkPixelRef(int width, int height, void* addr, size_t rowBytes); + ~SkPixelRef() override; + + SkISize dimensions() const { return {fWidth, fHeight}; } + int width() const { return fWidth; } + int height() const { return fHeight; } + void* pixels() const { return fPixels; } + size_t rowBytes() const { return fRowBytes; } + + /** Returns a non-zero, unique value corresponding to the pixels in this + pixelref. Each time the pixels are changed (and notifyPixelsChanged is + called), a different generation ID will be returned. + */ + uint32_t getGenerationID() const; + + /** + * Call this if you have changed the contents of the pixels. This will in- + * turn cause a different generation ID value to be returned from + * getGenerationID(). + */ + void notifyPixelsChanged(); + + /** Returns true if this pixelref is marked as immutable, meaning that the + contents of its pixels will not change for the lifetime of the pixelref. + */ + bool isImmutable() const { return fMutability != kMutable; } + + /** Marks this pixelref is immutable, meaning that the contents of its + pixels will not change for the lifetime of the pixelref. This state can + be set on a pixelref, but it cannot be cleared once it is set. + */ + void setImmutable(); + + // Register a listener that may be called the next time our generation ID changes. + // + // We'll only call the listener if we're confident that we are the only SkPixelRef with this + // generation ID. If our generation ID changes and we decide not to call the listener, we'll + // never call it: you must add a new listener for each generation ID change. We also won't call + // the listener when we're certain no one knows what our generation ID is. + // + // This can be used to invalidate caches keyed by SkPixelRef generation ID. + // Takes ownership of listener. Threadsafe. + void addGenIDChangeListener(sk_sp<SkIDChangeListener> listener); + + // Call when this pixelref is part of the key to a resourcecache entry. This allows the cache + // to know automatically those entries can be purged when this pixelref is changed or deleted. + void notifyAddedToCache() { + fAddedToCache.store(true); + } + + virtual SkDiscardableMemory* diagnostic_only_getDiscardable() const { return nullptr; } + +protected: + void android_only_reset(int width, int height, size_t rowBytes); + +private: + int fWidth; + int fHeight; + void* fPixels; + size_t fRowBytes; + + // Bottom bit indicates the Gen ID is unique. + bool genIDIsUnique() const { return SkToBool(fTaggedGenID.load() & 1); } + mutable std::atomic<uint32_t> fTaggedGenID; + + SkIDChangeListener::List fGenIDChangeListeners; + + // Set true by caches when they cache content that's derived from the current pixels. + std::atomic<bool> fAddedToCache; + + enum Mutability { + kMutable, // PixelRefs begin mutable. 
+ kTemporarilyImmutable, // Considered immutable, but can revert to mutable. + kImmutable, // Once set to this state, it never leaves. + } fMutability : 8; // easily fits inside a byte + + void needsNewGenID(); + void callGenIDChangeListeners(); + + void setTemporarilyImmutable(); + void restoreMutability(); + friend class SkSurface_Raster; // For the two methods above. + + void setImmutableWithID(uint32_t genID); + friend void SkBitmapCache_setImmutableWithID(SkPixelRef*, uint32_t); + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPixmap.h b/src/deps/skia/include/core/SkPixmap.h new file mode 100644 index 000000000..ba1268a20 --- /dev/null +++ b/src/deps/skia/include/core/SkPixmap.h @@ -0,0 +1,720 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPixmap_DEFINED +#define SkPixmap_DEFINED + +#include "include/core/SkColor.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkSamplingOptions.h" + +class SkData; +struct SkMask; + +/** \class SkPixmap + SkPixmap provides a utility to pair SkImageInfo with pixels and row bytes. + SkPixmap is a low level class which provides convenience functions to access + raster destinations. SkCanvas can not draw SkPixmap, nor does SkPixmap provide + a direct drawing destination. + + Use SkBitmap to draw pixels referenced by SkPixmap; use SkSurface to draw into + pixels referenced by SkPixmap. + + SkPixmap does not try to manage the lifetime of the pixel memory. Use SkPixelRef + to manage pixel memory; SkPixelRef is safe across threads. +*/ +class SK_API SkPixmap { +public: + + /** Creates an empty SkPixmap without pixels, with kUnknown_SkColorType, with + kUnknown_SkAlphaType, and with a width and height of zero. Use + reset() to associate pixels, SkColorType, SkAlphaType, width, and height + after SkPixmap has been created. + + @return empty SkPixmap + */ + SkPixmap() + : fPixels(nullptr), fRowBytes(0), fInfo(SkImageInfo::MakeUnknown(0, 0)) + {} + + /** Creates SkPixmap from info width, height, SkAlphaType, and SkColorType. + addr points to pixels, or nullptr. rowBytes should be info.width() times + info.bytesPerPixel(), or larger. + + No parameter checking is performed; it is up to the caller to ensure that + addr and rowBytes agree with info. + + The memory lifetime of pixels is managed by the caller. When SkPixmap goes + out of scope, addr is unaffected. + + SkPixmap may be later modified by reset() to change its size, pixel type, or + storage. + + @param info width, height, SkAlphaType, SkColorType of SkImageInfo + @param addr pointer to pixels allocated by caller; may be nullptr + @param rowBytes size of one row of addr; width times pixel size, or larger + @return initialized SkPixmap + */ + SkPixmap(const SkImageInfo& info, const void* addr, size_t rowBytes) + : fPixels(addr), fRowBytes(rowBytes), fInfo(info) + {} + + /** Sets width, height, row bytes to zero; pixel address to nullptr; SkColorType to + kUnknown_SkColorType; and SkAlphaType to kUnknown_SkAlphaType. + + The prior pixels are unaffected; it is up to the caller to release pixels + memory if desired. + + example: https://fiddle.skia.org/c/@Pixmap_reset + */ + void reset(); + + /** Sets width, height, SkAlphaType, and SkColorType from info. + Sets pixel address from addr, which may be nullptr. + Sets row bytes from rowBytes, which should be info.width() times + info.bytesPerPixel(), or larger. + + Does not check addr. 
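// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It wraps caller-owned memory in an SkPixmap, as the constructor and
// reset() documentation above describe; the 64x64 size is arbitrary.]
#include <vector>
#include "include/core/SkImageInfo.h"
#include "include/core/SkPixmap.h"

static void wrapCallerMemory() {
    const SkImageInfo info = SkImageInfo::MakeN32Premul(64, 64);
    std::vector<uint32_t> storage(info.width() * info.height());
    // The pixmap only points at `storage`; lifetime stays with the caller.
    SkPixmap pixmap(info, storage.data(), info.minRowBytes());
    pixmap.erase(SK_ColorTRANSPARENT);
}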
Asserts if built with SK_DEBUG defined and if rowBytes is + too small to hold one row of pixels. + + The memory lifetime pixels are managed by the caller. When SkPixmap goes + out of scope, addr is unaffected. + + @param info width, height, SkAlphaType, SkColorType of SkImageInfo + @param addr pointer to pixels allocated by caller; may be nullptr + @param rowBytes size of one row of addr; width times pixel size, or larger + + example: https://fiddle.skia.org/c/@Pixmap_reset_2 + */ + void reset(const SkImageInfo& info, const void* addr, size_t rowBytes); + + /** Changes SkColorSpace in SkImageInfo; preserves width, height, SkAlphaType, and + SkColorType in SkImage, and leaves pixel address and row bytes unchanged. + SkColorSpace reference count is incremented. + + @param colorSpace SkColorSpace moved to SkImageInfo + + example: https://fiddle.skia.org/c/@Pixmap_setColorSpace + */ + void setColorSpace(sk_sp<SkColorSpace> colorSpace); + + /** Deprecated. + */ + bool SK_WARN_UNUSED_RESULT reset(const SkMask& mask); + + /** Sets subset width, height, pixel address to intersection of SkPixmap with area, + if intersection is not empty; and return true. Otherwise, leave subset unchanged + and return false. + + Failing to read the return value generates a compile time warning. + + @param subset storage for width, height, pixel address of intersection + @param area bounds to intersect with SkPixmap + @return true if intersection of SkPixmap and area is not empty + */ + bool SK_WARN_UNUSED_RESULT extractSubset(SkPixmap* subset, const SkIRect& area) const; + + /** Returns width, height, SkAlphaType, SkColorType, and SkColorSpace. + + @return reference to SkImageInfo + */ + const SkImageInfo& info() const { return fInfo; } + + /** Returns row bytes, the interval from one pixel row to the next. Row bytes + is at least as large as: width() * info().bytesPerPixel(). + + Returns zero if colorType() is kUnknown_SkColorType. + It is up to the SkBitmap creator to ensure that row bytes is a useful value. + + @return byte length of pixel row + */ + size_t rowBytes() const { return fRowBytes; } + + /** Returns pixel address, the base address corresponding to the pixel origin. + + It is up to the SkPixmap creator to ensure that pixel address is a useful value. + + @return pixel address + */ + const void* addr() const { return fPixels; } + + /** Returns pixel count in each pixel row. Should be equal or less than: + rowBytes() / info().bytesPerPixel(). + + @return pixel width in SkImageInfo + */ + int width() const { return fInfo.width(); } + + /** Returns pixel row count. + + @return pixel height in SkImageInfo + */ + int height() const { return fInfo.height(); } + + /** + * Return the dimensions of the pixmap (from its ImageInfo) + */ + SkISize dimensions() const { return fInfo.dimensions(); } + + SkColorType colorType() const { return fInfo.colorType(); } + + SkAlphaType alphaType() const { return fInfo.alphaType(); } + + /** Returns SkColorSpace, the range of colors, associated with SkImageInfo. The + reference count of SkColorSpace is unchanged. The returned SkColorSpace is + immutable. + + @return SkColorSpace in SkImageInfo, or nullptr + */ + SkColorSpace* colorSpace() const { return fInfo.colorSpace(); } + + /** Returns smart pointer to SkColorSpace, the range of colors, associated with + SkImageInfo. The smart pointer tracks the number of objects sharing this + SkColorSpace reference so the memory is released when the owners destruct. + + The returned SkColorSpace is immutable. 
+ + @return SkColorSpace in SkImageInfo wrapped in a smart pointer + */ + sk_sp<SkColorSpace> refColorSpace() const { return fInfo.refColorSpace(); } + + /** Returns true if SkAlphaType is kOpaque_SkAlphaType. + Does not check if SkColorType allows alpha, or if any pixel value has + transparency. + + @return true if SkImageInfo has opaque SkAlphaType + */ + bool isOpaque() const { return fInfo.isOpaque(); } + + /** Returns SkIRect { 0, 0, width(), height() }. + + @return integral rectangle from origin to width() and height() + */ + SkIRect bounds() const { return SkIRect::MakeWH(this->width(), this->height()); } + + /** Returns number of pixels that fit on row. Should be greater than or equal to + width(). + + @return maximum pixels per row + */ + int rowBytesAsPixels() const { return int(fRowBytes >> this->shiftPerPixel()); } + + /** Returns bit shift converting row bytes to row pixels. + Returns zero for kUnknown_SkColorType. + + @return one of: 0, 1, 2, 3; left shift to convert pixels to bytes + */ + int shiftPerPixel() const { return fInfo.shiftPerPixel(); } + + /** Returns minimum memory required for pixel storage. + Does not include unused memory on last row when rowBytesAsPixels() exceeds width(). + Returns SIZE_MAX if result does not fit in size_t. + Returns zero if height() or width() is 0. + Returns height() times rowBytes() if colorType() is kUnknown_SkColorType. + + @return size in bytes of image buffer + */ + size_t computeByteSize() const { return fInfo.computeByteSize(fRowBytes); } + + /** Returns true if all pixels are opaque. SkColorType determines how pixels + are encoded, and whether pixel describes alpha. Returns true for SkColorType + without alpha in each pixel; for other SkColorType, returns true if all + pixels have alpha values equivalent to 1.0 or greater. + + For SkColorType kRGB_565_SkColorType or kGray_8_SkColorType: always + returns true. For SkColorType kAlpha_8_SkColorType, kBGRA_8888_SkColorType, + kRGBA_8888_SkColorType: returns true if all pixel alpha values are 255. + For SkColorType kARGB_4444_SkColorType: returns true if all pixel alpha values are 15. + For kRGBA_F16_SkColorType: returns true if all pixel alpha values are 1.0 or + greater. + + Returns false for kUnknown_SkColorType. + + @return true if all pixels have opaque values or SkColorType is opaque + + example: https://fiddle.skia.org/c/@Pixmap_computeIsOpaque + */ + bool computeIsOpaque() const; + + /** Returns pixel at (x, y) as unpremultiplied color. + Returns black with alpha if SkColorType is kAlpha_8_SkColorType. + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined; and returns undefined values or may crash if + SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or + pixel address is nullptr. + + SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the + conversion to unpremultiplied color; original pixel data may have additional + precision. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return pixel converted to unpremultiplied color + + example: https://fiddle.skia.org/c/@Pixmap_getColor + */ + SkColor getColor(int x, int y) const; + + /** Look up the pixel at (x,y) and return its alpha component, normalized to [0..1]. + This is roughly equivalent to SkGetColorA(getColor()), but can be more efficent + (and more precise if the pixels store more than 8 bits per component). 
+ + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return alpha converted to normalized float + */ + float getAlphaf(int x, int y) const; + + /** Returns readable pixel address at (x, y). Returns nullptr if SkPixelRef is nullptr. + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined. Returns nullptr if SkColorType is kUnknown_SkColorType. + + Performs a lookup of pixel size; for better performance, call + one of: addr8, addr16, addr32, addr64, or addrF16(). + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return readable generic pointer to pixel + */ + const void* addr(int x, int y) const { + return (const char*)fPixels + fInfo.computeOffset(x, y, fRowBytes); + } + + /** Returns readable base pixel address. Result is addressable as unsigned 8-bit bytes. + Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType or + kGray_8_SkColorType, and is built with SK_DEBUG defined. + + One byte corresponds to one pixel. + + @return readable unsigned 8-bit pointer to pixels + */ + const uint8_t* addr8() const { + SkASSERT(1 == fInfo.bytesPerPixel()); + return reinterpret_cast<const uint8_t*>(fPixels); + } + + /** Returns readable base pixel address. Result is addressable as unsigned 16-bit words. + Will trigger an assert() if SkColorType is not kRGB_565_SkColorType or + kARGB_4444_SkColorType, and is built with SK_DEBUG defined. + + One word corresponds to one pixel. + + @return readable unsigned 16-bit pointer to pixels + */ + const uint16_t* addr16() const { + SkASSERT(2 == fInfo.bytesPerPixel()); + return reinterpret_cast<const uint16_t*>(fPixels); + } + + /** Returns readable base pixel address. Result is addressable as unsigned 32-bit words. + Will trigger an assert() if SkColorType is not kRGBA_8888_SkColorType or + kBGRA_8888_SkColorType, and is built with SK_DEBUG defined. + + One word corresponds to one pixel. + + @return readable unsigned 32-bit pointer to pixels + */ + const uint32_t* addr32() const { + SkASSERT(4 == fInfo.bytesPerPixel()); + return reinterpret_cast<const uint32_t*>(fPixels); + } + + /** Returns readable base pixel address. Result is addressable as unsigned 64-bit words. + Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built + with SK_DEBUG defined. + + One word corresponds to one pixel. + + @return readable unsigned 64-bit pointer to pixels + */ + const uint64_t* addr64() const { + SkASSERT(8 == fInfo.bytesPerPixel()); + return reinterpret_cast<const uint64_t*>(fPixels); + } + + /** Returns readable base pixel address. Result is addressable as unsigned 16-bit words. + Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built + with SK_DEBUG defined. + + Each word represents one color component encoded as a half float. + Four words correspond to one pixel. + + @return readable unsigned 16-bit pointer to first component of pixels + */ + const uint16_t* addrF16() const { + SkASSERT(8 == fInfo.bytesPerPixel()); + SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType() || + kRGBA_F16Norm_SkColorType == fInfo.colorType()); + return reinterpret_cast<const uint16_t*>(fPixels); + } + + /** Returns readable pixel address at (x, y). + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined. 
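// [Editorial aside: an illustrative sketch, not part of the vendored header or of
// this diff. It contrasts the typed addr32() accessor above with getColor(); the
// helper name is made up.]
#include "include/core/SkPixmap.h"

static SkColor topLeftPixel(const SkPixmap& pm) {
    if (pm.width() == 0 || pm.height() == 0) {
        return SK_ColorTRANSPARENT;
    }
    if (pm.colorType() == kN32_SkColorType) {
        // Raw, premultiplied 32-bit word; addr32() asserts in debug builds if the
        // pixel size is not 4 bytes, as documented above.
        uint32_t raw = *pm.addr32(0, 0);
        (void)raw;
    }
    return pm.getColor(0, 0);  // unpremultiplied, works for any color type
}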
+ + Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType or + kGray_8_SkColorType, and is built with SK_DEBUG defined. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return readable unsigned 8-bit pointer to pixel at (x, y) + */ + const uint8_t* addr8(int x, int y) const { + SkASSERT((unsigned)x < (unsigned)fInfo.width()); + SkASSERT((unsigned)y < (unsigned)fInfo.height()); + return (const uint8_t*)((const char*)this->addr8() + (size_t)y * fRowBytes + (x << 0)); + } + + /** Returns readable pixel address at (x, y). + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined. + + Will trigger an assert() if SkColorType is not kRGB_565_SkColorType or + kARGB_4444_SkColorType, and is built with SK_DEBUG defined. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return readable unsigned 16-bit pointer to pixel at (x, y) + */ + const uint16_t* addr16(int x, int y) const { + SkASSERT((unsigned)x < (unsigned)fInfo.width()); + SkASSERT((unsigned)y < (unsigned)fInfo.height()); + return (const uint16_t*)((const char*)this->addr16() + (size_t)y * fRowBytes + (x << 1)); + } + + /** Returns readable pixel address at (x, y). + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined. + + Will trigger an assert() if SkColorType is not kRGBA_8888_SkColorType or + kBGRA_8888_SkColorType, and is built with SK_DEBUG defined. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return readable unsigned 32-bit pointer to pixel at (x, y) + */ + const uint32_t* addr32(int x, int y) const { + SkASSERT((unsigned)x < (unsigned)fInfo.width()); + SkASSERT((unsigned)y < (unsigned)fInfo.height()); + return (const uint32_t*)((const char*)this->addr32() + (size_t)y * fRowBytes + (x << 2)); + } + + /** Returns readable pixel address at (x, y). + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined. + + Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built + with SK_DEBUG defined. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return readable unsigned 64-bit pointer to pixel at (x, y) + */ + const uint64_t* addr64(int x, int y) const { + SkASSERT((unsigned)x < (unsigned)fInfo.width()); + SkASSERT((unsigned)y < (unsigned)fInfo.height()); + return (const uint64_t*)((const char*)this->addr64() + (size_t)y * fRowBytes + (x << 3)); + } + + /** Returns readable pixel address at (x, y). + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined. + + Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built + with SK_DEBUG defined. + + Each unsigned 16-bit word represents one color component encoded as a half float. + Four words correspond to one pixel. 
+ + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return readable unsigned 16-bit pointer to pixel component at (x, y) + */ + const uint16_t* addrF16(int x, int y) const { + SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType() || + kRGBA_F16Norm_SkColorType == fInfo.colorType()); + return reinterpret_cast<const uint16_t*>(this->addr64(x, y)); + } + + /** Returns writable base pixel address. + + @return writable generic base pointer to pixels + */ + void* writable_addr() const { return const_cast<void*>(fPixels); } + + /** Returns writable pixel address at (x, y). + + Input is not validated: out of bounds values of x or y trigger an assert() if + built with SK_DEBUG defined. Returns zero if SkColorType is kUnknown_SkColorType. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return writable generic pointer to pixel + */ + void* writable_addr(int x, int y) const { + return const_cast<void*>(this->addr(x, y)); + } + + /** Returns writable pixel address at (x, y). Result is addressable as unsigned + 8-bit bytes. Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType + or kGray_8_SkColorType, and is built with SK_DEBUG defined. + + One byte corresponds to one pixel. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return writable unsigned 8-bit pointer to pixels + */ + uint8_t* writable_addr8(int x, int y) const { + return const_cast<uint8_t*>(this->addr8(x, y)); + } + + /** Returns writable_addr pixel address at (x, y). Result is addressable as unsigned + 16-bit words. Will trigger an assert() if SkColorType is not kRGB_565_SkColorType + or kARGB_4444_SkColorType, and is built with SK_DEBUG defined. + + One word corresponds to one pixel. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return writable unsigned 16-bit pointer to pixel + */ + uint16_t* writable_addr16(int x, int y) const { + return const_cast<uint16_t*>(this->addr16(x, y)); + } + + /** Returns writable pixel address at (x, y). Result is addressable as unsigned + 32-bit words. Will trigger an assert() if SkColorType is not + kRGBA_8888_SkColorType or kBGRA_8888_SkColorType, and is built with SK_DEBUG + defined. + + One word corresponds to one pixel. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return writable unsigned 32-bit pointer to pixel + */ + uint32_t* writable_addr32(int x, int y) const { + return const_cast<uint32_t*>(this->addr32(x, y)); + } + + /** Returns writable pixel address at (x, y). Result is addressable as unsigned + 64-bit words. Will trigger an assert() if SkColorType is not + kRGBA_F16_SkColorType and is built with SK_DEBUG defined. + + One word corresponds to one pixel. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return writable unsigned 64-bit pointer to pixel + */ + uint64_t* writable_addr64(int x, int y) const { + return const_cast<uint64_t*>(this->addr64(x, y)); + } + + /** Returns writable pixel address at (x, y). Result is addressable as unsigned + 16-bit words. Will trigger an assert() if SkColorType is not + kRGBA_F16_SkColorType and is built with SK_DEBUG defined. 
+ + Each word represents one color component encoded as a half float. + Four words correspond to one pixel. + + @param x column index, zero or greater, and less than width() + @param y row index, zero or greater, and less than height() + @return writable unsigned 16-bit pointer to first component of pixel + */ + uint16_t* writable_addrF16(int x, int y) const { + return reinterpret_cast<uint16_t*>(writable_addr64(x, y)); + } + + /** Copies a SkRect of pixels to dstPixels. Copy starts at (0, 0), and does not + exceed SkPixmap (width(), height()). + + dstInfo specifies width, height, SkColorType, SkAlphaType, and + SkColorSpace of destination. dstRowBytes specifics the gap from one destination + row to the next. Returns true if pixels are copied. Returns false if + dstInfo address equals nullptr, or dstRowBytes is less than dstInfo.minRowBytes(). + + Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match. + If SkPixmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match. + If SkPixmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must + match. If SkPixmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns + false if pixel conversion is not possible. + + Returns false if SkPixmap width() or height() is zero or negative. + + @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace + @param dstPixels destination pixel storage + @param dstRowBytes destination row length + @return true if pixels are copied to dstPixels + */ + bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes) const { + return this->readPixels(dstInfo, dstPixels, dstRowBytes, 0, 0); + } + + /** Copies a SkRect of pixels to dstPixels. Copy starts at (srcX, srcY), and does not + exceed SkPixmap (width(), height()). + + dstInfo specifies width, height, SkColorType, SkAlphaType, and + SkColorSpace of destination. dstRowBytes specifics the gap from one destination + row to the next. Returns true if pixels are copied. Returns false if + dstInfo address equals nullptr, or dstRowBytes is less than dstInfo.minRowBytes(). + + Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match. + If SkPixmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match. + If SkPixmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must + match. If SkPixmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns + false if pixel conversion is not possible. + + srcX and srcY may be negative to copy only top or left of source. Returns + false if SkPixmap width() or height() is zero or negative. Returns false if: + abs(srcX) >= Pixmap width(), or if abs(srcY) >= Pixmap height(). + + @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace + @param dstPixels destination pixel storage + @param dstRowBytes destination row length + @param srcX column index whose absolute value is less than width() + @param srcY row index whose absolute value is less than height() + @return true if pixels are copied to dstPixels + */ + bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, int srcX, + int srcY) const; + + /** Copies a SkRect of pixels to dst. Copy starts at (srcX, srcY), and does not + exceed SkPixmap (width(), height()). 
dst specifies width, height, SkColorType, + SkAlphaType, and SkColorSpace of destination. Returns true if pixels are copied. + Returns false if dst address equals nullptr, or dst.rowBytes() is less than + dst SkImageInfo::minRowBytes. + + Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.info().colorType must match. + If SkPixmap colorType() is kGray_8_SkColorType, dst.info().colorSpace must match. + If SkPixmap alphaType() is kOpaque_SkAlphaType, dst.info().alphaType must + match. If SkPixmap colorSpace() is nullptr, dst.info().colorSpace must match. Returns + false if pixel conversion is not possible. + + srcX and srcY may be negative to copy only top or left of source. Returns + false SkPixmap width() or height() is zero or negative. Returns false if: + abs(srcX) >= Pixmap width(), or if abs(srcY) >= Pixmap height(). + + @param dst SkImageInfo and pixel address to write to + @param srcX column index whose absolute value is less than width() + @param srcY row index whose absolute value is less than height() + @return true if pixels are copied to dst + */ + bool readPixels(const SkPixmap& dst, int srcX, int srcY) const { + return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), srcX, srcY); + } + + /** Copies pixels inside bounds() to dst. dst specifies width, height, SkColorType, + SkAlphaType, and SkColorSpace of destination. Returns true if pixels are copied. + Returns false if dst address equals nullptr, or dst.rowBytes() is less than + dst SkImageInfo::minRowBytes. + + Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match. + If SkPixmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match. + If SkPixmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must + match. If SkPixmap colorSpace() is nullptr, dst SkColorSpace must match. Returns + false if pixel conversion is not possible. + + Returns false if SkPixmap width() or height() is zero or negative. + + @param dst SkImageInfo and pixel address to write to + @return true if pixels are copied to dst + */ + bool readPixels(const SkPixmap& dst) const { + return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), 0, 0); + } + + /** Copies SkBitmap to dst, scaling pixels to fit dst.width() and dst.height(), and + converting pixels to match dst.colorType() and dst.alphaType(). Returns true if + pixels are copied. Returns false if dst address is nullptr, or dst.rowBytes() is + less than dst SkImageInfo::minRowBytes. + + Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is + kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match. + If SkPixmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match. + If SkPixmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must + match. If SkPixmap colorSpace() is nullptr, dst SkColorSpace must match. Returns + false if pixel conversion is not possible. + + Returns false if SkBitmap width() or height() is zero or negative. + + @param dst SkImageInfo and pixel address to write to + @return true if pixels are scaled to fit dst + + example: https://fiddle.skia.org/c/@Pixmap_scalePixels + */ + bool scalePixels(const SkPixmap& dst, const SkSamplingOptions&) const; + + /** Writes color to pixels bounded by subset; returns true on success. 
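[Editorial note] scalePixels() performs the conversion and the resize in one call. A sketch, assuming a source pixmap `src`; the SkSamplingOptions/SkFilterMode types are part of the same library but documented elsewhere, so treat the exact constructor as an assumption to verify:

    // Downscale `src` into a caller-allocated half-size pixmap with linear filtering.
    SkImageInfo dstInfo = src.info().makeWH(src.width() / 2, src.height() / 2);
    std::vector<uint8_t> storage(dstInfo.computeMinByteSize());
    SkPixmap dst(dstInfo, storage.data(), dstInfo.minRowBytes());
    bool ok = src.scalePixels(dst, SkSamplingOptions(SkFilterMode::kLinear));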
+ Returns false if colorType() is kUnknown_SkColorType, or if subset does + not intersect bounds(). + + @param color sRGB unpremultiplied color to write + @param subset bounding integer SkRect of written pixels + @return true if pixels are changed + + example: https://fiddle.skia.org/c/@Pixmap_erase + */ + bool erase(SkColor color, const SkIRect& subset) const; + + /** Writes color to pixels inside bounds(); returns true on success. + Returns false if colorType() is kUnknown_SkColorType, or if bounds() + is empty. + + @param color sRGB unpremultiplied color to write + @return true if pixels are changed + */ + bool erase(SkColor color) const { return this->erase(color, this->bounds()); } + + /** Writes color to pixels bounded by subset; returns true on success. + if subset is nullptr, writes colors pixels inside bounds(). Returns false if + colorType() is kUnknown_SkColorType, if subset is not nullptr and does + not intersect bounds(), or if subset is nullptr and bounds() is empty. + + @param color sRGB unpremultiplied color to write + @param subset bounding integer SkRect of pixels to write; may be nullptr + @return true if pixels are changed + + example: https://fiddle.skia.org/c/@Pixmap_erase_3 + */ + bool erase(const SkColor4f& color, const SkIRect* subset = nullptr) const { + return this->erase(color, nullptr, subset); + } + + /** Writes color to pixels bounded by subset; returns true on success. + if subset is nullptr, writes colors pixels inside bounds(). Returns false if + colorType() is kUnknown_SkColorType, if subset is not nullptr and does + not intersect bounds(), or if subset is nullptr and bounds() is empty. + + @param color unpremultiplied color to write + @param cs SkColorSpace of color + @param subset bounding integer SkRect of pixels to write; may be nullptr + @return true if pixels are changed + */ + bool erase(const SkColor4f& color, SkColorSpace* cs, const SkIRect* subset = nullptr) const; + +private: + const void* fPixels; + size_t fRowBytes; + SkImageInfo fInfo; + + friend class SkPixmapPriv; +}; + +#endif diff --git a/src/deps/skia/include/core/SkPngChunkReader.h b/src/deps/skia/include/core/SkPngChunkReader.h new file mode 100644 index 000000000..0ee8a9ecc --- /dev/null +++ b/src/deps/skia/include/core/SkPngChunkReader.h @@ -0,0 +1,45 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPngChunkReader_DEFINED +#define SkPngChunkReader_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" + +/** + * SkPngChunkReader + * + * Base class for optional callbacks to retrieve meta/chunk data out of a PNG + * encoded image as it is being decoded. + * Used by SkCodec. + */ +class SkPngChunkReader : public SkRefCnt { +public: + /** + * This will be called by the decoder when it sees an unknown chunk. + * + * Use by SkCodec: + * Depending on the location of the unknown chunks, this callback may be + * called by + * - the factory (NewFromStream/NewFromData) + * - getPixels + * - startScanlineDecode + * - the first call to getScanlines/skipScanlines + * The callback may be called from a different thread (e.g. if the SkCodec + * is passed to another thread), and it may be called multiple times, if + * the SkCodec is used multiple times. + * + * @param tag Name for this type of chunk. + * @param data Data to be interpreted by the subclass. + * @param length Number of bytes of data in the chunk. 
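[Editorial note] Tying together the erase() overloads documented above, a short sketch assuming a writable SkPixmap `pm` (SK_ColorRED and SK_ColorTRANSPARENT are the standard SkColor constants):

    pm.erase(SK_ColorRED);                                   // fill the whole pixmap opaque red
    pm.erase(SK_ColorTRANSPARENT, SkIRect::MakeWH(16, 16));  // clear only the top-left 16x16 block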
+ * @return true to continue decoding, or false to indicate an error, which + * will cause the decoder to not return the image. + */ + virtual bool readChunk(const char tag[], const void* data, size_t length) = 0; +}; +#endif // SkPngChunkReader_DEFINED diff --git a/src/deps/skia/include/core/SkPoint.h b/src/deps/skia/include/core/SkPoint.h new file mode 100644 index 000000000..92cb0b7f0 --- /dev/null +++ b/src/deps/skia/include/core/SkPoint.h @@ -0,0 +1,566 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPoint_DEFINED +#define SkPoint_DEFINED + +#include "include/core/SkMath.h" +#include "include/core/SkScalar.h" +#include "include/private/SkSafe32.h" + +struct SkIPoint; + +/** SkIVector provides an alternative name for SkIPoint. SkIVector and SkIPoint + can be used interchangeably for all purposes. +*/ +typedef SkIPoint SkIVector; + +/** \struct SkIPoint + SkIPoint holds two 32-bit integer coordinates. +*/ +struct SkIPoint { + int32_t fX; //!< x-axis value + int32_t fY; //!< y-axis value + + /** Sets fX to x, fY to y. + + @param x integer x-axis value of constructed SkIPoint + @param y integer y-axis value of constructed SkIPoint + @return SkIPoint (x, y) + */ + static constexpr SkIPoint Make(int32_t x, int32_t y) { + return {x, y}; + } + + /** Returns x-axis value of SkIPoint. + + @return fX + */ + constexpr int32_t x() const { return fX; } + + /** Returns y-axis value of SkIPoint. + + @return fY + */ + constexpr int32_t y() const { return fY; } + + /** Returns true if fX and fY are both zero. + + @return true if fX is zero and fY is zero + */ + bool isZero() const { return (fX | fY) == 0; } + + /** Sets fX to x and fY to y. + + @param x new value for fX + @param y new value for fY + */ + void set(int32_t x, int32_t y) { + fX = x; + fY = y; + } + + /** Returns SkIPoint changing the signs of fX and fY. + + @return SkIPoint as (-fX, -fY) + */ + SkIPoint operator-() const { + return {-fX, -fY}; + } + + /** Offsets SkIPoint by ivector v. Sets SkIPoint to (fX + v.fX, fY + v.fY). + + @param v ivector to add + */ + void operator+=(const SkIVector& v) { + fX = Sk32_sat_add(fX, v.fX); + fY = Sk32_sat_add(fY, v.fY); + } + + /** Subtracts ivector v from SkIPoint. Sets SkIPoint to: (fX - v.fX, fY - v.fY). + + @param v ivector to subtract + */ + void operator-=(const SkIVector& v) { + fX = Sk32_sat_sub(fX, v.fX); + fY = Sk32_sat_sub(fY, v.fY); + } + + /** Returns true if SkIPoint is equivalent to SkIPoint constructed from (x, y). + + @param x value compared with fX + @param y value compared with fY + @return true if SkIPoint equals (x, y) + */ + bool equals(int32_t x, int32_t y) const { + return fX == x && fY == y; + } + + /** Returns true if a is equivalent to b. + + @param a SkIPoint to compare + @param b SkIPoint to compare + @return true if a.fX == b.fX and a.fY == b.fY + */ + friend bool operator==(const SkIPoint& a, const SkIPoint& b) { + return a.fX == b.fX && a.fY == b.fY; + } + + /** Returns true if a is not equivalent to b. + + @param a SkIPoint to compare + @param b SkIPoint to compare + @return true if a.fX != b.fX or a.fY != b.fY + */ + friend bool operator!=(const SkIPoint& a, const SkIPoint& b) { + return a.fX != b.fX || a.fY != b.fY; + } + + /** Returns ivector from b to a; computed as (a.fX - b.fX, a.fY - b.fY). + + Can also be used to subtract ivector from ivector, returning ivector. 
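[Editorial note] A minimal SkPngChunkReader implementation (for the class defined above) might only record which unknown chunks were seen and let decoding continue; hooking it up to an SkCodec factory is mentioned in the class comment but the exact factory signature should be checked against SkCodec.h.

    #include "include/core/SkPngChunkReader.h"
    #include <string>
    #include <vector>

    // Records the tag of every unknown chunk and keeps decoding.
    class ChunkLogger : public SkPngChunkReader {
    public:
        bool readChunk(const char tag[], const void* data, size_t length) override {
            fTags.emplace_back(tag);
            return true;  // returning false would abort the decode
        }
        std::vector<std::string> fTags;
    };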
+ + @param a SkIPoint or ivector to subtract from + @param b ivector to subtract + @return ivector from b to a + */ + friend SkIVector operator-(const SkIPoint& a, const SkIPoint& b) { + return { Sk32_sat_sub(a.fX, b.fX), Sk32_sat_sub(a.fY, b.fY) }; + } + + /** Returns SkIPoint resulting from SkIPoint a offset by ivector b, computed as: + (a.fX + b.fX, a.fY + b.fY). + + Can also be used to offset SkIPoint b by ivector a, returning SkIPoint. + Can also be used to add ivector to ivector, returning ivector. + + @param a SkIPoint or ivector to add to + @param b SkIPoint or ivector to add + @return SkIPoint equal to a offset by b + */ + friend SkIPoint operator+(const SkIPoint& a, const SkIVector& b) { + return { Sk32_sat_add(a.fX, b.fX), Sk32_sat_add(a.fY, b.fY) }; + } +}; + +struct SkPoint; + +/** SkVector provides an alternative name for SkPoint. SkVector and SkPoint can + be used interchangeably for all purposes. +*/ +typedef SkPoint SkVector; + +/** \struct SkPoint + SkPoint holds two 32-bit floating point coordinates. +*/ +struct SK_API SkPoint { + SkScalar fX; //!< x-axis value + SkScalar fY; //!< y-axis value + + /** Sets fX to x, fY to y. Used both to set SkPoint and vector. + + @param x SkScalar x-axis value of constructed SkPoint or vector + @param y SkScalar y-axis value of constructed SkPoint or vector + @return SkPoint (x, y) + */ + static constexpr SkPoint Make(SkScalar x, SkScalar y) { + return {x, y}; + } + + /** Returns x-axis value of SkPoint or vector. + + @return fX + */ + constexpr SkScalar x() const { return fX; } + + /** Returns y-axis value of SkPoint or vector. + + @return fY + */ + constexpr SkScalar y() const { return fY; } + + /** Returns true if fX and fY are both zero. + + @return true if fX is zero and fY is zero + */ + bool isZero() const { return (0 == fX) & (0 == fY); } + + /** Sets fX to x and fY to y. + + @param x new value for fX + @param y new value for fY + */ + void set(SkScalar x, SkScalar y) { + fX = x; + fY = y; + } + + /** Sets fX to x and fY to y, promoting integers to SkScalar values. + + Assigning a large integer value directly to fX or fY may cause a compiler + error, triggered by narrowing conversion of int to SkScalar. This safely + casts x and y to avoid the error. + + @param x new value for fX + @param y new value for fY + */ + void iset(int32_t x, int32_t y) { + fX = SkIntToScalar(x); + fY = SkIntToScalar(y); + } + + /** Sets fX to p.fX and fY to p.fY, promoting integers to SkScalar values. + + Assigning an SkIPoint containing a large integer value directly to fX or fY may + cause a compiler error, triggered by narrowing conversion of int to SkScalar. + This safely casts p.fX and p.fY to avoid the error. + + @param p SkIPoint members promoted to SkScalar + */ + void iset(const SkIPoint& p) { + fX = SkIntToScalar(p.fX); + fY = SkIntToScalar(p.fY); + } + + /** Sets fX to absolute value of pt.fX; and fY to absolute value of pt.fY. + + @param pt members providing magnitude for fX and fY + */ + void setAbs(const SkPoint& pt) { + fX = SkScalarAbs(pt.fX); + fY = SkScalarAbs(pt.fY); + } + + /** Adds offset to each SkPoint in points array with count entries. + + @param points SkPoint array + @param count entries in array + @param offset vector added to points + */ + static void Offset(SkPoint points[], int count, const SkVector& offset) { + Offset(points, count, offset.fX, offset.fY); + } + + /** Adds offset (dx, dy) to each SkPoint in points array of length count. 
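[Editorial note] A quick sketch of the array form of SkPoint::Offset() documented above:

    SkPoint pts[3] = {{0, 0}, {10, 5}, {20, 10}};
    SkPoint::Offset(pts, 3, SkVector::Make(2.5f, -1.0f));  // every point shifts by (2.5, -1)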
+ + @param points SkPoint array + @param count entries in array + @param dx added to fX in points + @param dy added to fY in points + */ + static void Offset(SkPoint points[], int count, SkScalar dx, SkScalar dy) { + for (int i = 0; i < count; ++i) { + points[i].offset(dx, dy); + } + } + + /** Adds offset (dx, dy) to SkPoint. + + @param dx added to fX + @param dy added to fY + */ + void offset(SkScalar dx, SkScalar dy) { + fX += dx; + fY += dy; + } + + /** Returns the Euclidean distance from origin, computed as: + + sqrt(fX * fX + fY * fY) + + . + + @return straight-line distance to origin + */ + SkScalar length() const { return SkPoint::Length(fX, fY); } + + /** Returns the Euclidean distance from origin, computed as: + + sqrt(fX * fX + fY * fY) + + . + + @return straight-line distance to origin + */ + SkScalar distanceToOrigin() const { return this->length(); } + + /** Scales (fX, fY) so that length() returns one, while preserving ratio of fX to fY, + if possible. If prior length is nearly zero, sets vector to (0, 0) and returns + false; otherwise returns true. + + @return true if former length is not zero or nearly zero + + example: https://fiddle.skia.org/c/@Point_normalize_2 + */ + bool normalize(); + + /** Sets vector to (x, y) scaled so length() returns one, and so that + (fX, fY) is proportional to (x, y). If (x, y) length is nearly zero, + sets vector to (0, 0) and returns false; otherwise returns true. + + @param x proportional value for fX + @param y proportional value for fY + @return true if (x, y) length is not zero or nearly zero + + example: https://fiddle.skia.org/c/@Point_setNormalize + */ + bool setNormalize(SkScalar x, SkScalar y); + + /** Scales vector so that distanceToOrigin() returns length, if possible. If former + length is nearly zero, sets vector to (0, 0) and return false; otherwise returns + true. + + @param length straight-line distance to origin + @return true if former length is not zero or nearly zero + + example: https://fiddle.skia.org/c/@Point_setLength + */ + bool setLength(SkScalar length); + + /** Sets vector to (x, y) scaled to length, if possible. If former + length is nearly zero, sets vector to (0, 0) and return false; otherwise returns + true. + + @param x proportional value for fX + @param y proportional value for fY + @param length straight-line distance to origin + @return true if (x, y) length is not zero or nearly zero + + example: https://fiddle.skia.org/c/@Point_setLength_2 + */ + bool setLength(SkScalar x, SkScalar y, SkScalar length); + + /** Sets dst to SkPoint times scale. dst may be SkPoint to modify SkPoint in place. + + @param scale factor to multiply SkPoint by + @param dst storage for scaled SkPoint + + example: https://fiddle.skia.org/c/@Point_scale + */ + void scale(SkScalar scale, SkPoint* dst) const; + + /** Scales SkPoint in place by scale. + + @param value factor to multiply SkPoint by + */ + void scale(SkScalar value) { this->scale(value, this); } + + /** Changes the sign of fX and fY. + */ + void negate() { + fX = -fX; + fY = -fY; + } + + /** Returns SkPoint changing the signs of fX and fY. + + @return SkPoint as (-fX, -fY) + */ + SkPoint operator-() const { + return {-fX, -fY}; + } + + /** Adds vector v to SkPoint. Sets SkPoint to: (fX + v.fX, fY + v.fY). + + @param v vector to add + */ + void operator+=(const SkVector& v) { + fX += v.fX; + fY += v.fY; + } + + /** Subtracts vector v from SkPoint. Sets SkPoint to: (fX - v.fX, fY - v.fY). 
+ + @param v vector to subtract + */ + void operator-=(const SkVector& v) { + fX -= v.fX; + fY -= v.fY; + } + + /** Returns SkPoint multiplied by scale. + + @param scale scalar to multiply by + @return SkPoint as (fX * scale, fY * scale) + */ + SkPoint operator*(SkScalar scale) const { + return {fX * scale, fY * scale}; + } + + /** Multiplies SkPoint by scale. Sets SkPoint to: (fX * scale, fY * scale). + + @param scale scalar to multiply by + @return reference to SkPoint + */ + SkPoint& operator*=(SkScalar scale) { + fX *= scale; + fY *= scale; + return *this; + } + + /** Returns true if both fX and fY are measurable values. + + @return true for values other than infinities and NaN + */ + bool isFinite() const { + SkScalar accum = 0; + accum *= fX; + accum *= fY; + + // accum is either NaN or it is finite (zero). + SkASSERT(0 == accum || SkScalarIsNaN(accum)); + + // value==value will be true iff value is not NaN + // TODO: is it faster to say !accum or accum==accum? + return !SkScalarIsNaN(accum); + } + + /** Returns true if SkPoint is equivalent to SkPoint constructed from (x, y). + + @param x value compared with fX + @param y value compared with fY + @return true if SkPoint equals (x, y) + */ + bool equals(SkScalar x, SkScalar y) const { + return fX == x && fY == y; + } + + /** Returns true if a is equivalent to b. + + @param a SkPoint to compare + @param b SkPoint to compare + @return true if a.fX == b.fX and a.fY == b.fY + */ + friend bool operator==(const SkPoint& a, const SkPoint& b) { + return a.fX == b.fX && a.fY == b.fY; + } + + /** Returns true if a is not equivalent to b. + + @param a SkPoint to compare + @param b SkPoint to compare + @return true if a.fX != b.fX or a.fY != b.fY + */ + friend bool operator!=(const SkPoint& a, const SkPoint& b) { + return a.fX != b.fX || a.fY != b.fY; + } + + /** Returns vector from b to a, computed as (a.fX - b.fX, a.fY - b.fY). + + Can also be used to subtract vector from SkPoint, returning SkPoint. + Can also be used to subtract vector from vector, returning vector. + + @param a SkPoint to subtract from + @param b SkPoint to subtract + @return vector from b to a + */ + friend SkVector operator-(const SkPoint& a, const SkPoint& b) { + return {a.fX - b.fX, a.fY - b.fY}; + } + + /** Returns SkPoint resulting from SkPoint a offset by vector b, computed as: + (a.fX + b.fX, a.fY + b.fY). + + Can also be used to offset SkPoint b by vector a, returning SkPoint. + Can also be used to add vector to vector, returning vector. + + @param a SkPoint or vector to add to + @param b SkPoint or vector to add + @return SkPoint equal to a offset by b + */ + friend SkPoint operator+(const SkPoint& a, const SkVector& b) { + return {a.fX + b.fX, a.fY + b.fY}; + } + + /** Returns the Euclidean distance from origin, computed as: + + sqrt(x * x + y * y) + + . + + @param x component of length + @param y component of length + @return straight-line distance to origin + + example: https://fiddle.skia.org/c/@Point_Length + */ + static SkScalar Length(SkScalar x, SkScalar y); + + /** Scales (vec->fX, vec->fY) so that length() returns one, while preserving ratio of vec->fX + to vec->fY, if possible. If original length is nearly zero, sets vec to (0, 0) and returns + zero; otherwise, returns length of vec before vec is scaled. + + Returned prior length may be SK_ScalarInfinity if it can not be represented by SkScalar. + + Note that normalize() is faster if prior length is not required. 
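[Editorial note] The operators and normalize() above combine naturally for direction math. A sketch, assuming two existing points `a` and `b`:

    SkVector dir = b - a;           // operator- gives the vector from a to b
    if (!dir.normalize()) {         // nearly zero-length input
        dir = {1, 0};               // arbitrary fallback direction
    }
    SkPoint next = a + dir * 3.0f;  // step three units from a toward b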
+ + @param vec normalized to unit length + @return original vec length + + example: https://fiddle.skia.org/c/@Point_Normalize + */ + static SkScalar Normalize(SkVector* vec); + + /** Returns the Euclidean distance between a and b. + + @param a line end point + @param b line end point + @return straight-line distance from a to b + */ + static SkScalar Distance(const SkPoint& a, const SkPoint& b) { + return Length(a.fX - b.fX, a.fY - b.fY); + } + + /** Returns the dot product of vector a and vector b. + + @param a left side of dot product + @param b right side of dot product + @return product of input magnitudes and cosine of the angle between them + */ + static SkScalar DotProduct(const SkVector& a, const SkVector& b) { + return a.fX * b.fX + a.fY * b.fY; + } + + /** Returns the cross product of vector a and vector b. + + a and b form three-dimensional vectors with z-axis value equal to zero. The + cross product is a three-dimensional vector with x-axis and y-axis values equal + to zero. The cross product z-axis component is returned. + + @param a left side of cross product + @param b right side of cross product + @return area spanned by vectors signed by angle direction + */ + static SkScalar CrossProduct(const SkVector& a, const SkVector& b) { + return a.fX * b.fY - a.fY * b.fX; + } + + /** Returns the cross product of vector and vec. + + Vector and vec form three-dimensional vectors with z-axis value equal to zero. + The cross product is a three-dimensional vector with x-axis and y-axis values + equal to zero. The cross product z-axis component is returned. + + @param vec right side of cross product + @return area spanned by vectors signed by angle direction + */ + SkScalar cross(const SkVector& vec) const { + return CrossProduct(*this, vec); + } + + /** Returns the dot product of vector and vector vec. + + @param vec right side of dot product + @return product of input magnitudes and cosine of the angle between them + */ + SkScalar dot(const SkVector& vec) const { + return DotProduct(*this, vec); + } + +}; + +#endif diff --git a/src/deps/skia/include/core/SkPoint3.h b/src/deps/skia/include/core/SkPoint3.h new file mode 100644 index 000000000..e372f8279 --- /dev/null +++ b/src/deps/skia/include/core/SkPoint3.h @@ -0,0 +1,157 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPoint3_DEFINED +#define SkPoint3_DEFINED + +#include "include/core/SkPoint.h" + +struct SK_API SkPoint3 { + SkScalar fX, fY, fZ; + + static SkPoint3 Make(SkScalar x, SkScalar y, SkScalar z) { + SkPoint3 pt; + pt.set(x, y, z); + return pt; + } + + SkScalar x() const { return fX; } + SkScalar y() const { return fY; } + SkScalar z() const { return fZ; } + + void set(SkScalar x, SkScalar y, SkScalar z) { fX = x; fY = y; fZ = z; } + + friend bool operator==(const SkPoint3& a, const SkPoint3& b) { + return a.fX == b.fX && a.fY == b.fY && a.fZ == b.fZ; + } + + friend bool operator!=(const SkPoint3& a, const SkPoint3& b) { + return !(a == b); + } + + /** Returns the Euclidian distance from (0,0,0) to (x,y,z) + */ + static SkScalar Length(SkScalar x, SkScalar y, SkScalar z); + + /** Return the Euclidian distance from (0,0,0) to the point + */ + SkScalar length() const { return SkPoint3::Length(fX, fY, fZ); } + + /** Set the point (vector) to be unit-length in the same direction as it + already points. 
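[Editorial note] DotProduct() and CrossProduct() above are the usual 2D geometric tests; for example, the sign of the cross product tells which side of the line a->b a third point c falls on (a, b, c are assumed points):

    SkVector ab = b - a, ac = c - a;
    bool leftOfAB = SkPoint::CrossProduct(ab, ac) > 0;  // counter-clockwise turn at a
    SkScalar proj = SkPoint::DotProduct(ac, ab);        // > 0 when c lies "ahead of" a along ab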
If the point has a degenerate length (i.e., nearly 0) + then set it to (0,0,0) and return false; otherwise return true. + */ + bool normalize(); + + /** Return a new point whose X, Y and Z coordinates are scaled. + */ + SkPoint3 makeScale(SkScalar scale) const { + SkPoint3 p; + p.set(scale * fX, scale * fY, scale * fZ); + return p; + } + + /** Scale the point's coordinates by scale. + */ + void scale(SkScalar value) { + fX *= value; + fY *= value; + fZ *= value; + } + + /** Return a new point whose X, Y and Z coordinates are the negative of the + original point's + */ + SkPoint3 operator-() const { + SkPoint3 neg; + neg.fX = -fX; + neg.fY = -fY; + neg.fZ = -fZ; + return neg; + } + + /** Returns a new point whose coordinates are the difference between + a and b (i.e., a - b) + */ + friend SkPoint3 operator-(const SkPoint3& a, const SkPoint3& b) { + return { a.fX - b.fX, a.fY - b.fY, a.fZ - b.fZ }; + } + + /** Returns a new point whose coordinates are the sum of a and b (a + b) + */ + friend SkPoint3 operator+(const SkPoint3& a, const SkPoint3& b) { + return { a.fX + b.fX, a.fY + b.fY, a.fZ + b.fZ }; + } + + /** Add v's coordinates to the point's + */ + void operator+=(const SkPoint3& v) { + fX += v.fX; + fY += v.fY; + fZ += v.fZ; + } + + /** Subtract v's coordinates from the point's + */ + void operator-=(const SkPoint3& v) { + fX -= v.fX; + fY -= v.fY; + fZ -= v.fZ; + } + + friend SkPoint3 operator*(SkScalar t, SkPoint3 p) { + return { t * p.fX, t * p.fY, t * p.fZ }; + } + + /** Returns true if fX, fY, and fZ are measurable values. + + @return true for values other than infinities and NaN + */ + bool isFinite() const { + SkScalar accum = 0; + accum *= fX; + accum *= fY; + accum *= fZ; + + // accum is either NaN or it is finite (zero). + SkASSERT(0 == accum || SkScalarIsNaN(accum)); + + // value==value will be true iff value is not NaN + // TODO: is it faster to say !accum or accum==accum? + return !SkScalarIsNaN(accum); + } + + /** Returns the dot product of a and b, treating them as 3D vectors + */ + static SkScalar DotProduct(const SkPoint3& a, const SkPoint3& b) { + return a.fX * b.fX + a.fY * b.fY + a.fZ * b.fZ; + } + + SkScalar dot(const SkPoint3& vec) const { + return DotProduct(*this, vec); + } + + /** Returns the cross product of a and b, treating them as 3D vectors + */ + static SkPoint3 CrossProduct(const SkPoint3& a, const SkPoint3& b) { + SkPoint3 result; + result.fX = a.fY*b.fZ - a.fZ*b.fY; + result.fY = a.fZ*b.fX - a.fX*b.fZ; + result.fZ = a.fX*b.fY - a.fY*b.fX; + + return result; + } + + SkPoint3 cross(const SkPoint3& vec) const { + return CrossProduct(*this, vec); + } +}; + +typedef SkPoint3 SkVector3; +typedef SkPoint3 SkColor3f; + +#endif diff --git a/src/deps/skia/include/core/SkPromiseImageTexture.h b/src/deps/skia/include/core/SkPromiseImageTexture.h new file mode 100644 index 000000000..05434c094 --- /dev/null +++ b/src/deps/skia/include/core/SkPromiseImageTexture.h @@ -0,0 +1,46 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPromiseImageTexture_DEFINED +#define SkPromiseImageTexture_DEFINED + +#include "include/core/SkTypes.h" + +#if SK_SUPPORT_GPU +#include "include/core/SkRefCnt.h" +#include "include/gpu/GrBackendSurface.h" +/** + * This type is used to fulfill textures for PromiseImages. 
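[Editorial note] The SkPoint3 operators above cover basic 3D helper math, e.g. a triangle's face normal (p0, p1, p2 are assumed SkPoint3 values):

    SkPoint3 e1 = p1 - p0, e2 = p2 - p0;
    SkPoint3 n  = e1.cross(e2);
    if (!n.normalize()) {
        n = {0, 0, 1};   // degenerate triangle: fall back to an arbitrary unit normal
    }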
Once an instance is returned from a + * PromiseImageTextureFulfillProc the GrBackendTexture it wraps must remain valid until the + * corresponding PromiseImageTextureReleaseProc is called. + */ +class SK_API SkPromiseImageTexture : public SkNVRefCnt<SkPromiseImageTexture> { +public: + SkPromiseImageTexture() = delete; + SkPromiseImageTexture(const SkPromiseImageTexture&) = delete; + SkPromiseImageTexture(SkPromiseImageTexture&&) = delete; + ~SkPromiseImageTexture(); + SkPromiseImageTexture& operator=(const SkPromiseImageTexture&) = delete; + SkPromiseImageTexture& operator=(SkPromiseImageTexture&&) = delete; + + static sk_sp<SkPromiseImageTexture> Make(const GrBackendTexture& backendTexture) { + if (!backendTexture.isValid()) { + return nullptr; + } + return sk_sp<SkPromiseImageTexture>(new SkPromiseImageTexture(backendTexture)); + } + + GrBackendTexture backendTexture() const { return fBackendTexture; } + +private: + explicit SkPromiseImageTexture(const GrBackendTexture& backendTexture); + + GrBackendTexture fBackendTexture; +}; +#endif // SK_SUPPORT_GPU + +#endif // SkPromiseImageTexture_DEFINED diff --git a/src/deps/skia/include/core/SkRRect.h b/src/deps/skia/include/core/SkRRect.h new file mode 100644 index 000000000..099385168 --- /dev/null +++ b/src/deps/skia/include/core/SkRRect.h @@ -0,0 +1,512 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRRect_DEFINED +#define SkRRect_DEFINED + +#include "include/core/SkPoint.h" +#include "include/core/SkRect.h" + +class SkPath; +class SkMatrix; +class SkString; + +/** \class SkRRect + SkRRect describes a rounded rectangle with a bounds and a pair of radii for each corner. + The bounds and radii can be set so that SkRRect describes: a rectangle with sharp corners; + a circle; an oval; or a rectangle with one or more rounded corners. + + SkRRect allows implementing CSS properties that describe rounded corners. + SkRRect may have up to eight different radii, one for each axis on each of its four + corners. + + SkRRect may modify the provided parameters when initializing bounds and radii. + If either axis radii is zero or less: radii are stored as zero; corner is square. + If corner curves overlap, radii are proportionally reduced to fit within bounds. +*/ +class SK_API SkRRect { +public: + + /** Initializes bounds at (0, 0), the origin, with zero width and height. + Initializes corner radii to (0, 0), and sets type of kEmpty_Type. + + @return empty SkRRect + */ + SkRRect() = default; + + /** Initializes to copy of rrect bounds and corner radii. + + @param rrect bounds and corner to copy + @return copy of rrect + */ + SkRRect(const SkRRect& rrect) = default; + + /** Copies rrect bounds and corner radii. + + @param rrect bounds and corner to copy + @return copy of rrect + */ + SkRRect& operator=(const SkRRect& rrect) = default; + + /** \enum SkRRect::Type + Type describes possible specializations of SkRRect. Each Type is + exclusive; a SkRRect may only have one type. + + Type members become progressively less restrictive; larger values of + Type have more degrees of freedom than smaller values. 
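[Editorial note] Using SkPromiseImageTexture (defined above) is a one-liner in GPU builds; a sketch, where `backendTexture` is assumed to come from the client's own GrBackendTexture management:

    sk_sp<SkPromiseImageTexture> promise = SkPromiseImageTexture::Make(backendTexture);
    if (!promise) {
        // backendTexture was not valid; nothing was wrapped
    }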
+ */ + enum Type { + kEmpty_Type, //!< zero width or height + kRect_Type, //!< non-zero width and height, and zeroed radii + kOval_Type, //!< non-zero width and height filled with radii + kSimple_Type, //!< non-zero width and height with equal radii + kNinePatch_Type, //!< non-zero width and height with axis-aligned radii + kComplex_Type, //!< non-zero width and height with arbitrary radii + kLastType = kComplex_Type, //!< largest Type value + }; + + Type getType() const { + SkASSERT(this->isValid()); + return static_cast<Type>(fType); + } + + Type type() const { return this->getType(); } + + inline bool isEmpty() const { return kEmpty_Type == this->getType(); } + inline bool isRect() const { return kRect_Type == this->getType(); } + inline bool isOval() const { return kOval_Type == this->getType(); } + inline bool isSimple() const { return kSimple_Type == this->getType(); } + inline bool isNinePatch() const { return kNinePatch_Type == this->getType(); } + inline bool isComplex() const { return kComplex_Type == this->getType(); } + + /** Returns span on the x-axis. This does not check if result fits in 32-bit float; + result may be infinity. + + @return rect().fRight minus rect().fLeft + */ + SkScalar width() const { return fRect.width(); } + + /** Returns span on the y-axis. This does not check if result fits in 32-bit float; + result may be infinity. + + @return rect().fBottom minus rect().fTop + */ + SkScalar height() const { return fRect.height(); } + + /** Returns top-left corner radii. If type() returns kEmpty_Type, kRect_Type, + kOval_Type, or kSimple_Type, returns a value representative of all corner radii. + If type() returns kNinePatch_Type or kComplex_Type, at least one of the + remaining three corners has a different value. + + @return corner radii for simple types + */ + SkVector getSimpleRadii() const { + return fRadii[0]; + } + + /** Sets bounds to zero width and height at (0, 0), the origin. Sets + corner radii to zero and sets type to kEmpty_Type. + */ + void setEmpty() { *this = SkRRect(); } + + /** Sets bounds to sorted rect, and sets corner radii to zero. + If set bounds has width and height, and sets type to kRect_Type; + otherwise, sets type to kEmpty_Type. + + @param rect bounds to set + */ + void setRect(const SkRect& rect) { + if (!this->initializeRect(rect)) { + return; + } + + memset(fRadii, 0, sizeof(fRadii)); + fType = kRect_Type; + + SkASSERT(this->isValid()); + } + + /** Initializes bounds at (0, 0), the origin, with zero width and height. + Initializes corner radii to (0, 0), and sets type of kEmpty_Type. + + @return empty SkRRect + */ + static SkRRect MakeEmpty() { return SkRRect(); } + + /** Initializes to copy of r bounds and zeroes corner radii. + + @param r bounds to copy + @return copy of r + */ + static SkRRect MakeRect(const SkRect& r) { + SkRRect rr; + rr.setRect(r); + return rr; + } + + /** Sets bounds to oval, x-axis radii to half oval.width(), and all y-axis radii + to half oval.height(). If oval bounds is empty, sets to kEmpty_Type. + Otherwise, sets to kOval_Type. + + @param oval bounds of oval + @return oval + */ + static SkRRect MakeOval(const SkRect& oval) { + SkRRect rr; + rr.setOval(oval); + return rr; + } + + /** Sets to rounded rectangle with the same radii for all four corners. + If rect is empty, sets to kEmpty_Type. + Otherwise, if xRad and yRad are zero, sets to kRect_Type. + Otherwise, if xRad is at least half rect.width() and yRad is at least half + rect.height(), sets to kOval_Type. + Otherwise, sets to kSimple_Type. 
+ + @param rect bounds of rounded rectangle + @param xRad x-axis radius of corners + @param yRad y-axis radius of corners + @return rounded rectangle + */ + static SkRRect MakeRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad) { + SkRRect rr; + rr.setRectXY(rect, xRad, yRad); + return rr; + } + + /** Sets bounds to oval, x-axis radii to half oval.width(), and all y-axis radii + to half oval.height(). If oval bounds is empty, sets to kEmpty_Type. + Otherwise, sets to kOval_Type. + + @param oval bounds of oval + */ + void setOval(const SkRect& oval); + + /** Sets to rounded rectangle with the same radii for all four corners. + If rect is empty, sets to kEmpty_Type. + Otherwise, if xRad or yRad is zero, sets to kRect_Type. + Otherwise, if xRad is at least half rect.width() and yRad is at least half + rect.height(), sets to kOval_Type. + Otherwise, sets to kSimple_Type. + + @param rect bounds of rounded rectangle + @param xRad x-axis radius of corners + @param yRad y-axis radius of corners + + example: https://fiddle.skia.org/c/@RRect_setRectXY + */ + void setRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad); + + /** Sets bounds to rect. Sets radii to (leftRad, topRad), (rightRad, topRad), + (rightRad, bottomRad), (leftRad, bottomRad). + + If rect is empty, sets to kEmpty_Type. + Otherwise, if leftRad and rightRad are zero, sets to kRect_Type. + Otherwise, if topRad and bottomRad are zero, sets to kRect_Type. + Otherwise, if leftRad and rightRad are equal and at least half rect.width(), and + topRad and bottomRad are equal at least half rect.height(), sets to kOval_Type. + Otherwise, if leftRad and rightRad are equal, and topRad and bottomRad are equal, + sets to kSimple_Type. Otherwise, sets to kNinePatch_Type. + + Nine patch refers to the nine parts defined by the radii: one center rectangle, + four edge patches, and four corner patches. + + @param rect bounds of rounded rectangle + @param leftRad left-top and left-bottom x-axis radius + @param topRad left-top and right-top y-axis radius + @param rightRad right-top and right-bottom x-axis radius + @param bottomRad left-bottom and right-bottom y-axis radius + */ + void setNinePatch(const SkRect& rect, SkScalar leftRad, SkScalar topRad, + SkScalar rightRad, SkScalar bottomRad); + + /** Sets bounds to rect. Sets radii array for individual control of all for corners. + + If rect is empty, sets to kEmpty_Type. + Otherwise, if one of each corner radii are zero, sets to kRect_Type. + Otherwise, if all x-axis radii are equal and at least half rect.width(), and + all y-axis radii are equal at least half rect.height(), sets to kOval_Type. + Otherwise, if all x-axis radii are equal, and all y-axis radii are equal, + sets to kSimple_Type. Otherwise, sets to kNinePatch_Type. + + @param rect bounds of rounded rectangle + @param radii corner x-axis and y-axis radii + + example: https://fiddle.skia.org/c/@RRect_setRectRadii + */ + void setRectRadii(const SkRect& rect, const SkVector radii[4]); + + /** \enum SkRRect::Corner + The radii are stored: top-left, top-right, bottom-right, bottom-left. + */ + enum Corner { + kUpperLeft_Corner, //!< index of top-left corner radii + kUpperRight_Corner, //!< index of top-right corner radii + kLowerRight_Corner, //!< index of bottom-right corner radii + kLowerLeft_Corner, //!< index of bottom-left corner radii + }; + + /** Returns bounds. Bounds may have zero width or zero height. Bounds right is + greater than or equal to left; bounds bottom is greater than or equal to top. 
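[Editorial note] A sketch of the SkRRect factories and setters above, including the per-corner form; the radii array follows the Corner enum order (upper-left, upper-right, lower-right, lower-left):

    SkRect bounds = SkRect::MakeWH(120, 48);
    SkRRect simple = SkRRect::MakeRectXY(bounds, 8, 8);        // kSimple_Type
    SkVector radii[4] = {{16, 16}, {0, 0}, {16, 16}, {0, 0}};  // UL, UR, LR, LL
    SkRRect mixed;
    mixed.setRectRadii(bounds, radii);                         // kComplex_Type for these radii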
+ Result is identical to getBounds(). + + @return bounding box + */ + const SkRect& rect() const { return fRect; } + + /** Returns scalar pair for radius of curve on x-axis and y-axis for one corner. + Both radii may be zero. If not zero, both are positive and finite. + + @return x-axis and y-axis radii for one corner + */ + SkVector radii(Corner corner) const { return fRadii[corner]; } + + /** Returns bounds. Bounds may have zero width or zero height. Bounds right is + greater than or equal to left; bounds bottom is greater than or equal to top. + Result is identical to rect(). + + @return bounding box + */ + const SkRect& getBounds() const { return fRect; } + + /** Returns true if bounds and radii in a are equal to bounds and radii in b. + + a and b are not equal if either contain NaN. a and b are equal if members + contain zeroes with different signs. + + @param a SkRect bounds and radii to compare + @param b SkRect bounds and radii to compare + @return true if members are equal + */ + friend bool operator==(const SkRRect& a, const SkRRect& b) { + return a.fRect == b.fRect && SkScalarsEqual(&a.fRadii[0].fX, &b.fRadii[0].fX, 8); + } + + /** Returns true if bounds and radii in a are not equal to bounds and radii in b. + + a and b are not equal if either contain NaN. a and b are equal if members + contain zeroes with different signs. + + @param a SkRect bounds and radii to compare + @param b SkRect bounds and radii to compare + @return true if members are not equal + */ + friend bool operator!=(const SkRRect& a, const SkRRect& b) { + return a.fRect != b.fRect || !SkScalarsEqual(&a.fRadii[0].fX, &b.fRadii[0].fX, 8); + } + + /** Copies SkRRect to dst, then insets dst bounds by dx and dy, and adjusts dst + radii by dx and dy. dx and dy may be positive, negative, or zero. dst may be + SkRRect. + + If either corner radius is zero, the corner has no curvature and is unchanged. + Otherwise, if adjusted radius becomes negative, pins radius to zero. + If dx exceeds half dst bounds width, dst bounds left and right are set to + bounds x-axis center. If dy exceeds half dst bounds height, dst bounds top and + bottom are set to bounds y-axis center. + + If dx or dy cause the bounds to become infinite, dst bounds is zeroed. + + @param dx added to rect().fLeft, and subtracted from rect().fRight + @param dy added to rect().fTop, and subtracted from rect().fBottom + @param dst insets bounds and radii + + example: https://fiddle.skia.org/c/@RRect_inset + */ + void inset(SkScalar dx, SkScalar dy, SkRRect* dst) const; + + /** Insets bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be + positive, negative, or zero. + + If either corner radius is zero, the corner has no curvature and is unchanged. + Otherwise, if adjusted radius becomes negative, pins radius to zero. + If dx exceeds half bounds width, bounds left and right are set to + bounds x-axis center. If dy exceeds half bounds height, bounds top and + bottom are set to bounds y-axis center. + + If dx or dy cause the bounds to become infinite, bounds is zeroed. + + @param dx added to rect().fLeft, and subtracted from rect().fRight + @param dy added to rect().fTop, and subtracted from rect().fBottom + */ + void inset(SkScalar dx, SkScalar dy) { + this->inset(dx, dy, this); + } + + /** Outsets dst bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be + positive, negative, or zero. + + If either corner radius is zero, the corner has no curvature and is unchanged. 
+ Otherwise, if adjusted radius becomes negative, pins radius to zero. + If dx exceeds half dst bounds width, dst bounds left and right are set to + bounds x-axis center. If dy exceeds half dst bounds height, dst bounds top and + bottom are set to bounds y-axis center. + + If dx or dy cause the bounds to become infinite, dst bounds is zeroed. + + @param dx subtracted from rect().fLeft, and added to rect().fRight + @param dy subtracted from rect().fTop, and added to rect().fBottom + @param dst outset bounds and radii + */ + void outset(SkScalar dx, SkScalar dy, SkRRect* dst) const { + this->inset(-dx, -dy, dst); + } + + /** Outsets bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be + positive, negative, or zero. + + If either corner radius is zero, the corner has no curvature and is unchanged. + Otherwise, if adjusted radius becomes negative, pins radius to zero. + If dx exceeds half bounds width, bounds left and right are set to + bounds x-axis center. If dy exceeds half bounds height, bounds top and + bottom are set to bounds y-axis center. + + If dx or dy cause the bounds to become infinite, bounds is zeroed. + + @param dx subtracted from rect().fLeft, and added to rect().fRight + @param dy subtracted from rect().fTop, and added to rect().fBottom + */ + void outset(SkScalar dx, SkScalar dy) { + this->inset(-dx, -dy, this); + } + + /** Translates SkRRect by (dx, dy). + + @param dx offset added to rect().fLeft and rect().fRight + @param dy offset added to rect().fTop and rect().fBottom + */ + void offset(SkScalar dx, SkScalar dy) { + fRect.offset(dx, dy); + } + + /** Returns SkRRect translated by (dx, dy). + + @param dx offset added to rect().fLeft and rect().fRight + @param dy offset added to rect().fTop and rect().fBottom + @return SkRRect bounds offset by (dx, dy), with unchanged corner radii + */ + SkRRect SK_WARN_UNUSED_RESULT makeOffset(SkScalar dx, SkScalar dy) const { + return SkRRect(fRect.makeOffset(dx, dy), fRadii, fType); + } + + /** Returns true if rect is inside the bounds and corner radii, and if + SkRRect and rect are not empty. + + @param rect area tested for containment + @return true if SkRRect contains rect + + example: https://fiddle.skia.org/c/@RRect_contains + */ + bool contains(const SkRect& rect) const; + + /** Returns true if bounds and radii values are finite and describe a SkRRect + SkRRect::Type that matches getType(). All SkRRect methods construct valid types, + even if the input values are not valid. Invalid SkRRect data can only + be generated by corrupting memory. + + @return true if bounds and radii match type() + + example: https://fiddle.skia.org/c/@RRect_isValid + */ + bool isValid() const; + + static constexpr size_t kSizeInMemory = 12 * sizeof(SkScalar); + + /** Writes SkRRect to buffer. Writes kSizeInMemory bytes, and returns + kSizeInMemory, the number of bytes written. + + @param buffer storage for SkRRect + @return bytes written, kSizeInMemory + + example: https://fiddle.skia.org/c/@RRect_writeToMemory + */ + size_t writeToMemory(void* buffer) const; + + /** Reads SkRRect from buffer, reading kSizeInMemory bytes. + Returns kSizeInMemory, bytes read if length is at least kSizeInMemory. + Otherwise, returns zero. + + @param buffer memory to read from + @param length size of buffer + @return bytes read, or 0 if length is less than kSizeInMemory + + example: https://fiddle.skia.org/c/@RRect_readFromMemory + */ + size_t readFromMemory(const void* buffer, size_t length); + + /** Transforms by SkRRect by matrix, storing result in dst. 
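[Editorial note] inset()/outset() and the writeToMemory()/readFromMemory() round trip above compose as in this sketch:

    SkRRect rr = SkRRect::MakeRectXY(SkRect::MakeWH(64, 64), 10, 10);
    rr.inset(2, 2);                                   // bounds and radii both shrink by 2
    char buffer[SkRRect::kSizeInMemory];
    rr.writeToMemory(buffer);
    SkRRect copy;
    size_t read = copy.readFromMemory(buffer, sizeof(buffer));
    SkASSERT(read == SkRRect::kSizeInMemory && copy == rr);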
+ Returns true if SkRRect transformed can be represented by another SkRRect. + Returns false if matrix contains transformations that are not axis aligned. + + Asserts in debug builds if SkRRect equals dst. + + @param matrix SkMatrix specifying the transform + @param dst SkRRect to store the result + @return true if transformation succeeded. + + example: https://fiddle.skia.org/c/@RRect_transform + */ + bool transform(const SkMatrix& matrix, SkRRect* dst) const; + + /** Writes text representation of SkRRect to standard output. + Set asHex true to generate exact binary representations + of floating point numbers. + + @param asHex true if SkScalar values are written as hexadecimal + + example: https://fiddle.skia.org/c/@RRect_dump + */ + void dump(bool asHex) const; + SkString dumpToString(bool asHex) const; + + /** Writes text representation of SkRRect to standard output. The representation + may be directly compiled as C++ code. Floating point values are written + with limited precision; it may not be possible to reconstruct original + SkRRect from output. + */ + void dump() const { this->dump(false); } + + /** Writes text representation of SkRRect to standard output. The representation + may be directly compiled as C++ code. Floating point values are written + in hexadecimal to preserve their exact bit pattern. The output reconstructs the + original SkRRect. + */ + void dumpHex() const { this->dump(true); } + +private: + static bool AreRectAndRadiiValid(const SkRect&, const SkVector[4]); + + SkRRect(const SkRect& rect, const SkVector radii[4], int32_t type) + : fRect(rect) + , fRadii{radii[0], radii[1], radii[2], radii[3]} + , fType(type) {} + + /** + * Initializes fRect. If the passed in rect is not finite or empty the rrect will be fully + * initialized and false is returned. Otherwise, just fRect is initialized and true is returned. + */ + bool initializeRect(const SkRect&); + + void computeType(); + bool checkCornerContainment(SkScalar x, SkScalar y) const; + // Returns true if the radii had to be scaled to fit rect + bool scaleRadii(); + + SkRect fRect = SkRect::MakeEmpty(); + // Radii order is UL, UR, LR, LL. Use Corner enum to index into fRadii[] + SkVector fRadii[4] = {{0, 0}, {0, 0}, {0,0}, {0,0}}; + // use an explicitly sized type so we're sure the class is dense (no uninitialized bytes) + int32_t fType = kEmpty_Type; + // TODO: add padding so we can use memcpy for flattening and not copy uninitialized data + + // to access fRadii directly + friend class SkPath; + friend class SkRRectPriv; +}; + +#endif diff --git a/src/deps/skia/include/core/SkRSXform.h b/src/deps/skia/include/core/SkRSXform.h new file mode 100644 index 000000000..91653311d --- /dev/null +++ b/src/deps/skia/include/core/SkRSXform.h @@ -0,0 +1,69 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRSXform_DEFINED +#define SkRSXform_DEFINED + +#include "include/core/SkPoint.h" +#include "include/core/SkSize.h" + +/** + * A compressed form of a rotation+scale matrix. + * + * [ fSCos -fSSin fTx ] + * [ fSSin fSCos fTy ] + * [ 0 0 1 ] + */ +struct SkRSXform { + static SkRSXform Make(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) { + SkRSXform xform = { scos, ssin, tx, ty }; + return xform; + } + + /* + * Initialize a new xform based on the scale, rotation (in radians), final tx,ty location + * and anchor-point ax,ay within the src quad. + * + * Note: the anchor point is not normalized (e.g. 
0...1) but is in pixels of the src image. + */ + static SkRSXform MakeFromRadians(SkScalar scale, SkScalar radians, SkScalar tx, SkScalar ty, + SkScalar ax, SkScalar ay) { + const SkScalar s = SkScalarSin(radians) * scale; + const SkScalar c = SkScalarCos(radians) * scale; + return Make(c, s, tx + -c * ax + s * ay, ty + -s * ax - c * ay); + } + + SkScalar fSCos; + SkScalar fSSin; + SkScalar fTx; + SkScalar fTy; + + bool rectStaysRect() const { + return 0 == fSCos || 0 == fSSin; + } + + void setIdentity() { + fSCos = 1; + fSSin = fTx = fTy = 0; + } + + void set(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) { + fSCos = scos; + fSSin = ssin; + fTx = tx; + fTy = ty; + } + + void toQuad(SkScalar width, SkScalar height, SkPoint quad[4]) const; + void toQuad(const SkSize& size, SkPoint quad[4]) const { + this->toQuad(size.width(), size.height(), quad); + } + void toTriStrip(SkScalar width, SkScalar height, SkPoint strip[4]) const; +}; + +#endif + diff --git a/src/deps/skia/include/core/SkRasterHandleAllocator.h b/src/deps/skia/include/core/SkRasterHandleAllocator.h new file mode 100644 index 000000000..ad7c379ee --- /dev/null +++ b/src/deps/skia/include/core/SkRasterHandleAllocator.h @@ -0,0 +1,92 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRasterHandleAllocator_DEFINED +#define SkRasterHandleAllocator_DEFINED + +#include "include/core/SkImageInfo.h" + +class SkBitmap; +class SkCanvas; +class SkMatrix; + +/** + * If a client wants to control the allocation of raster layers in a canvas, it should subclass + * SkRasterHandleAllocator. This allocator performs two tasks: + * 1. controls how the memory for the pixels is allocated + * 2. associates a "handle" to a private object that can track the matrix/clip of the SkCanvas + * + * This example allocates a canvas, and defers to the allocator to create the base layer. + * + * std::unique_ptr<SkCanvas> canvas = SkRasterHandleAllocator::MakeCanvas( + * SkImageInfo::Make(...), + * std::make_unique<MySubclassRasterHandleAllocator>(...), + * nullptr); + * + * If you have already allocated the base layer (and its handle, release-proc etc.) then you + * can pass those in using the last parameter to MakeCanvas(). + * + * Regardless of how the base layer is allocated, each time canvas->saveLayer() is called, + * your allocator's allocHandle() will be called. + */ +class SK_API SkRasterHandleAllocator { +public: + virtual ~SkRasterHandleAllocator() = default; + + // The value that is returned to clients of the canvas that has this allocator installed. + typedef void* Handle; + + struct Rec { + // When the allocation goes out of scope, this proc is called to free everything associated + // with it: the pixels, the "handle", etc. This is passed the pixel address and fReleaseCtx. + void (*fReleaseProc)(void* pixels, void* ctx); + void* fReleaseCtx; // context passed to fReleaseProc + void* fPixels; // pixels for this allocation + size_t fRowBytes; // rowbytes for these pixels + Handle fHandle; // public handle returned by SkCanvas::accessTopRasterHandle() + }; + + /** + * Given a requested info, allocate the corresponding pixels/rowbytes, and whatever handle + * is desired to give clients access to those pixels. The rec also contains a proc and context + * which will be called when this allocation goes out of scope. + * + * e.g. 
+ * when canvas->saveLayer() is called, the allocator will be called to allocate the pixels + * for the layer. When canvas->restore() is called, the fReleaseProc will be called. + */ + virtual bool allocHandle(const SkImageInfo&, Rec*) = 0; + + /** + * Clients access the handle for a given layer by calling SkCanvas::accessTopRasterHandle(). + * To allow the handle to reflect the current matrix/clip in the canvs, updateHandle() is + * is called. The subclass is responsible to update the handle as it sees fit. + */ + virtual void updateHandle(Handle, const SkMatrix&, const SkIRect&) = 0; + + /** + * This creates a canvas which will use the allocator to manage pixel allocations, including + * all calls to saveLayer(). + * + * If rec is non-null, then it will be used as the base-layer of pixels/handle. + * If rec is null, then the allocator will be called for the base-layer as well. + */ + static std::unique_ptr<SkCanvas> MakeCanvas(std::unique_ptr<SkRasterHandleAllocator>, + const SkImageInfo&, const Rec* rec = nullptr); + +protected: + SkRasterHandleAllocator() = default; + SkRasterHandleAllocator(const SkRasterHandleAllocator&) = delete; + SkRasterHandleAllocator& operator=(const SkRasterHandleAllocator&) = delete; + +private: + friend class SkBitmapDevice; + + Handle allocBitmap(const SkImageInfo&, SkBitmap*); +}; + +#endif diff --git a/src/deps/skia/include/core/SkRect.h b/src/deps/skia/include/core/SkRect.h new file mode 100644 index 000000000..99efe70bc --- /dev/null +++ b/src/deps/skia/include/core/SkRect.h @@ -0,0 +1,1378 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRect_DEFINED +#define SkRect_DEFINED + +#include "include/core/SkPoint.h" +#include "include/core/SkSize.h" +#include "include/private/SkSafe32.h" +#include "include/private/SkTFitsIn.h" + +#include <algorithm> +#include <utility> + +struct SkRect; + +/** \struct SkIRect + SkIRect holds four 32-bit integer coordinates describing the upper and + lower bounds of a rectangle. SkIRect may be created from outer bounds or + from position, width, and height. SkIRect describes an area; if its right + is less than or equal to its left, or if its bottom is less than or equal to + its top, it is considered empty. +*/ +struct SK_API SkIRect { + int32_t fLeft; //!< smaller x-axis bounds + int32_t fTop; //!< smaller y-axis bounds + int32_t fRight; //!< larger x-axis bounds + int32_t fBottom; //!< larger y-axis bounds + + /** Returns constructed SkIRect set to (0, 0, 0, 0). + Many other rectangles are empty; if left is equal to or greater than right, + or if top is equal to or greater than bottom. Setting all members to zero + is a convenience, but does not designate a special empty rectangle. + + @return bounds (0, 0, 0, 0) + */ + static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeEmpty() { + return SkIRect{0, 0, 0, 0}; + } + + /** Returns constructed SkIRect set to (0, 0, w, h). Does not validate input; w or h + may be negative. + + @param w width of constructed SkIRect + @param h height of constructed SkIRect + @return bounds (0, 0, w, h) + */ + static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeWH(int32_t w, int32_t h) { + return SkIRect{0, 0, w, h}; + } + + /** Returns constructed SkIRect set to (0, 0, size.width(), size.height()). + Does not validate input; size.width() or size.height() may be negative. 
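[Editorial note] For reference, a bare-bones allocator along the lines described above might look like the sketch below. It is only an illustration (malloc-backed pixels, with the pixel pointer doubling as the "handle"), not a pattern taken from the Skia sources.

    #include "include/core/SkRasterHandleAllocator.h"
    #include <cstdlib>

    class MallocAllocator : public SkRasterHandleAllocator {
    public:
        bool allocHandle(const SkImageInfo& info, Rec* rec) override {
            size_t rowBytes = info.minRowBytes();
            void* pixels = calloc(info.height(), rowBytes);
            if (!pixels) {
                return false;
            }
            rec->fReleaseProc = [](void* p, void* /*ctx*/) { free(p); };
            rec->fReleaseCtx  = nullptr;
            rec->fPixels      = pixels;
            rec->fRowBytes    = rowBytes;
            rec->fHandle      = pixels;   // whatever the client wants back from accessTopRasterHandle()
            return true;
        }
        void updateHandle(Handle, const SkMatrix&, const SkIRect&) override {
            // A real implementation would push the new matrix/clip into the handle's state.
        }
    };

    // Hypothetical usage:
    // auto canvas = SkRasterHandleAllocator::MakeCanvas(
    //         std::make_unique<MallocAllocator>(), SkImageInfo::MakeN32Premul(256, 256));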
+ + @param size values for SkIRect width and height + @return bounds (0, 0, size.width(), size.height()) + */ + static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeSize(const SkISize& size) { + return SkIRect{0, 0, size.fWidth, size.fHeight}; + } + + /** Returns constructed SkIRect set to (pt.x(), pt.y(), pt.x() + size.width(), + pt.y() + size.height()). Does not validate input; size.width() or size.height() may be + negative. + + @param pt values for SkIRect fLeft and fTop + @param size values for SkIRect width and height + @return bounds at pt with width and height of size + */ + static constexpr SkIRect SK_WARN_UNUSED_RESULT MakePtSize(SkIPoint pt, SkISize size) { + return MakeXYWH(pt.x(), pt.y(), size.width(), size.height()); + } + + /** Returns constructed SkIRect set to (l, t, r, b). Does not sort input; SkIRect may + result in fLeft greater than fRight, or fTop greater than fBottom. + + @param l integer stored in fLeft + @param t integer stored in fTop + @param r integer stored in fRight + @param b integer stored in fBottom + @return bounds (l, t, r, b) + */ + static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeLTRB(int32_t l, int32_t t, + int32_t r, int32_t b) { + return SkIRect{l, t, r, b}; + } + + /** Returns constructed SkIRect set to: (x, y, x + w, y + h). + Does not validate input; w or h may be negative. + + @param x stored in fLeft + @param y stored in fTop + @param w added to x and stored in fRight + @param h added to y and stored in fBottom + @return bounds at (x, y) with width w and height h + */ + static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeXYWH(int32_t x, int32_t y, + int32_t w, int32_t h) { + return { x, y, Sk32_sat_add(x, w), Sk32_sat_add(y, h) }; + } + + /** Returns left edge of SkIRect, if sorted. + Call sort() to reverse fLeft and fRight if needed. + + @return fLeft + */ + constexpr int32_t left() const { return fLeft; } + + /** Returns top edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid, + and sort() to reverse fTop and fBottom if needed. + + @return fTop + */ + constexpr int32_t top() const { return fTop; } + + /** Returns right edge of SkIRect, if sorted. + Call sort() to reverse fLeft and fRight if needed. + + @return fRight + */ + constexpr int32_t right() const { return fRight; } + + /** Returns bottom edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid, + and sort() to reverse fTop and fBottom if needed. + + @return fBottom + */ + constexpr int32_t bottom() const { return fBottom; } + + /** Returns left edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid, + and sort() to reverse fLeft and fRight if needed. + + @return fLeft + */ + constexpr int32_t x() const { return fLeft; } + + /** Returns top edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid, + and sort() to reverse fTop and fBottom if needed. + + @return fTop + */ + constexpr int32_t y() const { return fTop; } + + // Experimental + constexpr SkIPoint topLeft() const { return {fLeft, fTop}; } + + /** Returns span on the x-axis. This does not check if SkIRect is sorted, or if + result fits in 32-bit signed integer; result may be negative. + + @return fRight minus fLeft + */ + constexpr int32_t width() const { return Sk32_can_overflow_sub(fRight, fLeft); } + + /** Returns span on the y-axis. This does not check if SkIRect is sorted, or if + result fits in 32-bit signed integer; result may be negative. 
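[Editorial note] The SkIRect factories above describe the same rectangle two ways; MakeXYWH() saturates on overflow rather than wrapping:

    SkIRect a = SkIRect::MakeLTRB(10, 20, 110, 70);
    SkIRect b = SkIRect::MakeXYWH(10, 20, 100, 50);
    SkASSERT(a == b && a.width() == 100 && a.height() == 50);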
+ + @return fBottom minus fTop + */ + constexpr int32_t height() const { return Sk32_can_overflow_sub(fBottom, fTop); } + + /** Returns spans on the x-axis and y-axis. This does not check if SkIRect is sorted, + or if result fits in 32-bit signed integer; result may be negative. + + @return SkISize (width, height) + */ + constexpr SkISize size() const { return SkISize::Make(this->width(), this->height()); } + + /** Returns span on the x-axis. This does not check if SkIRect is sorted, so the + result may be negative. This is safer than calling width() since width() might + overflow in its calculation. + + @return fRight minus fLeft cast to int64_t + */ + constexpr int64_t width64() const { return (int64_t)fRight - (int64_t)fLeft; } + + /** Returns span on the y-axis. This does not check if SkIRect is sorted, so the + result may be negative. This is safer than calling height() since height() might + overflow in its calculation. + + @return fBottom minus fTop cast to int64_t + */ + constexpr int64_t height64() const { return (int64_t)fBottom - (int64_t)fTop; } + + /** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal + to or greater than fBottom. Call sort() to reverse rectangles with negative + width64() or height64(). + + @return true if width64() or height64() are zero or negative + */ + bool isEmpty64() const { return fRight <= fLeft || fBottom <= fTop; } + + /** Returns true if width() or height() are zero or negative. + + @return true if width() or height() are zero or negative + */ + bool isEmpty() const { + int64_t w = this->width64(); + int64_t h = this->height64(); + if (w <= 0 || h <= 0) { + return true; + } + // Return true if either exceeds int32_t + return !SkTFitsIn<int32_t>(w | h); + } + + /** Returns true if all members in a: fLeft, fTop, fRight, and fBottom; are + identical to corresponding members in b. + + @param a SkIRect to compare + @param b SkIRect to compare + @return true if members are equal + */ + friend bool operator==(const SkIRect& a, const SkIRect& b) { + return !memcmp(&a, &b, sizeof(a)); + } + + /** Returns true if any member in a: fLeft, fTop, fRight, and fBottom; is not + identical to the corresponding member in b. + + @param a SkIRect to compare + @param b SkIRect to compare + @return true if members are not equal + */ + friend bool operator!=(const SkIRect& a, const SkIRect& b) { + return !(a == b); + } + + /** Sets SkIRect to (0, 0, 0, 0). + + Many other rectangles are empty; if left is equal to or greater than right, + or if top is equal to or greater than bottom. Setting all members to zero + is a convenience, but does not designate a special empty rectangle. + */ + void setEmpty() { memset(this, 0, sizeof(*this)); } + + /** Sets SkIRect to (left, top, right, bottom). + left and right are not sorted; left is not necessarily less than right. + top and bottom are not sorted; top is not necessarily less than bottom. + + @param left stored in fLeft + @param top stored in fTop + @param right stored in fRight + @param bottom stored in fBottom + */ + void setLTRB(int32_t left, int32_t top, int32_t right, int32_t bottom) { + fLeft = left; + fTop = top; + fRight = right; + fBottom = bottom; + } + + /** Sets SkIRect to: (x, y, x + width, y + height). + Does not validate input; width or height may be negative. 
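Aside: the emptiness rules documented above also apply to unsorted rectangles; a rectangle whose left edge exceeds its right edge reports isEmpty() until it is sorted. A minimal illustrative sketch, not part of the vendored header, assuming this SkRect.h is included:

    // An unsorted SkIRect (left > right) is treated as empty until sort() fixes the order.
    SkIRect r = SkIRect::MakeLTRB(30, 10, 20, 40);
    SkASSERT(r.isEmpty());                 // width64() == -10, so the rect counts as empty
    r.sort();                              // swaps fLeft and fRight
    SkASSERT(!r.isEmpty());
    SkASSERT(r.width() == 10 && r.height() == 30);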
+ + @param x stored in fLeft + @param y stored in fTop + @param width added to x and stored in fRight + @param height added to y and stored in fBottom + */ + void setXYWH(int32_t x, int32_t y, int32_t width, int32_t height) { + fLeft = x; + fTop = y; + fRight = Sk32_sat_add(x, width); + fBottom = Sk32_sat_add(y, height); + } + + void setWH(int32_t width, int32_t height) { + fLeft = 0; + fTop = 0; + fRight = width; + fBottom = height; + } + + void setSize(SkISize size) { + fLeft = 0; + fTop = 0; + fRight = size.width(); + fBottom = size.height(); + } + + /** Returns SkIRect offset by (dx, dy). + + If dx is negative, SkIRect returned is moved to the left. + If dx is positive, SkIRect returned is moved to the right. + If dy is negative, SkIRect returned is moved upward. + If dy is positive, SkIRect returned is moved downward. + + @param dx offset added to fLeft and fRight + @param dy offset added to fTop and fBottom + @return SkIRect offset by dx and dy, with original width and height + */ + constexpr SkIRect makeOffset(int32_t dx, int32_t dy) const { + return { + Sk32_sat_add(fLeft, dx), Sk32_sat_add(fTop, dy), + Sk32_sat_add(fRight, dx), Sk32_sat_add(fBottom, dy), + }; + } + + /** Returns SkIRect offset by (offset.x(), offset.y()). + + If offset.x() is negative, SkIRect returned is moved to the left. + If offset.x() is positive, SkIRect returned is moved to the right. + If offset.y() is negative, SkIRect returned is moved upward. + If offset.y() is positive, SkIRect returned is moved downward. + + @param offset translation vector + @return SkIRect translated by offset, with original width and height + */ + constexpr SkIRect makeOffset(SkIVector offset) const { + return this->makeOffset(offset.x(), offset.y()); + } + + /** Returns SkIRect, inset by (dx, dy). + + If dx is negative, SkIRect returned is wider. + If dx is positive, SkIRect returned is narrower. + If dy is negative, SkIRect returned is taller. + If dy is positive, SkIRect returned is shorter. + + @param dx offset added to fLeft and subtracted from fRight + @param dy offset added to fTop and subtracted from fBottom + @return SkIRect inset symmetrically left and right, top and bottom + */ + SkIRect makeInset(int32_t dx, int32_t dy) const { + return { + Sk32_sat_add(fLeft, dx), Sk32_sat_add(fTop, dy), + Sk32_sat_sub(fRight, dx), Sk32_sat_sub(fBottom, dy), + }; + } + + /** Returns SkIRect, outset by (dx, dy). + + If dx is negative, SkIRect returned is narrower. + If dx is positive, SkIRect returned is wider. + If dy is negative, SkIRect returned is shorter. + If dy is positive, SkIRect returned is taller. + + @param dx offset subtracted to fLeft and added from fRight + @param dy offset subtracted to fTop and added from fBottom + @return SkIRect outset symmetrically left and right, top and bottom + */ + SkIRect makeOutset(int32_t dx, int32_t dy) const { + return { + Sk32_sat_sub(fLeft, dx), Sk32_sat_sub(fTop, dy), + Sk32_sat_add(fRight, dx), Sk32_sat_add(fBottom, dy), + }; + } + + /** Offsets SkIRect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom. + + If dx is negative, moves SkIRect returned to the left. + If dx is positive, moves SkIRect returned to the right. + If dy is negative, moves SkIRect returned upward. + If dy is positive, moves SkIRect returned downward. 
+ + @param dx offset added to fLeft and fRight + @param dy offset added to fTop and fBottom + */ + void offset(int32_t dx, int32_t dy) { + fLeft = Sk32_sat_add(fLeft, dx); + fTop = Sk32_sat_add(fTop, dy); + fRight = Sk32_sat_add(fRight, dx); + fBottom = Sk32_sat_add(fBottom, dy); + } + + /** Offsets SkIRect by adding delta.fX to fLeft, fRight; and by adding delta.fY to + fTop, fBottom. + + If delta.fX is negative, moves SkIRect returned to the left. + If delta.fX is positive, moves SkIRect returned to the right. + If delta.fY is negative, moves SkIRect returned upward. + If delta.fY is positive, moves SkIRect returned downward. + + @param delta offset added to SkIRect + */ + void offset(const SkIPoint& delta) { + this->offset(delta.fX, delta.fY); + } + + /** Offsets SkIRect so that fLeft equals newX, and fTop equals newY. width and height + are unchanged. + + @param newX stored in fLeft, preserving width() + @param newY stored in fTop, preserving height() + */ + void offsetTo(int32_t newX, int32_t newY) { + fRight = Sk64_pin_to_s32((int64_t)fRight + newX - fLeft); + fBottom = Sk64_pin_to_s32((int64_t)fBottom + newY - fTop); + fLeft = newX; + fTop = newY; + } + + /** Insets SkIRect by (dx,dy). + + If dx is positive, makes SkIRect narrower. + If dx is negative, makes SkIRect wider. + If dy is positive, makes SkIRect shorter. + If dy is negative, makes SkIRect taller. + + @param dx offset added to fLeft and subtracted from fRight + @param dy offset added to fTop and subtracted from fBottom + */ + void inset(int32_t dx, int32_t dy) { + fLeft = Sk32_sat_add(fLeft, dx); + fTop = Sk32_sat_add(fTop, dy); + fRight = Sk32_sat_sub(fRight, dx); + fBottom = Sk32_sat_sub(fBottom, dy); + } + + /** Outsets SkIRect by (dx, dy). + + If dx is positive, makes SkIRect wider. + If dx is negative, makes SkIRect narrower. + If dy is positive, makes SkIRect taller. + If dy is negative, makes SkIRect shorter. + + @param dx subtracted to fLeft and added from fRight + @param dy subtracted to fTop and added from fBottom + */ + void outset(int32_t dx, int32_t dy) { this->inset(-dx, -dy); } + + /** Adjusts SkIRect by adding dL to fLeft, dT to fTop, dR to fRight, and dB to fBottom. + + If dL is positive, narrows SkIRect on the left. If negative, widens it on the left. + If dT is positive, shrinks SkIRect on the top. If negative, lengthens it on the top. + If dR is positive, narrows SkIRect on the right. If negative, widens it on the right. + If dB is positive, shrinks SkIRect on the bottom. If negative, lengthens it on the bottom. + + The resulting SkIRect is not checked for validity. Thus, if the resulting SkIRect left is + greater than right, the SkIRect will be considered empty. Call sort() after this call + if that is not the desired behavior. + + @param dL offset added to fLeft + @param dT offset added to fTop + @param dR offset added to fRight + @param dB offset added to fBottom + */ + void adjust(int32_t dL, int32_t dT, int32_t dR, int32_t dB) { + fLeft = Sk32_sat_add(fLeft, dL); + fTop = Sk32_sat_add(fTop, dT); + fRight = Sk32_sat_add(fRight, dR); + fBottom = Sk32_sat_add(fBottom, dB); + } + + /** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom. + Returns false if SkIRect is empty. + + Considers input to describe constructed SkIRect: (x, y, x + 1, y + 1) and + returns true if constructed area is completely enclosed by SkIRect area. 
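Aside: a short sketch of the saturating offset/inset/outset helpers documented above (illustrative only; the coordinates are arbitrary):

    SkIRect r = SkIRect::MakeXYWH(10, 10, 100, 50);    // (10, 10, 110, 60)
    r.offset(5, -5);                                    // (15, 5, 115, 55)
    r.inset(10, 10);                                    // (25, 15, 105, 45)
    r.outset(10, 10);                                   // back to (15, 5, 115, 55)
    SkASSERT(r == SkIRect::MakeLTRB(15, 5, 115, 55));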
+ + @param x test SkIPoint x-coordinate + @param y test SkIPoint y-coordinate + @return true if (x, y) is inside SkIRect + */ + bool contains(int32_t x, int32_t y) const { + return x >= fLeft && x < fRight && y >= fTop && y < fBottom; + } + + /** Returns true if SkIRect contains r. + Returns false if SkIRect is empty or r is empty. + + SkIRect contains r when SkIRect area completely includes r area. + + @param r SkIRect contained + @return true if all sides of SkIRect are outside r + */ + bool contains(const SkIRect& r) const { + return !r.isEmpty() && !this->isEmpty() && // check for empties + fLeft <= r.fLeft && fTop <= r.fTop && + fRight >= r.fRight && fBottom >= r.fBottom; + } + + /** Returns true if SkIRect contains r. + Returns false if SkIRect is empty or r is empty. + + SkIRect contains r when SkIRect area completely includes r area. + + @param r SkRect contained + @return true if all sides of SkIRect are outside r + */ + inline bool contains(const SkRect& r) const; + + /** Returns true if SkIRect contains construction. + Asserts if SkIRect is empty or construction is empty, and if SK_DEBUG is defined. + + Return is undefined if SkIRect is empty or construction is empty. + + @param r SkIRect contained + @return true if all sides of SkIRect are outside r + */ + bool containsNoEmptyCheck(const SkIRect& r) const { + SkASSERT(fLeft < fRight && fTop < fBottom); + SkASSERT(r.fLeft < r.fRight && r.fTop < r.fBottom); + return fLeft <= r.fLeft && fTop <= r.fTop && fRight >= r.fRight && fBottom >= r.fBottom; + } + + /** Returns true if SkIRect intersects r, and sets SkIRect to intersection. + Returns false if SkIRect does not intersect r, and leaves SkIRect unchanged. + + Returns false if either r or SkIRect is empty, leaving SkIRect unchanged. + + @param r limit of result + @return true if r and SkIRect have area in common + */ + bool intersect(const SkIRect& r) { + return this->intersect(*this, r); + } + + /** Returns true if a intersects b, and sets SkIRect to intersection. + Returns false if a does not intersect b, and leaves SkIRect unchanged. + + Returns false if either a or b is empty, leaving SkIRect unchanged. + + @param a SkIRect to intersect + @param b SkIRect to intersect + @return true if a and b have area in common + */ + bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& a, const SkIRect& b); + + /** Returns true if a intersects b. + Returns false if either a or b is empty, or do not intersect. + + @param a SkIRect to intersect + @param b SkIRect to intersect + @return true if a and b have area in common + */ + static bool Intersects(const SkIRect& a, const SkIRect& b) { + return SkIRect{}.intersect(a, b); + } + + /** Sets SkIRect to the union of itself and r. + + Has no effect if r is empty. Otherwise, if SkIRect is empty, sets SkIRect to r. + + @param r expansion SkIRect + + example: https://fiddle.skia.org/c/@IRect_join_2 + */ + void join(const SkIRect& r); + + /** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps + fTop and fBottom if fTop is greater than fBottom. Result may be empty, + and width() and height() will be zero or positive. + */ + void sort() { + using std::swap; + if (fLeft > fRight) { + swap(fLeft, fRight); + } + if (fTop > fBottom) { + swap(fTop, fBottom); + } + } + + /** Returns SkIRect with fLeft and fRight swapped if fLeft is greater than fRight; and + with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty; + and width() and height() will be zero or positive. 
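Aside: intersect() both tests for overlap and narrows the rectangle in place, while join() widens it; a minimal sketch (illustrative only, not part of the vendored header):

    SkIRect a = SkIRect::MakeLTRB(0, 0, 100, 100);
    SkIRect b = SkIRect::MakeLTRB(50, 50, 150, 150);
    SkASSERT(SkIRect::Intersects(a, b));           // overlap test without mutation
    SkIRect clipped = a;
    if (clipped.intersect(b)) {                    // narrows clipped to the common area
        SkASSERT(clipped == SkIRect::MakeLTRB(50, 50, 100, 100));
    }
    a.join(b);                                     // expands a to cover both rects
    SkASSERT(a == SkIRect::MakeLTRB(0, 0, 150, 150));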
+ + @return sorted SkIRect + */ + SkIRect makeSorted() const { + return MakeLTRB(std::min(fLeft, fRight), std::min(fTop, fBottom), + std::max(fLeft, fRight), std::max(fTop, fBottom)); + } +}; + +/** \struct SkRect + SkRect holds four SkScalar coordinates describing the upper and + lower bounds of a rectangle. SkRect may be created from outer bounds or + from position, width, and height. SkRect describes an area; if its right + is less than or equal to its left, or if its bottom is less than or equal to + its top, it is considered empty. +*/ +struct SK_API SkRect { + SkScalar fLeft; //!< smaller x-axis bounds + SkScalar fTop; //!< smaller y-axis bounds + SkScalar fRight; //!< larger x-axis bounds + SkScalar fBottom; //!< larger y-axis bounds + + /** Returns constructed SkRect set to (0, 0, 0, 0). + Many other rectangles are empty; if left is equal to or greater than right, + or if top is equal to or greater than bottom. Setting all members to zero + is a convenience, but does not designate a special empty rectangle. + + @return bounds (0, 0, 0, 0) + */ + static constexpr SkRect SK_WARN_UNUSED_RESULT MakeEmpty() { + return SkRect{0, 0, 0, 0}; + } + + /** Returns constructed SkRect set to SkScalar values (0, 0, w, h). Does not + validate input; w or h may be negative. + + Passing integer values may generate a compiler warning since SkRect cannot + represent 32-bit integers exactly. Use SkIRect for an exact integer rectangle. + + @param w SkScalar width of constructed SkRect + @param h SkScalar height of constructed SkRect + @return bounds (0, 0, w, h) + */ + static constexpr SkRect SK_WARN_UNUSED_RESULT MakeWH(SkScalar w, SkScalar h) { + return SkRect{0, 0, w, h}; + } + + /** Returns constructed SkRect set to integer values (0, 0, w, h). Does not validate + input; w or h may be negative. + + Use to avoid a compiler warning that input may lose precision when stored. + Use SkIRect for an exact integer rectangle. + + @param w integer width of constructed SkRect + @param h integer height of constructed SkRect + @return bounds (0, 0, w, h) + */ + static SkRect SK_WARN_UNUSED_RESULT MakeIWH(int w, int h) { + return {0, 0, SkIntToScalar(w), SkIntToScalar(h)}; + } + + /** Returns constructed SkRect set to (0, 0, size.width(), size.height()). Does not + validate input; size.width() or size.height() may be negative. + + @param size SkScalar values for SkRect width and height + @return bounds (0, 0, size.width(), size.height()) + */ + static constexpr SkRect SK_WARN_UNUSED_RESULT MakeSize(const SkSize& size) { + return SkRect{0, 0, size.fWidth, size.fHeight}; + } + + /** Returns constructed SkRect set to (l, t, r, b). Does not sort input; SkRect may + result in fLeft greater than fRight, or fTop greater than fBottom. + + @param l SkScalar stored in fLeft + @param t SkScalar stored in fTop + @param r SkScalar stored in fRight + @param b SkScalar stored in fBottom + @return bounds (l, t, r, b) + */ + static constexpr SkRect SK_WARN_UNUSED_RESULT MakeLTRB(SkScalar l, SkScalar t, SkScalar r, + SkScalar b) { + return SkRect {l, t, r, b}; + } + + /** Returns constructed SkRect set to (x, y, x + w, y + h). + Does not validate input; w or h may be negative. 
+ + @param x stored in fLeft + @param y stored in fTop + @param w added to x and stored in fRight + @param h added to y and stored in fBottom + @return bounds at (x, y) with width w and height h + */ + static constexpr SkRect SK_WARN_UNUSED_RESULT MakeXYWH(SkScalar x, SkScalar y, SkScalar w, + SkScalar h) { + return SkRect {x, y, x + w, y + h}; + } + + /** Returns constructed SkIRect set to (0, 0, size.width(), size.height()). + Does not validate input; size.width() or size.height() may be negative. + + @param size integer values for SkRect width and height + @return bounds (0, 0, size.width(), size.height()) + */ + static SkRect Make(const SkISize& size) { + return MakeIWH(size.width(), size.height()); + } + + /** Returns constructed SkIRect set to irect, promoting integers to scalar. + Does not validate input; fLeft may be greater than fRight, fTop may be greater + than fBottom. + + @param irect integer unsorted bounds + @return irect members converted to SkScalar + */ + static SkRect SK_WARN_UNUSED_RESULT Make(const SkIRect& irect) { + return { + SkIntToScalar(irect.fLeft), SkIntToScalar(irect.fTop), + SkIntToScalar(irect.fRight), SkIntToScalar(irect.fBottom) + }; + } + + /** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal + to or greater than fBottom. Call sort() to reverse rectangles with negative + width() or height(). + + @return true if width() or height() are zero or negative + */ + bool isEmpty() const { + // We write it as the NOT of a non-empty rect, so we will return true if any values + // are NaN. + return !(fLeft < fRight && fTop < fBottom); + } + + /** Returns true if fLeft is equal to or less than fRight, or if fTop is equal + to or less than fBottom. Call sort() to reverse rectangles with negative + width() or height(). + + @return true if width() or height() are zero or positive + */ + bool isSorted() const { return fLeft <= fRight && fTop <= fBottom; } + + /** Returns true if all values in the rectangle are finite: SK_ScalarMin or larger, + and SK_ScalarMax or smaller. + + @return true if no member is infinite or NaN + */ + bool isFinite() const { + float accum = 0; + accum *= fLeft; + accum *= fTop; + accum *= fRight; + accum *= fBottom; + + // accum is either NaN or it is finite (zero). + SkASSERT(0 == accum || SkScalarIsNaN(accum)); + + // value==value will be true iff value is not NaN + // TODO: is it faster to say !accum or accum==accum? + return !SkScalarIsNaN(accum); + } + + /** Returns left edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid. + Call sort() to reverse fLeft and fRight if needed. + + @return fLeft + */ + constexpr SkScalar x() const { return fLeft; } + + /** Returns top edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid, + and sort() to reverse fTop and fBottom if needed. + + @return fTop + */ + constexpr SkScalar y() const { return fTop; } + + /** Returns left edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid. + Call sort() to reverse fLeft and fRight if needed. + + @return fLeft + */ + constexpr SkScalar left() const { return fLeft; } + + /** Returns top edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid, + and sort() to reverse fTop and fBottom if needed. + + @return fTop + */ + constexpr SkScalar top() const { return fTop; } + + /** Returns right edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid. + Call sort() to reverse fLeft and fRight if needed. 
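Aside: the accumulator trick in isFinite() works because 0 * x is exactly 0 for every finite x, while 0 * infinity and 0 * NaN are both NaN, so a single NaN test on the product covers all four members. A small illustrative sketch, not part of the header; it uses std::numeric_limits only as a convenient source of infinity:

    // needs <limits> for std::numeric_limits
    SkRect r = SkRect::MakeXYWH(0, 0, std::numeric_limits<SkScalar>::infinity(), 10);
    SkASSERT(r.isSorted());     // ordering is fine: 0 <= +infinity
    SkASSERT(!r.isEmpty());     // it even has (infinite) area
    SkASSERT(!r.isFinite());    // but the accumulator becomes NaN, so it is not finite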
+ + @return fRight + */ + constexpr SkScalar right() const { return fRight; } + + /** Returns bottom edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid, + and sort() to reverse fTop and fBottom if needed. + + @return fBottom + */ + constexpr SkScalar bottom() const { return fBottom; } + + /** Returns span on the x-axis. This does not check if SkRect is sorted, or if + result fits in 32-bit float; result may be negative or infinity. + + @return fRight minus fLeft + */ + constexpr SkScalar width() const { return fRight - fLeft; } + + /** Returns span on the y-axis. This does not check if SkRect is sorted, or if + result fits in 32-bit float; result may be negative or infinity. + + @return fBottom minus fTop + */ + constexpr SkScalar height() const { return fBottom - fTop; } + + /** Returns average of left edge and right edge. Result does not change if SkRect + is sorted. Result may overflow to infinity if SkRect is far from the origin. + + @return midpoint on x-axis + */ + SkScalar centerX() const { + // don't use SkScalarHalf(fLeft + fBottom) as that might overflow before the 0.5 + return SkScalarHalf(fLeft) + SkScalarHalf(fRight); + } + + /** Returns average of top edge and bottom edge. Result does not change if SkRect + is sorted. + + @return midpoint on y-axis + */ + SkScalar centerY() const { + // don't use SkScalarHalf(fTop + fBottom) as that might overflow before the 0.5 + return SkScalarHalf(fTop) + SkScalarHalf(fBottom); + } + + /** Returns true if all members in a: fLeft, fTop, fRight, and fBottom; are + equal to the corresponding members in b. + + a and b are not equal if either contain NaN. a and b are equal if members + contain zeroes with different signs. + + @param a SkRect to compare + @param b SkRect to compare + @return true if members are equal + */ + friend bool operator==(const SkRect& a, const SkRect& b) { + return SkScalarsEqual((const SkScalar*)&a, (const SkScalar*)&b, 4); + } + + /** Returns true if any in a: fLeft, fTop, fRight, and fBottom; does not + equal the corresponding members in b. + + a and b are not equal if either contain NaN. a and b are equal if members + contain zeroes with different signs. + + @param a SkRect to compare + @param b SkRect to compare + @return true if members are not equal + */ + friend bool operator!=(const SkRect& a, const SkRect& b) { + return !SkScalarsEqual((const SkScalar*)&a, (const SkScalar*)&b, 4); + } + + /** Returns four points in quad that enclose SkRect ordered as: top-left, top-right, + bottom-right, bottom-left. + + TODO: Consider adding parameter to control whether quad is clockwise or counterclockwise. + + @param quad storage for corners of SkRect + + example: https://fiddle.skia.org/c/@Rect_toQuad + */ + void toQuad(SkPoint quad[4]) const; + + /** Sets SkRect to (0, 0, 0, 0). + + Many other rectangles are empty; if left is equal to or greater than right, + or if top is equal to or greater than bottom. Setting all members to zero + is a convenience, but does not designate a special empty rectangle. + */ + void setEmpty() { *this = MakeEmpty(); } + + /** Sets SkRect to src, promoting src members from integer to scalar. + Very large values in src may lose precision. + + @param src integer SkRect + */ + void set(const SkIRect& src) { + fLeft = SkIntToScalar(src.fLeft); + fTop = SkIntToScalar(src.fTop); + fRight = SkIntToScalar(src.fRight); + fBottom = SkIntToScalar(src.fBottom); + } + + /** Sets SkRect to (left, top, right, bottom). 
+ left and right are not sorted; left is not necessarily less than right. + top and bottom are not sorted; top is not necessarily less than bottom. + + @param left stored in fLeft + @param top stored in fTop + @param right stored in fRight + @param bottom stored in fBottom + */ + void setLTRB(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) { + fLeft = left; + fTop = top; + fRight = right; + fBottom = bottom; + } + + /** Sets to bounds of SkPoint array with count entries. If count is zero or smaller, + or if SkPoint array contains an infinity or NaN, sets to (0, 0, 0, 0). + + Result is either empty or sorted: fLeft is less than or equal to fRight, and + fTop is less than or equal to fBottom. + + @param pts SkPoint array + @param count entries in array + */ + void setBounds(const SkPoint pts[], int count) { + (void)this->setBoundsCheck(pts, count); + } + + /** Sets to bounds of SkPoint array with count entries. Returns false if count is + zero or smaller, or if SkPoint array contains an infinity or NaN; in these cases + sets SkRect to (0, 0, 0, 0). + + Result is either empty or sorted: fLeft is less than or equal to fRight, and + fTop is less than or equal to fBottom. + + @param pts SkPoint array + @param count entries in array + @return true if all SkPoint values are finite + + example: https://fiddle.skia.org/c/@Rect_setBoundsCheck + */ + bool setBoundsCheck(const SkPoint pts[], int count); + + /** Sets to bounds of SkPoint pts array with count entries. If any SkPoint in pts + contains infinity or NaN, all SkRect dimensions are set to NaN. + + @param pts SkPoint array + @param count entries in array + + example: https://fiddle.skia.org/c/@Rect_setBoundsNoCheck + */ + void setBoundsNoCheck(const SkPoint pts[], int count); + + /** Sets bounds to the smallest SkRect enclosing SkPoint p0 and p1. The result is + sorted and may be empty. Does not check to see if values are finite. + + @param p0 corner to include + @param p1 corner to include + */ + void set(const SkPoint& p0, const SkPoint& p1) { + fLeft = std::min(p0.fX, p1.fX); + fRight = std::max(p0.fX, p1.fX); + fTop = std::min(p0.fY, p1.fY); + fBottom = std::max(p0.fY, p1.fY); + } + + /** Sets SkRect to (x, y, x + width, y + height). + Does not validate input; width or height may be negative. + + @param x stored in fLeft + @param y stored in fTop + @param width added to x and stored in fRight + @param height added to y and stored in fBottom + */ + void setXYWH(SkScalar x, SkScalar y, SkScalar width, SkScalar height) { + fLeft = x; + fTop = y; + fRight = x + width; + fBottom = y + height; + } + + /** Sets SkRect to (0, 0, width, height). Does not validate input; + width or height may be negative. + + @param width stored in fRight + @param height stored in fBottom + */ + void setWH(SkScalar width, SkScalar height) { + fLeft = 0; + fTop = 0; + fRight = width; + fBottom = height; + } + void setIWH(int32_t width, int32_t height) { + this->setWH(SkIntToScalar(width), SkIntToScalar(height)); + } + + /** Returns SkRect offset by (dx, dy). + + If dx is negative, SkRect returned is moved to the left. + If dx is positive, SkRect returned is moved to the right. + If dy is negative, SkRect returned is moved upward. + If dy is positive, SkRect returned is moved downward. 
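Aside: setBounds() is the usual way to get a sorted bounding box of a point set; a minimal sketch (illustrative only, arbitrary points):

    SkPoint pts[] = { {40.0f, 7.0f}, {-3.0f, 22.0f}, {15.0f, -9.0f} };
    SkRect bounds;
    bounds.setBounds(pts, 3);      // sorted bounds; set to (0, 0, 0, 0) on NaN/infinite input
    SkASSERT(bounds == SkRect::MakeLTRB(-3.0f, -9.0f, 40.0f, 22.0f));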
+ + @param dx added to fLeft and fRight + @param dy added to fTop and fBottom + @return SkRect offset on axes, with original width and height + */ + constexpr SkRect makeOffset(SkScalar dx, SkScalar dy) const { + return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy); + } + + /** Returns SkRect offset by v. + + @param v added to rect + @return SkRect offset on axes, with original width and height + */ + constexpr SkRect makeOffset(SkVector v) const { return this->makeOffset(v.x(), v.y()); } + + /** Returns SkRect, inset by (dx, dy). + + If dx is negative, SkRect returned is wider. + If dx is positive, SkRect returned is narrower. + If dy is negative, SkRect returned is taller. + If dy is positive, SkRect returned is shorter. + + @param dx added to fLeft and subtracted from fRight + @param dy added to fTop and subtracted from fBottom + @return SkRect inset symmetrically left and right, top and bottom + */ + SkRect makeInset(SkScalar dx, SkScalar dy) const { + return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy); + } + + /** Returns SkRect, outset by (dx, dy). + + If dx is negative, SkRect returned is narrower. + If dx is positive, SkRect returned is wider. + If dy is negative, SkRect returned is shorter. + If dy is positive, SkRect returned is taller. + + @param dx subtracted to fLeft and added from fRight + @param dy subtracted to fTop and added from fBottom + @return SkRect outset symmetrically left and right, top and bottom + */ + SkRect makeOutset(SkScalar dx, SkScalar dy) const { + return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy); + } + + /** Offsets SkRect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom. + + If dx is negative, moves SkRect to the left. + If dx is positive, moves SkRect to the right. + If dy is negative, moves SkRect upward. + If dy is positive, moves SkRect downward. + + @param dx offset added to fLeft and fRight + @param dy offset added to fTop and fBottom + */ + void offset(SkScalar dx, SkScalar dy) { + fLeft += dx; + fTop += dy; + fRight += dx; + fBottom += dy; + } + + /** Offsets SkRect by adding delta.fX to fLeft, fRight; and by adding delta.fY to + fTop, fBottom. + + If delta.fX is negative, moves SkRect to the left. + If delta.fX is positive, moves SkRect to the right. + If delta.fY is negative, moves SkRect upward. + If delta.fY is positive, moves SkRect downward. + + @param delta added to SkRect + */ + void offset(const SkPoint& delta) { + this->offset(delta.fX, delta.fY); + } + + /** Offsets SkRect so that fLeft equals newX, and fTop equals newY. width and height + are unchanged. + + @param newX stored in fLeft, preserving width() + @param newY stored in fTop, preserving height() + */ + void offsetTo(SkScalar newX, SkScalar newY) { + fRight += newX - fLeft; + fBottom += newY - fTop; + fLeft = newX; + fTop = newY; + } + + /** Insets SkRect by (dx, dy). + + If dx is positive, makes SkRect narrower. + If dx is negative, makes SkRect wider. + If dy is positive, makes SkRect shorter. + If dy is negative, makes SkRect taller. + + @param dx added to fLeft and subtracted from fRight + @param dy added to fTop and subtracted from fBottom + */ + void inset(SkScalar dx, SkScalar dy) { + fLeft += dx; + fTop += dy; + fRight -= dx; + fBottom -= dy; + } + + /** Outsets SkRect by (dx, dy). + + If dx is positive, makes SkRect wider. + If dx is negative, makes SkRect narrower. + If dy is positive, makes SkRect taller. + If dy is negative, makes SkRect shorter. 
+ + @param dx subtracted to fLeft and added from fRight + @param dy subtracted to fTop and added from fBottom + */ + void outset(SkScalar dx, SkScalar dy) { this->inset(-dx, -dy); } + + /** Returns true if SkRect intersects r, and sets SkRect to intersection. + Returns false if SkRect does not intersect r, and leaves SkRect unchanged. + + Returns false if either r or SkRect is empty, leaving SkRect unchanged. + + @param r limit of result + @return true if r and SkRect have area in common + + example: https://fiddle.skia.org/c/@Rect_intersect + */ + bool intersect(const SkRect& r); + + /** Returns true if a intersects b, and sets SkRect to intersection. + Returns false if a does not intersect b, and leaves SkRect unchanged. + + Returns false if either a or b is empty, leaving SkRect unchanged. + + @param a SkRect to intersect + @param b SkRect to intersect + @return true if a and b have area in common + */ + bool SK_WARN_UNUSED_RESULT intersect(const SkRect& a, const SkRect& b); + + +private: + static bool Intersects(SkScalar al, SkScalar at, SkScalar ar, SkScalar ab, + SkScalar bl, SkScalar bt, SkScalar br, SkScalar bb) { + SkScalar L = std::max(al, bl); + SkScalar R = std::min(ar, br); + SkScalar T = std::max(at, bt); + SkScalar B = std::min(ab, bb); + return L < R && T < B; + } + +public: + + /** Returns true if SkRect intersects r. + Returns false if either r or SkRect is empty, or do not intersect. + + @param r SkRect to intersect + @return true if r and SkRect have area in common + */ + bool intersects(const SkRect& r) const { + return Intersects(fLeft, fTop, fRight, fBottom, + r.fLeft, r.fTop, r.fRight, r.fBottom); + } + + /** Returns true if a intersects b. + Returns false if either a or b is empty, or do not intersect. + + @param a SkRect to intersect + @param b SkRect to intersect + @return true if a and b have area in common + */ + static bool Intersects(const SkRect& a, const SkRect& b) { + return Intersects(a.fLeft, a.fTop, a.fRight, a.fBottom, + b.fLeft, b.fTop, b.fRight, b.fBottom); + } + + /** Sets SkRect to the union of itself and r. + + Has no effect if r is empty. Otherwise, if SkRect is empty, sets + SkRect to r. + + @param r expansion SkRect + + example: https://fiddle.skia.org/c/@Rect_join_2 + */ + void join(const SkRect& r); + + /** Sets SkRect to the union of itself and r. + + Asserts if r is empty and SK_DEBUG is defined. + If SkRect is empty, sets SkRect to r. + + May produce incorrect results if r is empty. + + @param r expansion SkRect + */ + void joinNonEmptyArg(const SkRect& r) { + SkASSERT(!r.isEmpty()); + // if we are empty, just assign + if (fLeft >= fRight || fTop >= fBottom) { + *this = r; + } else { + this->joinPossiblyEmptyRect(r); + } + } + + /** Sets SkRect to the union of itself and the construction. + + May produce incorrect results if SkRect or r is empty. + + @param r expansion SkRect + */ + void joinPossiblyEmptyRect(const SkRect& r) { + fLeft = std::min(fLeft, r.left()); + fTop = std::min(fTop, r.top()); + fRight = std::max(fRight, r.right()); + fBottom = std::max(fBottom, r.bottom()); + } + + /** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom. + Returns false if SkRect is empty. + + @param x test SkPoint x-coordinate + @param y test SkPoint y-coordinate + @return true if (x, y) is inside SkRect + */ + bool contains(SkScalar x, SkScalar y) const { + return x >= fLeft && x < fRight && y >= fTop && y < fBottom; + } + + /** Returns true if SkRect contains r. + Returns false if SkRect is empty or r is empty. 
+ + SkRect contains r when SkRect area completely includes r area. + + @param r SkRect contained + @return true if all sides of SkRect are outside r + */ + bool contains(const SkRect& r) const { + // todo: can we eliminate the this->isEmpty check? + return !r.isEmpty() && !this->isEmpty() && + fLeft <= r.fLeft && fTop <= r.fTop && + fRight >= r.fRight && fBottom >= r.fBottom; + } + + /** Returns true if SkRect contains r. + Returns false if SkRect is empty or r is empty. + + SkRect contains r when SkRect area completely includes r area. + + @param r SkIRect contained + @return true if all sides of SkRect are outside r + */ + bool contains(const SkIRect& r) const { + // todo: can we eliminate the this->isEmpty check? + return !r.isEmpty() && !this->isEmpty() && + fLeft <= SkIntToScalar(r.fLeft) && fTop <= SkIntToScalar(r.fTop) && + fRight >= SkIntToScalar(r.fRight) && fBottom >= SkIntToScalar(r.fBottom); + } + + /** Sets SkIRect by adding 0.5 and discarding the fractional portion of SkRect + members, using (SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop), + SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom)). + + @param dst storage for SkIRect + */ + void round(SkIRect* dst) const { + SkASSERT(dst); + dst->setLTRB(SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop), + SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom)); + } + + /** Sets SkIRect by discarding the fractional portion of fLeft and fTop; and rounding + up fRight and fBottom, using + (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop), + SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)). + + @param dst storage for SkIRect + */ + void roundOut(SkIRect* dst) const { + SkASSERT(dst); + dst->setLTRB(SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop), + SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)); + } + + /** Sets SkRect by discarding the fractional portion of fLeft and fTop; and rounding + up fRight and fBottom, using + (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop), + SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)). + + @param dst storage for SkRect + */ + void roundOut(SkRect* dst) const { + dst->setLTRB(SkScalarFloorToScalar(fLeft), SkScalarFloorToScalar(fTop), + SkScalarCeilToScalar(fRight), SkScalarCeilToScalar(fBottom)); + } + + /** Sets SkRect by rounding up fLeft and fTop; and discarding the fractional portion + of fRight and fBottom, using + (SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop), + SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom)). + + @param dst storage for SkIRect + */ + void roundIn(SkIRect* dst) const { + SkASSERT(dst); + dst->setLTRB(SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop), + SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom)); + } + + /** Returns SkIRect by adding 0.5 and discarding the fractional portion of SkRect + members, using (SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop), + SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom)). + + @return rounded SkIRect + */ + SkIRect round() const { + SkIRect ir; + this->round(&ir); + return ir; + } + + /** Sets SkIRect by discarding the fractional portion of fLeft and fTop; and rounding + up fRight and fBottom, using + (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop), + SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)). 
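Aside: the three rounding flavors differ in which direction each edge moves; a quick sketch (illustrative only):

    SkRect r = SkRect::MakeLTRB(0.4f, 0.4f, 10.4f, 10.4f);
    SkASSERT(r.round()    == SkIRect::MakeLTRB(0, 0, 10, 10));  // nearest integer per edge
    SkASSERT(r.roundOut() == SkIRect::MakeLTRB(0, 0, 11, 11));  // never smaller than r
    SkASSERT(r.roundIn()  == SkIRect::MakeLTRB(1, 1, 10, 10));  // never larger than r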
+ + @return rounded SkIRect + */ + SkIRect roundOut() const { + SkIRect ir; + this->roundOut(&ir); + return ir; + } + /** Sets SkIRect by rounding up fLeft and fTop; and discarding the fractional portion + of fRight and fBottom, using + (SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop), + SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom)). + + @return rounded SkIRect + */ + SkIRect roundIn() const { + SkIRect ir; + this->roundIn(&ir); + return ir; + } + + /** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps + fTop and fBottom if fTop is greater than fBottom. Result may be empty; + and width() and height() will be zero or positive. + */ + void sort() { + using std::swap; + if (fLeft > fRight) { + swap(fLeft, fRight); + } + + if (fTop > fBottom) { + swap(fTop, fBottom); + } + } + + /** Returns SkRect with fLeft and fRight swapped if fLeft is greater than fRight; and + with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty; + and width() and height() will be zero or positive. + + @return sorted SkRect + */ + SkRect makeSorted() const { + return MakeLTRB(std::min(fLeft, fRight), std::min(fTop, fBottom), + std::max(fLeft, fRight), std::max(fTop, fBottom)); + } + + /** Returns pointer to first scalar in SkRect, to treat it as an array with four + entries. + + @return pointer to fLeft + */ + const SkScalar* asScalars() const { return &fLeft; } + + /** Writes text representation of SkRect to standard output. Set asHex to true to + generate exact binary representations of floating point numbers. + + @param asHex true if SkScalar values are written as hexadecimal + + example: https://fiddle.skia.org/c/@Rect_dump + */ + void dump(bool asHex) const; + + /** Writes text representation of SkRect to standard output. The representation may be + directly compiled as C++ code. Floating point values are written + with limited precision; it may not be possible to reconstruct original SkRect + from output. + */ + void dump() const { this->dump(false); } + + /** Writes text representation of SkRect to standard output. The representation may be + directly compiled as C++ code. Floating point values are written + in hexadecimal to preserve their exact bit pattern. The output reconstructs the + original SkRect. + + Use instead of dump() when submitting + */ + void dumpHex() const { this->dump(true); } +}; + +inline bool SkIRect::contains(const SkRect& r) const { + return !r.isEmpty() && !this->isEmpty() && // check for empties + (SkScalar)fLeft <= r.fLeft && (SkScalar)fTop <= r.fTop && + (SkScalar)fRight >= r.fRight && (SkScalar)fBottom >= r.fBottom; +} + +#endif diff --git a/src/deps/skia/include/core/SkRefCnt.h b/src/deps/skia/include/core/SkRefCnt.h new file mode 100644 index 000000000..bcb75d769 --- /dev/null +++ b/src/deps/skia/include/core/SkRefCnt.h @@ -0,0 +1,382 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRefCnt_DEFINED +#define SkRefCnt_DEFINED + +#include "include/core/SkTypes.h" + +#include <atomic> // std::atomic, std::memory_order_* +#include <cstddef> // std::nullptr_t +#include <iosfwd> // std::basic_ostream +#include <memory> // TODO: unused +#include <type_traits> // std::enable_if, std::is_convertible +#include <utility> // std::forward, std::swap + +/** \class SkRefCntBase + + SkRefCntBase is the base class for objects that may be shared by multiple + objects. 
When an existing owner wants to share a reference, it calls ref(). + When an owner wants to release its reference, it calls unref(). When the + shared object's reference count goes to zero as the result of an unref() + call, its (virtual) destructor is called. It is an error for the + destructor to be called explicitly (or via the object going out of scope on + the stack or calling delete) if getRefCnt() > 1. +*/ +class SK_API SkRefCntBase { +public: + /** Default construct, initializing the reference count to 1. + */ + SkRefCntBase() : fRefCnt(1) {} + + /** Destruct, asserting that the reference count is 1. + */ + virtual ~SkRefCntBase() { + #ifdef SK_DEBUG + SkASSERTF(this->getRefCnt() == 1, "fRefCnt was %d", this->getRefCnt()); + // illegal value, to catch us if we reuse after delete + fRefCnt.store(0, std::memory_order_relaxed); + #endif + } + + /** May return true if the caller is the only owner. + * Ensures that all previous owner's actions are complete. + */ + bool unique() const { + if (1 == fRefCnt.load(std::memory_order_acquire)) { + // The acquire barrier is only really needed if we return true. It + // prevents code conditioned on the result of unique() from running + // until previous owners are all totally done calling unref(). + return true; + } + return false; + } + + /** Increment the reference count. Must be balanced by a call to unref(). + */ + void ref() const { + SkASSERT(this->getRefCnt() > 0); + // No barrier required. + (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); + } + + /** Decrement the reference count. If the reference count is 1 before the + decrement, then delete the object. Note that if this is the case, then + the object needs to have been allocated via new, and not on the stack. + */ + void unref() const { + SkASSERT(this->getRefCnt() > 0); + // A release here acts in place of all releases we "should" have been doing in ref(). + if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) { + // Like unique(), the acquire is only needed on success, to make sure + // code in internal_dispose() doesn't happen before the decrement. + this->internal_dispose(); + } + } + +private: + +#ifdef SK_DEBUG + /** Return the reference count. Use only for debugging. */ + int32_t getRefCnt() const { + return fRefCnt.load(std::memory_order_relaxed); + } +#endif + + /** + * Called when the ref count goes to 0. + */ + virtual void internal_dispose() const { + #ifdef SK_DEBUG + SkASSERT(0 == this->getRefCnt()); + fRefCnt.store(1, std::memory_order_relaxed); + #endif + delete this; + } + + // The following friends are those which override internal_dispose() + // and conditionally call SkRefCnt::internal_dispose(). + friend class SkWeakRefCnt; + + mutable std::atomic<int32_t> fRefCnt; + + SkRefCntBase(SkRefCntBase&&) = delete; + SkRefCntBase(const SkRefCntBase&) = delete; + SkRefCntBase& operator=(SkRefCntBase&&) = delete; + SkRefCntBase& operator=(const SkRefCntBase&) = delete; +}; + +#ifdef SK_REF_CNT_MIXIN_INCLUDE +// It is the responsibility of the following include to define the type SkRefCnt. +// This SkRefCnt should normally derive from SkRefCntBase. +#include SK_REF_CNT_MIXIN_INCLUDE +#else +class SK_API SkRefCnt : public SkRefCntBase { + // "#include SK_REF_CNT_MIXIN_INCLUDE" doesn't work with this build system. + #if defined(SK_BUILD_FOR_GOOGLE3) + public: + void deref() const { this->unref(); } + #endif +}; +#endif + +/////////////////////////////////////////////////////////////////////////////// + +/** Call obj->ref() and return obj. 
The obj must not be nullptr. + */ +template <typename T> static inline T* SkRef(T* obj) { + SkASSERT(obj); + obj->ref(); + return obj; +} + +/** Check if the argument is non-null, and if so, call obj->ref() and return obj. + */ +template <typename T> static inline T* SkSafeRef(T* obj) { + if (obj) { + obj->ref(); + } + return obj; +} + +/** Check if the argument is non-null, and if so, call obj->unref() + */ +template <typename T> static inline void SkSafeUnref(T* obj) { + if (obj) { + obj->unref(); + } +} + +/////////////////////////////////////////////////////////////////////////////// + +// This is a variant of SkRefCnt that's Not Virtual, so weighs 4 bytes instead of 8 or 16. +// There's only benefit to using this if the deriving class does not otherwise need a vtable. +template <typename Derived> +class SkNVRefCnt { +public: + SkNVRefCnt() : fRefCnt(1) {} + ~SkNVRefCnt() { + #ifdef SK_DEBUG + int rc = fRefCnt.load(std::memory_order_relaxed); + SkASSERTF(rc == 1, "NVRefCnt was %d", rc); + #endif + } + + // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same: + // - unique() needs acquire when it returns true, and no barrier if it returns false; + // - ref() doesn't need any barrier; + // - unref() needs a release barrier, and an acquire if it's going to call delete. + + bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); } + void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); } + void unref() const { + if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) { + // restore the 1 for our destructor's assert + SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed)); + delete (const Derived*)this; + } + } + void deref() const { this->unref(); } + + // This must be used with caution. It is only valid to call this when 'threadIsolatedTestCnt' + // refs are known to be isolated to the current thread. That is, it is known that there are at + // least 'threadIsolatedTestCnt' refs for which no other thread may make a balancing unref() + // call. Assuming the contract is followed, if this returns false then no other thread has + // ownership of this. If it returns true then another thread *may* have ownership. + bool refCntGreaterThan(int32_t threadIsolatedTestCnt) const { + int cnt = fRefCnt.load(std::memory_order_acquire); + // If this fails then the above contract has been violated. + SkASSERT(cnt >= threadIsolatedTestCnt); + return cnt > threadIsolatedTestCnt; + } + +private: + mutable std::atomic<int32_t> fRefCnt; + + SkNVRefCnt(SkNVRefCnt&&) = delete; + SkNVRefCnt(const SkNVRefCnt&) = delete; + SkNVRefCnt& operator=(SkNVRefCnt&&) = delete; + SkNVRefCnt& operator=(const SkNVRefCnt&) = delete; +}; + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Shared pointer class to wrap classes that support a ref()/unref() interface. + * + * This can be used for classes inheriting from SkRefCnt, but it also works for other + * classes that match the interface, but have different internal choices: e.g. the hosted class + * may have its ref/unref be thread-safe, but that is not assumed/imposed by sk_sp. + */ +template <typename T> class sk_sp { +public: + using element_type = T; + + constexpr sk_sp() : fPtr(nullptr) {} + constexpr sk_sp(std::nullptr_t) : fPtr(nullptr) {} + + /** + * Shares the underlying object by calling ref(), so that both the argument and the newly + * created sk_sp both have a reference to it. 
+ */ + sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {} + template <typename U, + typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type> + sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {} + + /** + * Move the underlying object from the argument to the newly created sk_sp. Afterwards only + * the new sk_sp will have a reference to the object, and the argument will point to null. + * No call to ref() or unref() will be made. + */ + sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {} + template <typename U, + typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type> + sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {} + + /** + * Adopt the bare pointer into the newly created sk_sp. + * No call to ref() or unref() will be made. + */ + explicit sk_sp(T* obj) : fPtr(obj) {} + + /** + * Calls unref() on the underlying object pointer. + */ + ~sk_sp() { + SkSafeUnref(fPtr); + SkDEBUGCODE(fPtr = nullptr); + } + + sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; } + + /** + * Shares the underlying object referenced by the argument by calling ref() on it. If this + * sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that + * object. + */ + sk_sp<T>& operator=(const sk_sp<T>& that) { + if (this != &that) { + this->reset(SkSafeRef(that.get())); + } + return *this; + } + template <typename U, + typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type> + sk_sp<T>& operator=(const sk_sp<U>& that) { + this->reset(SkSafeRef(that.get())); + return *this; + } + + /** + * Move the underlying object from the argument to the sk_sp. If the sk_sp previously held + * a reference to another object, unref() will be called on that object. No call to ref() + * will be made. + */ + sk_sp<T>& operator=(sk_sp<T>&& that) { + this->reset(that.release()); + return *this; + } + template <typename U, + typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type> + sk_sp<T>& operator=(sk_sp<U>&& that) { + this->reset(that.release()); + return *this; + } + + T& operator*() const { + SkASSERT(this->get() != nullptr); + return *this->get(); + } + + explicit operator bool() const { return this->get() != nullptr; } + + T* get() const { return fPtr; } + T* operator->() const { return fPtr; } + + /** + * Adopt the new bare pointer, and call unref() on any previously held object (if not null). + * No call to ref() will be made. + */ + void reset(T* ptr = nullptr) { + // Calling fPtr->unref() may call this->~() or this->reset(T*). + // http://wg21.cmeerw.net/lwg/issue998 + // http://wg21.cmeerw.net/lwg/issue2262 + T* oldPtr = fPtr; + fPtr = ptr; + SkSafeUnref(oldPtr); + } + + /** + * Return the bare pointer, and set the internal object pointer to nullptr. + * The caller must assume ownership of the object, and manage its reference count directly. + * No call to unref() will be made. 
+ */ + T* SK_WARN_UNUSED_RESULT release() { + T* ptr = fPtr; + fPtr = nullptr; + return ptr; + } + + void swap(sk_sp<T>& that) /*noexcept*/ { + using std::swap; + swap(fPtr, that.fPtr); + } + +private: + T* fPtr; +}; + +template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ { + a.swap(b); +} + +template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) { + return a.get() == b.get(); +} +template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ { + return !a; +} +template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ { + return !b; +} + +template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) { + return a.get() != b.get(); +} +template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ { + return static_cast<bool>(a); +} +template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ { + return static_cast<bool>(b); +} + +template <typename C, typename CT, typename T> +auto operator<<(std::basic_ostream<C, CT>& os, const sk_sp<T>& sp) -> decltype(os << sp.get()) { + return os << sp.get(); +} + +template <typename T, typename... Args> +sk_sp<T> sk_make_sp(Args&&... args) { + return sk_sp<T>(new T(std::forward<Args>(args)...)); +} + +/* + * Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null). + * + * This is different than the semantics of the constructor for sk_sp, which just wraps the ptr, + * effectively "adopting" it. + */ +template <typename T> sk_sp<T> sk_ref_sp(T* obj) { + return sk_sp<T>(SkSafeRef(obj)); +} + +template <typename T> sk_sp<T> sk_ref_sp(const T* obj) { + return sk_sp<T>(const_cast<T*>(SkSafeRef(obj))); +} + +#endif diff --git a/src/deps/skia/include/core/SkRegion.h b/src/deps/skia/include/core/SkRegion.h new file mode 100644 index 000000000..ab326d98f --- /dev/null +++ b/src/deps/skia/include/core/SkRegion.h @@ -0,0 +1,672 @@ +/* + * Copyright 2005 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRegion_DEFINED +#define SkRegion_DEFINED + +#include "include/core/SkRect.h" + +class SkPath; +class SkRgnBuilder; + +/** \class SkRegion + SkRegion describes the set of pixels used to clip SkCanvas. SkRegion is compact, + efficiently storing a single integer rectangle, or a run length encoded array + of rectangles. SkRegion may reduce the current SkCanvas clip, or may be drawn as + one or more integer rectangles. SkRegion iterator returns the scan lines or + rectangles contained by it, optionally intersecting a bounding rectangle. +*/ +class SK_API SkRegion { + typedef int32_t RunType; +public: + + /** Constructs an empty SkRegion. SkRegion is set to empty bounds + at (0, 0) with zero width and height. + + @return empty SkRegion + + example: https://fiddle.skia.org/c/@Region_empty_constructor + */ + SkRegion(); + + /** Constructs a copy of an existing region. + Copy constructor makes two regions identical by value. Internally, region and + the returned result share pointer values. The underlying SkRect array is + copied when modified. + + Creating a SkRegion copy is very efficient and never allocates memory. + SkRegion are always copied by value from the interface; the underlying shared + pointers are not exposed. 
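Aside: a minimal sk_sp sketch contrasting adopting a bare pointer with sharing one; Node is a hypothetical SkRefCnt subclass used only for illustration:

    class Node : public SkRefCnt {};               // hypothetical ref-counted type

    sk_sp<Node> a = sk_make_sp<Node>();            // ref count == 1, owned by a
    sk_sp<Node> b = a;                             // copy shares ownership: ref count == 2
    b.reset();                                     // back to 1
    sk_sp<Node> adopted(a.release());              // adopt the bare pointer: no extra ref()
    sk_sp<Node> shared = sk_ref_sp(adopted.get()); // share it: ref() is called, count == 2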
+ + @param region SkRegion to copy by value + @return copy of SkRegion + + example: https://fiddle.skia.org/c/@Region_copy_const_SkRegion + */ + SkRegion(const SkRegion& region); + + /** Constructs a rectangular SkRegion matching the bounds of rect. + + @param rect bounds of constructed SkRegion + @return rectangular SkRegion + + example: https://fiddle.skia.org/c/@Region_copy_const_SkIRect + */ + explicit SkRegion(const SkIRect& rect); + + /** Releases ownership of any shared data and deletes data if SkRegion is sole owner. + + example: https://fiddle.skia.org/c/@Region_destructor + */ + ~SkRegion(); + + /** Constructs a copy of an existing region. + Makes two regions identical by value. Internally, region and + the returned result share pointer values. The underlying SkRect array is + copied when modified. + + Creating a SkRegion copy is very efficient and never allocates memory. + SkRegion are always copied by value from the interface; the underlying shared + pointers are not exposed. + + @param region SkRegion to copy by value + @return SkRegion to copy by value + + example: https://fiddle.skia.org/c/@Region_copy_operator + */ + SkRegion& operator=(const SkRegion& region); + + /** Compares SkRegion and other; returns true if they enclose exactly + the same area. + + @param other SkRegion to compare + @return true if SkRegion pair are equivalent + + example: https://fiddle.skia.org/c/@Region_equal1_operator + */ + bool operator==(const SkRegion& other) const; + + /** Compares SkRegion and other; returns true if they do not enclose the same area. + + @param other SkRegion to compare + @return true if SkRegion pair are not equivalent + */ + bool operator!=(const SkRegion& other) const { + return !(*this == other); + } + + /** Sets SkRegion to src, and returns true if src bounds is not empty. + This makes SkRegion and src identical by value. Internally, + SkRegion and src share pointer values. The underlying SkRect array is + copied when modified. + + Creating a SkRegion copy is very efficient and never allocates memory. + SkRegion are always copied by value from the interface; the underlying shared + pointers are not exposed. + + @param src SkRegion to copy + @return copy of src + */ + bool set(const SkRegion& src) { + *this = src; + return !this->isEmpty(); + } + + /** Exchanges SkIRect array of SkRegion and other. swap() internally exchanges pointers, + so it is lightweight and does not allocate memory. + + swap() usage has largely been replaced by operator=(const SkRegion& region). + SkPath do not copy their content on assignment until they are written to, + making assignment as efficient as swap(). + + @param other operator=(const SkRegion& region) set + + example: https://fiddle.skia.org/c/@Region_swap + */ + void swap(SkRegion& other); + + /** Returns true if SkRegion is empty. + Empty SkRegion has bounds width or height less than or equal to zero. + SkRegion() constructs empty SkRegion; setEmpty() + and setRect() with dimensionless data make SkRegion empty. + + @return true if bounds has no width or height + */ + bool isEmpty() const { return fRunHead == emptyRunHeadPtr(); } + + /** Returns true if SkRegion is one SkIRect with positive dimensions. + + @return true if SkRegion contains one SkIRect + */ + bool isRect() const { return fRunHead == kRectRunHeadPtr; } + + /** Returns true if SkRegion is described by more than one rectangle. 
+ + @return true if SkRegion contains more than one SkIRect + */ + bool isComplex() const { return !this->isEmpty() && !this->isRect(); } + + /** Returns minimum and maximum axes values of SkIRect array. + Returns (0, 0, 0, 0) if SkRegion is empty. + + @return combined bounds of all SkIRect elements + */ + const SkIRect& getBounds() const { return fBounds; } + + /** Returns a value that increases with the number of + elements in SkRegion. Returns zero if SkRegion is empty. + Returns one if SkRegion equals SkIRect; otherwise, returns + value greater than one indicating that SkRegion is complex. + + Call to compare SkRegion for relative complexity. + + @return relative complexity + + example: https://fiddle.skia.org/c/@Region_computeRegionComplexity + */ + int computeRegionComplexity() const; + + /** Appends outline of SkRegion to path. + Returns true if SkRegion is not empty; otherwise, returns false, and leaves path + unmodified. + + @param path SkPath to append to + @return true if path changed + + example: https://fiddle.skia.org/c/@Region_getBoundaryPath + */ + bool getBoundaryPath(SkPath* path) const; + + /** Constructs an empty SkRegion. SkRegion is set to empty bounds + at (0, 0) with zero width and height. Always returns false. + + @return false + + example: https://fiddle.skia.org/c/@Region_setEmpty + */ + bool setEmpty(); + + /** Constructs a rectangular SkRegion matching the bounds of rect. + If rect is empty, constructs empty and returns false. + + @param rect bounds of constructed SkRegion + @return true if rect is not empty + + example: https://fiddle.skia.org/c/@Region_setRect + */ + bool setRect(const SkIRect& rect); + + /** Constructs SkRegion as the union of SkIRect in rects array. If count is + zero, constructs empty SkRegion. Returns false if constructed SkRegion is empty. + + May be faster than repeated calls to op(). + + @param rects array of SkIRect + @param count array size + @return true if constructed SkRegion is not empty + + example: https://fiddle.skia.org/c/@Region_setRects + */ + bool setRects(const SkIRect rects[], int count); + + /** Constructs a copy of an existing region. + Makes two regions identical by value. Internally, region and + the returned result share pointer values. The underlying SkRect array is + copied when modified. + + Creating a SkRegion copy is very efficient and never allocates memory. + SkRegion are always copied by value from the interface; the underlying shared + pointers are not exposed. + + @param region SkRegion to copy by value + @return SkRegion to copy by value + + example: https://fiddle.skia.org/c/@Region_setRegion + */ + bool setRegion(const SkRegion& region); + + /** Constructs SkRegion to match outline of path within clip. + Returns false if constructed SkRegion is empty. + + Constructed SkRegion draws the same pixels as path through clip when + anti-aliasing is disabled. + + @param path SkPath providing outline + @param clip SkRegion containing path + @return true if constructed SkRegion is not empty + + example: https://fiddle.skia.org/c/@Region_setPath + */ + bool setPath(const SkPath& path, const SkRegion& clip); + + /** Returns true if SkRegion intersects rect. + Returns false if either rect or SkRegion is empty, or do not intersect. + + @param rect SkIRect to intersect + @return true if rect and SkRegion have area in common + + example: https://fiddle.skia.org/c/@Region_intersects + */ + bool intersects(const SkIRect& rect) const; + + /** Returns true if SkRegion intersects other. 
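// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): a small sketch of the setRect/setRects
// entry points documented above. SkIRect::MakeWH/MakeXYWH come from SkRect.h,
// which is outside this hunk.
#include "include/core/SkRegion.h"

void region_building_sketch() {
    SkRegion single;
    single.setRect(SkIRect::MakeWH(100, 100));      // one rect: isRect() == true

    const SkIRect tiles[] = {
        SkIRect::MakeXYWH(  0, 0, 50, 50),
        SkIRect::MakeXYWH(200, 0, 50, 50),          // disjoint, so the result is complex
    };
    SkRegion scattered;
    scattered.setRects(tiles, 2);

    SkASSERT(single.isRect());
    SkASSERT(scattered.isComplex());
    SkASSERT(scattered.getBounds() == SkIRect::MakeXYWH(0, 0, 250, 50));
}
// ---------------------------------------------------------------------------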
+ Returns false if either other or SkRegion is empty, or do not intersect. + + @param other SkRegion to intersect + @return true if other and SkRegion have area in common + + example: https://fiddle.skia.org/c/@Region_intersects_2 + */ + bool intersects(const SkRegion& other) const; + + /** Returns true if SkIPoint (x, y) is inside SkRegion. + Returns false if SkRegion is empty. + + @param x test SkIPoint x-coordinate + @param y test SkIPoint y-coordinate + @return true if (x, y) is inside SkRegion + + example: https://fiddle.skia.org/c/@Region_contains + */ + bool contains(int32_t x, int32_t y) const; + + /** Returns true if other is completely inside SkRegion. + Returns false if SkRegion or other is empty. + + @param other SkIRect to contain + @return true if other is inside SkRegion + + example: https://fiddle.skia.org/c/@Region_contains_2 + */ + bool contains(const SkIRect& other) const; + + /** Returns true if other is completely inside SkRegion. + Returns false if SkRegion or other is empty. + + @param other SkRegion to contain + @return true if other is inside SkRegion + + example: https://fiddle.skia.org/c/@Region_contains_3 + */ + bool contains(const SkRegion& other) const; + + /** Returns true if SkRegion is a single rectangle and contains r. + May return false even though SkRegion contains r. + + @param r SkIRect to contain + @return true quickly if r points are equal or inside + */ + bool quickContains(const SkIRect& r) const { + SkASSERT(this->isEmpty() == fBounds.isEmpty()); // valid region + + return r.fLeft < r.fRight && r.fTop < r.fBottom && + fRunHead == kRectRunHeadPtr && // this->isRect() + /* fBounds.contains(left, top, right, bottom); */ + fBounds.fLeft <= r.fLeft && fBounds.fTop <= r.fTop && + fBounds.fRight >= r.fRight && fBounds.fBottom >= r.fBottom; + } + + /** Returns true if SkRegion does not intersect rect. + Returns true if rect is empty or SkRegion is empty. + May return false even though SkRegion does not intersect rect. + + @param rect SkIRect to intersect + @return true if rect does not intersect + */ + bool quickReject(const SkIRect& rect) const { + return this->isEmpty() || rect.isEmpty() || + !SkIRect::Intersects(fBounds, rect); + } + + /** Returns true if SkRegion does not intersect rgn. + Returns true if rgn is empty or SkRegion is empty. + May return false even though SkRegion does not intersect rgn. + + @param rgn SkRegion to intersect + @return true if rgn does not intersect + */ + bool quickReject(const SkRegion& rgn) const { + return this->isEmpty() || rgn.isEmpty() || + !SkIRect::Intersects(fBounds, rgn.fBounds); + } + + /** Offsets SkRegion by ivector (dx, dy). Has no effect if SkRegion is empty. + + @param dx x-axis offset + @param dy y-axis offset + */ + void translate(int dx, int dy) { this->translate(dx, dy, this); } + + /** Offsets SkRegion by ivector (dx, dy), writing result to dst. SkRegion may be passed + as dst parameter, translating SkRegion in place. Has no effect if dst is nullptr. + If SkRegion is empty, sets dst to empty. + + @param dx x-axis offset + @param dy y-axis offset + @param dst translated result + + example: https://fiddle.skia.org/c/@Region_translate_2 + */ + void translate(int dx, int dy, SkRegion* dst) const; + + /** \enum SkRegion::Op + The logical operations that can be performed when combining two SkRegion. 
+ */ + enum Op { + kDifference_Op, //!< target minus operand + kIntersect_Op, //!< target intersected with operand + kUnion_Op, //!< target unioned with operand + kXOR_Op, //!< target exclusive or with operand + kReverseDifference_Op, //!< operand minus target + kReplace_Op, //!< replace target with operand + kLastOp = kReplace_Op, //!< last operator + }; + + static const int kOpCnt = kLastOp + 1; + + /** Replaces SkRegion with the result of SkRegion op rect. + Returns true if replaced SkRegion is not empty. + + @param rect SkIRect operand + @return false if result is empty + */ + bool op(const SkIRect& rect, Op op) { + if (this->isRect() && kIntersect_Op == op) { + if (!fBounds.intersect(rect)) { + return this->setEmpty(); + } + return true; + } + return this->op(*this, rect, op); + } + + /** Replaces SkRegion with the result of SkRegion op rgn. + Returns true if replaced SkRegion is not empty. + + @param rgn SkRegion operand + @return false if result is empty + */ + bool op(const SkRegion& rgn, Op op) { return this->op(*this, rgn, op); } + + /** Replaces SkRegion with the result of rect op rgn. + Returns true if replaced SkRegion is not empty. + + @param rect SkIRect operand + @param rgn SkRegion operand + @return false if result is empty + + example: https://fiddle.skia.org/c/@Region_op_4 + */ + bool op(const SkIRect& rect, const SkRegion& rgn, Op op); + + /** Replaces SkRegion with the result of rgn op rect. + Returns true if replaced SkRegion is not empty. + + @param rgn SkRegion operand + @param rect SkIRect operand + @return false if result is empty + + example: https://fiddle.skia.org/c/@Region_op_5 + */ + bool op(const SkRegion& rgn, const SkIRect& rect, Op op); + + /** Replaces SkRegion with the result of rgna op rgnb. + Returns true if replaced SkRegion is not empty. + + @param rgna SkRegion operand + @param rgnb SkRegion operand + @return false if result is empty + + example: https://fiddle.skia.org/c/@Region_op_6 + */ + bool op(const SkRegion& rgna, const SkRegion& rgnb, Op op); + +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + /** Private. Android framework only. + + @return string representation of SkRegion + */ + char* toString(); +#endif + + /** \class SkRegion::Iterator + Returns sequence of rectangles, sorted along y-axis, then x-axis, that make + up SkRegion. + */ + class SK_API Iterator { + public: + + /** Initializes SkRegion::Iterator with an empty SkRegion. done() on SkRegion::Iterator + returns true. + Call reset() to initialized SkRegion::Iterator at a later time. + + @return empty SkRegion iterator + */ + Iterator() : fRgn(nullptr), fDone(true) {} + + /** Sets SkRegion::Iterator to return elements of SkIRect array in region. + + @param region SkRegion to iterate + @return SkRegion iterator + + example: https://fiddle.skia.org/c/@Region_Iterator_copy_const_SkRegion + */ + Iterator(const SkRegion& region); + + /** SkPoint SkRegion::Iterator to start of SkRegion. + Returns true if SkRegion was set; otherwise, returns false. + + @return true if SkRegion was set + + example: https://fiddle.skia.org/c/@Region_Iterator_rewind + */ + bool rewind(); + + /** Resets iterator, using the new SkRegion. + + @param region SkRegion to iterate + + example: https://fiddle.skia.org/c/@Region_Iterator_reset + */ + void reset(const SkRegion& region); + + /** Returns true if SkRegion::Iterator is pointing to final SkIRect in SkRegion. 
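// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): combining regions with the Op values
// above. A sketch only; the boolean results are checked rather than assumed.
#include "include/core/SkRegion.h"

void region_op_sketch() {
    SkRegion clip(SkIRect::MakeWH(256, 256));

    // Punch a hole out of the middle: clip = clip - hole.
    SkRegion hole(SkIRect::MakeXYWH(64, 64, 128, 128));
    if (clip.op(hole, SkRegion::kDifference_Op)) {
        // clip is now complex (a rect with a hole).
    }

    // Intersect with a corner rect. clip is no longer a single rect here, so this
    // call takes the general path rather than the isRect()/kIntersect_Op shortcut
    // shown in op(const SkIRect&, Op) above.
    if (clip.op(SkIRect::MakeXYWH(0, 0, 64, 64), SkRegion::kIntersect_Op)) {
        SkASSERT(clip.isRect());   // the corner rect misses the hole entirely
    }
}
// ---------------------------------------------------------------------------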
+ + @return true if data parsing is complete + */ + bool done() const { return fDone; } + + /** Advances SkRegion::Iterator to next SkIRect in SkRegion if it is not done. + + example: https://fiddle.skia.org/c/@Region_Iterator_next + */ + void next(); + + /** Returns SkIRect element in SkRegion. Does not return predictable results if SkRegion + is empty. + + @return part of SkRegion as SkIRect + */ + const SkIRect& rect() const { return fRect; } + + /** Returns SkRegion if set; otherwise, returns nullptr. + + @return iterated SkRegion + */ + const SkRegion* rgn() const { return fRgn; } + + private: + const SkRegion* fRgn; + const SkRegion::RunType* fRuns; + SkIRect fRect = {0, 0, 0, 0}; + bool fDone; + }; + + /** \class SkRegion::Cliperator + Returns the sequence of rectangles, sorted along y-axis, then x-axis, that make + up SkRegion intersected with the specified clip rectangle. + */ + class SK_API Cliperator { + public: + + /** Sets SkRegion::Cliperator to return elements of SkIRect array in SkRegion within clip. + + @param region SkRegion to iterate + @param clip bounds of iteration + @return SkRegion iterator + + example: https://fiddle.skia.org/c/@Region_Cliperator_const_SkRegion_const_SkIRect + */ + Cliperator(const SkRegion& region, const SkIRect& clip); + + /** Returns true if SkRegion::Cliperator is pointing to final SkIRect in SkRegion. + + @return true if data parsing is complete + */ + bool done() { return fDone; } + + /** Advances iterator to next SkIRect in SkRegion contained by clip. + + example: https://fiddle.skia.org/c/@Region_Cliperator_next + */ + void next(); + + /** Returns SkIRect element in SkRegion, intersected with clip passed to + SkRegion::Cliperator constructor. Does not return predictable results if SkRegion + is empty. + + @return part of SkRegion inside clip as SkIRect + */ + const SkIRect& rect() const { return fRect; } + + private: + Iterator fIter; + SkIRect fClip; + SkIRect fRect = {0, 0, 0, 0}; + bool fDone; + }; + + /** \class SkRegion::Spanerator + Returns the line segment ends within SkRegion that intersect a horizontal line. + */ + class Spanerator { + public: + + /** Sets SkRegion::Spanerator to return line segments in SkRegion on scan line. + + @param region SkRegion to iterate + @param y horizontal line to intersect + @param left bounds of iteration + @param right bounds of iteration + @return SkRegion iterator + + example: https://fiddle.skia.org/c/@Region_Spanerator_const_SkRegion_int_int_int + */ + Spanerator(const SkRegion& region, int y, int left, int right); + + /** Advances iterator to next span intersecting SkRegion within line segment provided + in constructor. Returns true if interval was found. + + @param left pointer to span start; may be nullptr + @param right pointer to span end; may be nullptr + @return true if interval was found + + example: https://fiddle.skia.org/c/@Region_Spanerator_next + */ + bool next(int* left, int* right); + + private: + const SkRegion::RunType* fRuns; + int fLeft, fRight; + bool fDone; + }; + + /** Writes SkRegion to buffer, and returns number of bytes written. + If buffer is nullptr, returns number number of bytes that would be written. + + @param buffer storage for binary data + @return size of SkRegion + + example: https://fiddle.skia.org/c/@Region_writeToMemory + */ + size_t writeToMemory(void* buffer) const; + + /** Constructs SkRegion from buffer of size length. Returns bytes read. + Returned value will be multiple of four or zero if length was too small. 
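// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): walking a region with the three iterator
// helpers declared in this class (Iterator, Cliperator, Spanerator).
#include "include/core/SkRegion.h"

void region_iteration_sketch(const SkRegion& rgn) {
    // Every rectangle, sorted along y then x, as documented above.
    for (SkRegion::Iterator it(rgn); !it.done(); it.next()) {
        const SkIRect& r = it.rect();
        (void)r;                                    // e.g. fill r
    }

    // Only the pieces that fall inside a clip rect.
    for (SkRegion::Cliperator it(rgn, SkIRect::MakeWH(64, 64)); !it.done(); it.next()) {
        const SkIRect& r = it.rect();
        (void)r;
    }

    // Horizontal spans on scanline y = 10, limited to x in [0, 256).
    SkRegion::Spanerator span(rgn, 10, 0, 256);
    int left, right;
    while (span.next(&left, &right)) {
        // [left, right) is covered by rgn on that scanline.
    }
}
// ---------------------------------------------------------------------------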
+ + @param buffer storage for binary data + @param length size of buffer + @return bytes read + + example: https://fiddle.skia.org/c/@Region_readFromMemory + */ + size_t readFromMemory(const void* buffer, size_t length); + +private: + static constexpr int kOpCount = kReplace_Op + 1; + + // T + // [B N L R S] + // S + static constexpr int kRectRegionRuns = 7; + + struct RunHead; + + static RunHead* emptyRunHeadPtr() { return (SkRegion::RunHead*) -1; } + static constexpr RunHead* kRectRunHeadPtr = nullptr; + + // allocate space for count runs + void allocateRuns(int count); + void allocateRuns(int count, int ySpanCount, int intervalCount); + void allocateRuns(const RunHead& src); + + SkDEBUGCODE(void dump() const;) + + SkIRect fBounds; + RunHead* fRunHead; + + void freeRuns(); + + /** + * Return the runs from this region, consing up fake runs if the region + * is empty or a rect. In those 2 cases, we use tmpStorage to hold the + * run data. + */ + const RunType* getRuns(RunType tmpStorage[], int* intervals) const; + + // This is called with runs[] that do not yet have their interval-count + // field set on each scanline. That is computed as part of this call + // (inside ComputeRunBounds). + bool setRuns(RunType runs[], int count); + + int count_runtype_values(int* itop, int* ibot) const; + + bool isValid() const; + + static void BuildRectRuns(const SkIRect& bounds, + RunType runs[kRectRegionRuns]); + + // If the runs define a simple rect, return true and set bounds to that + // rect. If not, return false and ignore bounds. + static bool RunsAreARect(const SkRegion::RunType runs[], int count, + SkIRect* bounds); + + /** + * If the last arg is null, just return if the result is non-empty, + * else store the result in the last arg. + */ + static bool Oper(const SkRegion&, const SkRegion&, SkRegion::Op, SkRegion*); + + friend struct RunHead; + friend class Iterator; + friend class Spanerator; + friend class SkRegionPriv; + friend class SkRgnBuilder; + friend class SkFlatRegion; +}; + +#endif diff --git a/src/deps/skia/include/core/SkSamplingOptions.h b/src/deps/skia/include/core/SkSamplingOptions.h new file mode 100644 index 000000000..468cf5bef --- /dev/null +++ b/src/deps/skia/include/core/SkSamplingOptions.h @@ -0,0 +1,92 @@ +/* + * Copyright 2020 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageSampling_DEFINED +#define SkImageSampling_DEFINED + +#include "include/core/SkTypes.h" +#include <new> + +enum class SkFilterMode { + kNearest, // single sample point (nearest neighbor) + kLinear, // interporate between 2x2 sample points (bilinear interpolation) + + kLast = kLinear, +}; + +enum class SkMipmapMode { + kNone, // ignore mipmap levels, sample from the "base" + kNearest, // sample from the nearest level + kLinear, // interpolate between the two nearest levels + + kLast = kLinear, +}; + +/* + * Specify B and C (each between 0...1) to create a shader that applies the corresponding + * cubic reconstruction filter to the image. + * + * Example values: + * B = 1/3, C = 1/3 "Mitchell" filter + * B = 0, C = 1/2 "Catmull-Rom" filter + * + * See "Reconstruction Filters in Computer Graphics" + * Don P. Mitchell + * Arun N. 
Netravali + * 1988 + * https://www.cs.utexas.edu/~fussell/courses/cs384g-fall2013/lectures/mitchell/Mitchell.pdf + * + * Desmos worksheet https://www.desmos.com/calculator/aghdpicrvr + * Nice overview https://entropymine.com/imageworsener/bicubic/ + */ +struct SkCubicResampler { + float B, C; + + // Historic default for kHigh_SkFilterQuality + static constexpr SkCubicResampler Mitchell() { return {1/3.0f, 1/3.0f}; } + static constexpr SkCubicResampler CatmullRom() { return {0.0f, 1/2.0f}; } +}; + +struct SK_API SkSamplingOptions { + const bool useCubic = false; + const SkCubicResampler cubic = {0, 0}; + const SkFilterMode filter = SkFilterMode::kNearest; + const SkMipmapMode mipmap = SkMipmapMode::kNone; + + SkSamplingOptions() = default; + SkSamplingOptions(const SkSamplingOptions&) = default; + SkSamplingOptions& operator=(const SkSamplingOptions& that) { + this->~SkSamplingOptions(); // A pedantic no-op. + new (this) SkSamplingOptions(that); + return *this; + } + + SkSamplingOptions(SkFilterMode fm, SkMipmapMode mm) + : useCubic(false) + , filter(fm) + , mipmap(mm) {} + + explicit SkSamplingOptions(SkFilterMode fm) + : useCubic(false) + , filter(fm) + , mipmap(SkMipmapMode::kNone) {} + + explicit SkSamplingOptions(const SkCubicResampler& c) + : useCubic(true) + , cubic(c) {} + + bool operator==(const SkSamplingOptions& other) const { + return useCubic == other.useCubic + && cubic.B == other.cubic.B + && cubic.C == other.cubic.C + && filter == other.filter + && mipmap == other.mipmap; + } + bool operator!=(const SkSamplingOptions& other) const { return !(*this == other); } +}; + +#endif diff --git a/src/deps/skia/include/core/SkScalar.h b/src/deps/skia/include/core/SkScalar.h new file mode 100644 index 000000000..07d4ec73d --- /dev/null +++ b/src/deps/skia/include/core/SkScalar.h @@ -0,0 +1,194 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
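// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): the three ways to construct the
// SkSamplingOptions struct defined above.
#include "include/core/SkSamplingOptions.h"

void sampling_options_sketch() {
    // Default: nearest-neighbor, no mipmaps.
    SkSamplingOptions nearest;

    // Bilinear filtering with linear blending between mip levels.
    SkSamplingOptions trilinear(SkFilterMode::kLinear, SkMipmapMode::kLinear);

    // Bicubic resampling; Mitchell (B = C = 1/3) is the historic "high quality" filter.
    SkSamplingOptions bicubic(SkCubicResampler::Mitchell());

    SkASSERT(nearest != trilinear);
    SkASSERT(bicubic.useCubic);
}
// ---------------------------------------------------------------------------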
+ */ + +#ifndef SkScalar_DEFINED +#define SkScalar_DEFINED + +#include "include/private/SkFloatingPoint.h" + +#undef SK_SCALAR_IS_FLOAT +#define SK_SCALAR_IS_FLOAT 1 + +typedef float SkScalar; + +#define SK_Scalar1 1.0f +#define SK_ScalarHalf 0.5f +#define SK_ScalarSqrt2 SK_FloatSqrt2 +#define SK_ScalarPI SK_FloatPI +#define SK_ScalarTanPIOver8 0.414213562f +#define SK_ScalarRoot2Over2 0.707106781f +#define SK_ScalarMax 3.402823466e+38f +#define SK_ScalarInfinity SK_FloatInfinity +#define SK_ScalarNegativeInfinity SK_FloatNegativeInfinity +#define SK_ScalarNaN SK_FloatNaN + +#define SkScalarFloorToScalar(x) sk_float_floor(x) +#define SkScalarCeilToScalar(x) sk_float_ceil(x) +#define SkScalarRoundToScalar(x) sk_float_floor((x) + 0.5f) +#define SkScalarTruncToScalar(x) sk_float_trunc(x) + +#define SkScalarFloorToInt(x) sk_float_floor2int(x) +#define SkScalarCeilToInt(x) sk_float_ceil2int(x) +#define SkScalarRoundToInt(x) sk_float_round2int(x) + +#define SkScalarAbs(x) sk_float_abs(x) +#define SkScalarCopySign(x, y) sk_float_copysign(x, y) +#define SkScalarMod(x, y) sk_float_mod(x,y) +#define SkScalarSqrt(x) sk_float_sqrt(x) +#define SkScalarPow(b, e) sk_float_pow(b, e) + +#define SkScalarSin(radians) (float)sk_float_sin(radians) +#define SkScalarCos(radians) (float)sk_float_cos(radians) +#define SkScalarTan(radians) (float)sk_float_tan(radians) +#define SkScalarASin(val) (float)sk_float_asin(val) +#define SkScalarACos(val) (float)sk_float_acos(val) +#define SkScalarATan2(y, x) (float)sk_float_atan2(y,x) +#define SkScalarExp(x) (float)sk_float_exp(x) +#define SkScalarLog(x) (float)sk_float_log(x) +#define SkScalarLog2(x) (float)sk_float_log2(x) + +////////////////////////////////////////////////////////////////////////////////////////////////// + +#define SkIntToScalar(x) static_cast<SkScalar>(x) +#define SkIntToFloat(x) static_cast<float>(x) +#define SkScalarTruncToInt(x) sk_float_saturate2int(x) + +#define SkScalarToFloat(x) static_cast<float>(x) +#define SkFloatToScalar(x) static_cast<SkScalar>(x) +#define SkScalarToDouble(x) static_cast<double>(x) +#define SkDoubleToScalar(x) sk_double_to_float(x) + +#define SK_ScalarMin (-SK_ScalarMax) + +static inline bool SkScalarIsNaN(SkScalar x) { return x != x; } + +/** Returns true if x is not NaN and not infinite + */ +static inline bool SkScalarIsFinite(SkScalar x) { return sk_float_isfinite(x); } + +static inline bool SkScalarsAreFinite(SkScalar a, SkScalar b) { + return sk_floats_are_finite(a, b); +} + +static inline bool SkScalarsAreFinite(const SkScalar array[], int count) { + return sk_floats_are_finite(array, count); +} + +/** + * Variant of SkScalarRoundToInt, that performs the rounding step (adding 0.5) explicitly using + * double, to avoid possibly losing the low bit(s) of the answer before calling floor(). + * + * This routine will likely be slower than SkScalarRoundToInt(), and should only be used when the + * extra precision is known to be valuable. + * + * In particular, this catches the following case: + * SkScalar x = 0.49999997; + * int ix = SkScalarRoundToInt(x); + * SkASSERT(0 == ix); // <--- fails + * ix = SkDScalarRoundToInt(x); + * SkASSERT(0 == ix); // <--- succeeds + */ +static inline int SkDScalarRoundToInt(SkScalar x) { + double xx = x; + xx += 0.5; + return (int)floor(xx); +} + +/** Returns the fractional part of the scalar. 
*/ +static inline SkScalar SkScalarFraction(SkScalar x) { + return x - SkScalarTruncToScalar(x); +} + +static inline SkScalar SkScalarSquare(SkScalar x) { return x * x; } + +#define SkScalarInvert(x) sk_ieee_float_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(SK_Scalar1, (x)) +#define SkScalarAve(a, b) (((a) + (b)) * SK_ScalarHalf) +#define SkScalarHalf(a) ((a) * SK_ScalarHalf) + +#define SkDegreesToRadians(degrees) ((degrees) * (SK_ScalarPI / 180)) +#define SkRadiansToDegrees(radians) ((radians) * (180 / SK_ScalarPI)) + +static inline bool SkScalarIsInt(SkScalar x) { + return x == SkScalarFloorToScalar(x); +} + +/** + * Returns -1 || 0 || 1 depending on the sign of value: + * -1 if x < 0 + * 0 if x == 0 + * 1 if x > 0 + */ +static inline int SkScalarSignAsInt(SkScalar x) { + return x < 0 ? -1 : (x > 0); +} + +// Scalar result version of above +static inline SkScalar SkScalarSignAsScalar(SkScalar x) { + return x < 0 ? -SK_Scalar1 : ((x > 0) ? SK_Scalar1 : 0); +} + +#define SK_ScalarNearlyZero (SK_Scalar1 / (1 << 12)) + +static inline bool SkScalarNearlyZero(SkScalar x, + SkScalar tolerance = SK_ScalarNearlyZero) { + SkASSERT(tolerance >= 0); + return SkScalarAbs(x) <= tolerance; +} + +static inline bool SkScalarNearlyEqual(SkScalar x, SkScalar y, + SkScalar tolerance = SK_ScalarNearlyZero) { + SkASSERT(tolerance >= 0); + return SkScalarAbs(x-y) <= tolerance; +} + +static inline float SkScalarSinSnapToZero(SkScalar radians) { + float v = SkScalarSin(radians); + return SkScalarNearlyZero(v) ? 0.0f : v; +} + +static inline float SkScalarCosSnapToZero(SkScalar radians) { + float v = SkScalarCos(radians); + return SkScalarNearlyZero(v) ? 0.0f : v; +} + +/** Linearly interpolate between A and B, based on t. + If t is 0, return A + If t is 1, return B + else interpolate. + t must be [0..SK_Scalar1] +*/ +static inline SkScalar SkScalarInterp(SkScalar A, SkScalar B, SkScalar t) { + SkASSERT(t >= 0 && t <= SK_Scalar1); + return A + (B - A) * t; +} + +/** Interpolate along the function described by (keys[length], values[length]) + for the passed searchKey. SearchKeys outside the range keys[0]-keys[Length] + clamp to the min or max value. This function assumes the number of pairs + (length) will be small and a linear search is used. + + Repeated keys are allowed for discontinuous functions (so long as keys is + monotonically increasing). If key is the value of a repeated scalar in + keys the first one will be used. +*/ +SkScalar SkScalarInterpFunc(SkScalar searchKey, const SkScalar keys[], + const SkScalar values[], int length); + +/* + * Helper to compare an array of scalars. + */ +static inline bool SkScalarsEqual(const SkScalar a[], const SkScalar b[], int n) { + SkASSERT(n >= 0); + for (int i = 0; i < n; ++i) { + if (a[i] != b[i]) { + return false; + } + } + return true; +} + +#endif diff --git a/src/deps/skia/include/core/SkSerialProcs.h b/src/deps/skia/include/core/SkSerialProcs.h new file mode 100644 index 000000000..87e10d847 --- /dev/null +++ b/src/deps/skia/include/core/SkSerialProcs.h @@ -0,0 +1,73 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSerialProcs_DEFINED +#define SkSerialProcs_DEFINED + +#include "include/core/SkImage.h" +#include "include/core/SkPicture.h" +#include "include/core/SkTypeface.h" + +/** + * A serial-proc is asked to serialize the specified object (e.g. picture or image). + * If a data object is returned, it will be used (even if it is zero-length). 
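// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): a few of the SkScalar helpers defined
// above, exercised under SkASSERT (all expressions here are side-effect free).
#include "include/core/SkScalar.h"

void scalar_helpers_sketch() {
    // Tolerance-based compare; the default tolerance is SK_ScalarNearlyZero (1/4096).
    SkASSERT(SkScalarNearlyEqual(SkScalarSqrt(2) * SkScalarSqrt(2), 2));

    // sin(pi) is a tiny non-zero float; the SnapToZero variant clamps it to exactly 0.
    SkASSERT(SkScalarSinSnapToZero(SkDegreesToRadians(180)) == 0);

    // Lerp halfway between 10 and 20.
    SkASSERT(SkScalarNearlyEqual(SkScalarInterp(10, 20, 0.5f), 15));

    // The double-precision rounding variant keeps the low bits the float path loses
    // (the exact case called out in the SkDScalarRoundToInt comment above).
    SkASSERT(SkDScalarRoundToInt(0.49999997f) == 0);
}
// ---------------------------------------------------------------------------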
+ * If null is returned, then Skia will take its default action. + * + * The default action for pictures is to use Skia's internal format. + * The default action for images is to encode either in its native format or PNG. + * The default action for typefaces is to use Skia's internal format. + */ + +typedef sk_sp<SkData> (*SkSerialPictureProc)(SkPicture*, void* ctx); +typedef sk_sp<SkData> (*SkSerialImageProc)(SkImage*, void* ctx); +typedef sk_sp<SkData> (*SkSerialTypefaceProc)(SkTypeface*, void* ctx); + +/** + * Called with the encoded form of a picture (previously written with a custom + * SkSerialPictureProc proc). Return a picture object, or nullptr indicating failure. + */ +typedef sk_sp<SkPicture> (*SkDeserialPictureProc)(const void* data, size_t length, void* ctx); + +/** + * Called with the encoded from of an image. The proc can return an image object, or if it + * returns nullptr, then Skia will take its default action to try to create an image from the data. + * + * Note that unlike SkDeserialPictureProc and SkDeserialTypefaceProc, return nullptr from this + * does not indicate failure, but is a signal for Skia to take its default action. + */ +typedef sk_sp<SkImage> (*SkDeserialImageProc)(const void* data, size_t length, void* ctx); + +/** + * Called with the encoded form of a typeface (previously written with a custom + * SkSerialTypefaceProc proc). Return a typeface object, or nullptr indicating failure. + */ +typedef sk_sp<SkTypeface> (*SkDeserialTypefaceProc)(const void* data, size_t length, void* ctx); + +struct SK_API SkSerialProcs { + SkSerialPictureProc fPictureProc = nullptr; + void* fPictureCtx = nullptr; + + SkSerialImageProc fImageProc = nullptr; + void* fImageCtx = nullptr; + + SkSerialTypefaceProc fTypefaceProc = nullptr; + void* fTypefaceCtx = nullptr; +}; + +struct SK_API SkDeserialProcs { + SkDeserialPictureProc fPictureProc = nullptr; + void* fPictureCtx = nullptr; + + SkDeserialImageProc fImageProc = nullptr; + void* fImageCtx = nullptr; + + SkDeserialTypefaceProc fTypefaceProc = nullptr; + void* fTypefaceCtx = nullptr; +}; + +#endif + diff --git a/src/deps/skia/include/core/SkShader.h b/src/deps/skia/include/core/SkShader.h new file mode 100644 index 000000000..fba17f8ad --- /dev/null +++ b/src/deps/skia/include/core/SkShader.h @@ -0,0 +1,148 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkShader_DEFINED +#define SkShader_DEFINED + +#include "include/core/SkBlendMode.h" +#include "include/core/SkColor.h" +#include "include/core/SkFlattenable.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkMatrix.h" +#include "include/core/SkTileMode.h" + +class SkArenaAlloc; +class SkBitmap; +class SkBlender; +class SkColorFilter; +class SkColorSpace; +class SkImage; +class SkPath; +class SkPicture; +class SkRasterPipeline; +class GrFragmentProcessor; + +/** \class SkShader + * + * Shaders specify the source color(s) for what is being drawn. If a paint + * has no shader, then the paint's color is used. If the paint has a + * shader, then the shader's color(s) are use instead, but they are + * modulated by the paint's alpha. This makes it easy to create a shader + * once (e.g. bitmap tiling or gradient) and then change its transparency + * w/o having to modify the original shader... only the paint's alpha needs + * to be modified. 
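// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): filling in SkSerialProcs with a custom
// image hook. "PreEncodedCtx" and the substitution policy are hypothetical, and
// the call that consumes the struct (e.g. SkPicture::serialize(&procs)) is outside
// this hunk, so treat that part as an assumption.
#include "include/core/SkData.h"
#include "include/core/SkSerialProcs.h"

struct PreEncodedCtx {              // hypothetical context for the sketch
    sk_sp<SkData> blob;             // pre-encoded bytes to substitute for every image
};

static sk_sp<SkData> SubstituteEncodedImage(SkImage*, void* ctx) {
    // Returning nullptr would tell Skia to take its default action instead.
    return static_cast<PreEncodedCtx*>(ctx)->blob;
}

void serial_procs_sketch(PreEncodedCtx* ctx) {
    SkSerialProcs procs;
    procs.fImageProc = SubstituteEncodedImage;
    procs.fImageCtx  = ctx;
    // ...hand &procs to the serializing entry point.
}
// ---------------------------------------------------------------------------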
+ */ +class SK_API SkShader : public SkFlattenable { +public: + /** + * Returns true if the shader is guaranteed to produce only opaque + * colors, subject to the SkPaint using the shader to apply an opaque + * alpha value. Subclasses should override this to allow some + * optimizations. + */ + virtual bool isOpaque() const { return false; } + + /** + * Iff this shader is backed by a single SkImage, return its ptr (the caller must ref this + * if they want to keep it longer than the lifetime of the shader). If not, return nullptr. + */ + SkImage* isAImage(SkMatrix* localMatrix, SkTileMode xy[2]) const; + + bool isAImage() const { + return this->isAImage(nullptr, (SkTileMode*)nullptr) != nullptr; + } + + /** + * If the shader subclass can be represented as a gradient, asAGradient + * returns the matching GradientType enum (or kNone_GradientType if it + * cannot). Also, if info is not null, asAGradient populates info with + * the relevant (see below) parameters for the gradient. fColorCount + * is both an input and output parameter. On input, it indicates how + * many entries in fColors and fColorOffsets can be used, if they are + * non-NULL. After asAGradient has run, fColorCount indicates how + * many color-offset pairs there are in the gradient. If there is + * insufficient space to store all of the color-offset pairs, fColors + * and fColorOffsets will not be altered. fColorOffsets specifies + * where on the range of 0 to 1 to transition to the given color. + * The meaning of fPoint and fRadius is dependant on the type of gradient. + * + * None: + * info is ignored. + * Color: + * fColorOffsets[0] is meaningless. + * Linear: + * fPoint[0] and fPoint[1] are the end-points of the gradient + * Radial: + * fPoint[0] and fRadius[0] are the center and radius + * Conical: + * fPoint[0] and fRadius[0] are the center and radius of the 1st circle + * fPoint[1] and fRadius[1] are the center and radius of the 2nd circle + * Sweep: + * fPoint[0] is the center of the sweep. + */ + + enum GradientType { + kNone_GradientType, + kColor_GradientType, + kLinear_GradientType, + kRadial_GradientType, + kSweep_GradientType, + kConical_GradientType, + kLast_GradientType = kConical_GradientType, + }; + + struct GradientInfo { + int fColorCount; //!< In-out parameter, specifies passed size + // of fColors/fColorOffsets on input, and + // actual number of colors/offsets on + // output. + SkColor* fColors; //!< The colors in the gradient. + SkScalar* fColorOffsets; //!< The unit offset for color transitions. + SkPoint fPoint[2]; //!< Type specific, see above. + SkScalar fRadius[2]; //!< Type specific, see above. + SkTileMode fTileMode; + uint32_t fGradientFlags; //!< see SkGradientShader::Flags + }; + + // DEPRECATED. skbug.com/8941 + virtual GradientType asAGradient(GradientInfo* info) const; + + ////////////////////////////////////////////////////////////////////////// + // Methods to create combinations or variants of shaders + + /** + * Return a shader that will apply the specified localMatrix to this shader. + * The specified matrix will be applied before any matrix associated with this shader. + */ + sk_sp<SkShader> makeWithLocalMatrix(const SkMatrix&) const; + + /** + * Create a new shader that produces the same colors as invoking this shader and then applying + * the colorfilter. 
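// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): the two-pass query pattern implied by
// the asAGradient()/GradientInfo contract above (the API is marked deprecated;
// this only illustrates the in/out use of fColorCount).
#include "include/core/SkShader.h"
#include <vector>

void gradient_query_sketch(const SkShader* shader) {
    SkShader::GradientInfo info = {};
    if (shader->asAGradient(&info) == SkShader::kLinear_GradientType) {
        // The first call left fColors/fColorOffsets null, so only fColorCount was set.
        std::vector<SkColor>  colors(info.fColorCount);
        std::vector<SkScalar> offsets(info.fColorCount);
        info.fColors       = colors.data();
        info.fColorOffsets = offsets.data();
        shader->asAGradient(&info);            // second call copies the stops out
        // info.fPoint[0] and info.fPoint[1] are the linear gradient's end-points.
    }
}
// ---------------------------------------------------------------------------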
+ */ + sk_sp<SkShader> makeWithColorFilter(sk_sp<SkColorFilter>) const; + +private: + SkShader() = default; + friend class SkShaderBase; + + using INHERITED = SkFlattenable; +}; + +class SK_API SkShaders { +public: + static sk_sp<SkShader> Empty(); + static sk_sp<SkShader> Color(SkColor); + static sk_sp<SkShader> Color(const SkColor4f&, sk_sp<SkColorSpace>); + static sk_sp<SkShader> Blend(SkBlendMode mode, sk_sp<SkShader> dst, sk_sp<SkShader> src); + static sk_sp<SkShader> Blend(sk_sp<SkBlender>, sk_sp<SkShader> dst, sk_sp<SkShader> src); + +private: + SkShaders() = delete; +}; + +#endif diff --git a/src/deps/skia/include/core/SkSize.h b/src/deps/skia/include/core/SkSize.h new file mode 100644 index 000000000..79d673775 --- /dev/null +++ b/src/deps/skia/include/core/SkSize.h @@ -0,0 +1,90 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSize_DEFINED +#define SkSize_DEFINED + +#include "include/core/SkScalar.h" + +struct SkISize { + int32_t fWidth; + int32_t fHeight; + + static constexpr SkISize Make(int32_t w, int32_t h) { return {w, h}; } + + static constexpr SkISize MakeEmpty() { return {0, 0}; } + + void set(int32_t w, int32_t h) { *this = SkISize{w, h}; } + + /** Returns true iff fWidth == 0 && fHeight == 0 + */ + bool isZero() const { return 0 == fWidth && 0 == fHeight; } + + /** Returns true if either width or height are <= 0 */ + bool isEmpty() const { return fWidth <= 0 || fHeight <= 0; } + + /** Set the width and height to 0 */ + void setEmpty() { fWidth = fHeight = 0; } + + constexpr int32_t width() const { return fWidth; } + constexpr int32_t height() const { return fHeight; } + + constexpr int64_t area() const { return fWidth * fHeight; } + + bool equals(int32_t w, int32_t h) const { return fWidth == w && fHeight == h; } +}; + +static inline bool operator==(const SkISize& a, const SkISize& b) { + return a.fWidth == b.fWidth && a.fHeight == b.fHeight; +} + +static inline bool operator!=(const SkISize& a, const SkISize& b) { return !(a == b); } + +/////////////////////////////////////////////////////////////////////////////// + +struct SkSize { + SkScalar fWidth; + SkScalar fHeight; + + static SkSize Make(SkScalar w, SkScalar h) { return {w, h}; } + + static SkSize Make(const SkISize& src) { + return {SkIntToScalar(src.width()), SkIntToScalar(src.height())}; + } + + static SkSize MakeEmpty() { return {0, 0}; } + + void set(SkScalar w, SkScalar h) { *this = SkSize{w, h}; } + + /** Returns true iff fWidth == 0 && fHeight == 0 + */ + bool isZero() const { return 0 == fWidth && 0 == fHeight; } + + /** Returns true if either width or height are <= 0 */ + bool isEmpty() const { return fWidth <= 0 || fHeight <= 0; } + + /** Set the width and height to 0 */ + void setEmpty() { *this = SkSize{0, 0}; } + + SkScalar width() const { return fWidth; } + SkScalar height() const { return fHeight; } + + bool equals(SkScalar w, SkScalar h) const { return fWidth == w && fHeight == h; } + + SkISize toRound() const { return {SkScalarRoundToInt(fWidth), SkScalarRoundToInt(fHeight)}; } + + SkISize toCeil() const { return {SkScalarCeilToInt(fWidth), SkScalarCeilToInt(fHeight)}; } + + SkISize toFloor() const { return {SkScalarFloorToInt(fWidth), SkScalarFloorToInt(fHeight)}; } +}; + +static inline bool operator==(const SkSize& a, const SkSize& b) { + return a.fWidth == b.fWidth && a.fHeight == b.fHeight; +} + +static inline bool operator!=(const SkSize& a, const SkSize& b) { return !(a == 
b); } +#endif diff --git a/src/deps/skia/include/core/SkSpan.h b/src/deps/skia/include/core/SkSpan.h new file mode 100644 index 000000000..b09ec867a --- /dev/null +++ b/src/deps/skia/include/core/SkSpan.h @@ -0,0 +1,89 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSpan_DEFINED +#define SkSpan_DEFINED + +#include <cstddef> +#include <iterator> +#include <type_traits> +#include <utility> +#include "include/private/SkTLogic.h" + +/** + * An SkSpan is a view of a contiguous collection of elements of type T. It can be directly + * constructed from a pointer and size. SkMakeSpan can be used to construct one from an array, + * or a container (like std::vector). + * + * With C++17, we could add template deduction guides that eliminate the need for SkMakeSpan: + * https://skia-review.googlesource.com/c/skia/+/320264 + */ +template <typename T> +class SkSpan { +public: + constexpr SkSpan() : fPtr{nullptr}, fSize{0} {} + constexpr SkSpan(T* ptr, size_t size) : fPtr{ptr}, fSize{size} { + SkASSERT(size < kMaxSize); + } + template <typename U, typename = typename std::enable_if<std::is_same<const U, T>::value>::type> + constexpr SkSpan(const SkSpan<U>& that) : fPtr(that.data()), fSize{that.size()} {} + constexpr SkSpan(const SkSpan& o) = default; + + constexpr SkSpan& operator=(const SkSpan& that) { + fPtr = that.fPtr; + fSize = that.fSize; + return *this; + } + constexpr T& operator [] (size_t i) const { + SkASSERT(i < this->size()); + return fPtr[i]; + } + constexpr T& front() const { return fPtr[0]; } + constexpr T& back() const { return fPtr[fSize - 1]; } + constexpr T* begin() const { return fPtr; } + constexpr T* end() const { return fPtr + fSize; } + constexpr auto rbegin() const { return std::make_reverse_iterator(this->end()); } + constexpr auto rend() const { return std::make_reverse_iterator(this->begin()); } + constexpr T* data() const { return this->begin(); } + constexpr size_t size() const { return fSize; } + constexpr bool empty() const { return fSize == 0; } + constexpr size_t size_bytes() const { return fSize * sizeof(T); } + constexpr SkSpan<T> first(size_t prefixLen) const { + SkASSERT(prefixLen <= this->size()); + return SkSpan{fPtr, prefixLen}; + } + constexpr SkSpan<T> last(size_t postfixLen) const { + SkASSERT(postfixLen <= this->size()); + return SkSpan{fPtr + (this->size() - postfixLen), postfixLen}; + } + constexpr SkSpan<T> subspan(size_t offset, size_t count) const { + SkASSERT(offset <= this->size()); + SkASSERT(count <= this->size() - offset); + return SkSpan{fPtr + offset, count}; + } + +private: + static constexpr size_t kMaxSize = std::numeric_limits<size_t>::max() / sizeof(T); + T* fPtr; + size_t fSize; +}; + +template <typename T, typename S> inline constexpr SkSpan<T> SkMakeSpan(T* p, S s) { + return SkSpan<T>{p, SkTo<size_t>(s)}; +} + +template <size_t N, typename T> inline constexpr SkSpan<T> SkMakeSpan(T (&a)[N]) { + return SkSpan<T>{a, N}; +} + +template <typename Container> +inline auto SkMakeSpan(Container& c) + -> SkSpan<typename std::remove_reference<decltype(*(c.data()))>::type> { + return {c.data(), c.size()}; +} + +#endif // SkSpan_DEFINED diff --git a/src/deps/skia/include/core/SkStream.h b/src/deps/skia/include/core/SkStream.h new file mode 100644 index 000000000..32dfff25b --- /dev/null +++ b/src/deps/skia/include/core/SkStream.h @@ -0,0 +1,524 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is 
governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkStream_DEFINED +#define SkStream_DEFINED + +#include "include/core/SkData.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkScalar.h" +#include "include/private/SkTo.h" + +#include <memory.h> + +class SkStream; +class SkStreamRewindable; +class SkStreamSeekable; +class SkStreamAsset; +class SkStreamMemory; + +/** + * SkStream -- abstraction for a source of bytes. Subclasses can be backed by + * memory, or a file, or something else. + * + * NOTE: + * + * Classic "streams" APIs are sort of async, in that on a request for N + * bytes, they may return fewer than N bytes on a given call, in which case + * the caller can "try again" to get more bytes, eventually (modulo an error) + * receiving their total N bytes. + * + * Skia streams behave differently. They are effectively synchronous, and will + * always return all N bytes of the request if possible. If they return fewer + * (the read() call returns the number of bytes read) then that means there is + * no more data (at EOF or hit an error). The caller should *not* call again + * in hopes of fulfilling more of the request. + */ +class SK_API SkStream { +public: + virtual ~SkStream() {} + SkStream() {} + + /** + * Attempts to open the specified file as a stream, returns nullptr on failure. + */ + static std::unique_ptr<SkStreamAsset> MakeFromFile(const char path[]); + + /** Reads or skips size number of bytes. + * If buffer == NULL, skip size bytes, return how many were skipped. + * If buffer != NULL, copy size bytes into buffer, return how many were copied. + * @param buffer when NULL skip size bytes, otherwise copy size bytes into buffer + * @param size the number of bytes to skip or copy + * @return the number of bytes actually read. + */ + virtual size_t read(void* buffer, size_t size) = 0; + + /** Skip size number of bytes. + * @return the actual number bytes that could be skipped. + */ + size_t skip(size_t size) { + return this->read(nullptr, size); + } + + /** + * Attempt to peek at size bytes. + * If this stream supports peeking, copy min(size, peekable bytes) into + * buffer, and return the number of bytes copied. + * If the stream does not support peeking, or cannot peek any bytes, + * return 0 and leave buffer unchanged. + * The stream is guaranteed to be in the same visible state after this + * call, regardless of success or failure. + * @param buffer Must not be NULL, and must be at least size bytes. Destination + * to copy bytes. + * @param size Number of bytes to copy. + * @return The number of bytes peeked/copied. + */ + virtual size_t peek(void* /*buffer*/, size_t /*size*/) const { return 0; } + + /** Returns true when all the bytes in the stream have been read. + * This may return true early (when there are no more bytes to be read) + * or late (after the first unsuccessful read). 
+ */ + virtual bool isAtEnd() const = 0; + + bool SK_WARN_UNUSED_RESULT readS8(int8_t*); + bool SK_WARN_UNUSED_RESULT readS16(int16_t*); + bool SK_WARN_UNUSED_RESULT readS32(int32_t*); + + bool SK_WARN_UNUSED_RESULT readU8(uint8_t* i) { return this->readS8((int8_t*)i); } + bool SK_WARN_UNUSED_RESULT readU16(uint16_t* i) { return this->readS16((int16_t*)i); } + bool SK_WARN_UNUSED_RESULT readU32(uint32_t* i) { return this->readS32((int32_t*)i); } + + bool SK_WARN_UNUSED_RESULT readBool(bool* b) { + uint8_t i; + if (!this->readU8(&i)) { return false; } + *b = (i != 0); + return true; + } + bool SK_WARN_UNUSED_RESULT readScalar(SkScalar*); + bool SK_WARN_UNUSED_RESULT readPackedUInt(size_t*); + +//SkStreamRewindable + /** Rewinds to the beginning of the stream. Returns true if the stream is known + * to be at the beginning after this call returns. + */ + virtual bool rewind() { return false; } + + /** Duplicates this stream. If this cannot be done, returns NULL. + * The returned stream will be positioned at the beginning of its data. + */ + std::unique_ptr<SkStream> duplicate() const { + return std::unique_ptr<SkStream>(this->onDuplicate()); + } + /** Duplicates this stream. If this cannot be done, returns NULL. + * The returned stream will be positioned the same as this stream. + */ + std::unique_ptr<SkStream> fork() const { + return std::unique_ptr<SkStream>(this->onFork()); + } + +//SkStreamSeekable + /** Returns true if this stream can report it's current position. */ + virtual bool hasPosition() const { return false; } + /** Returns the current position in the stream. If this cannot be done, returns 0. */ + virtual size_t getPosition() const { return 0; } + + /** Seeks to an absolute position in the stream. If this cannot be done, returns false. + * If an attempt is made to seek past the end of the stream, the position will be set + * to the end of the stream. + */ + virtual bool seek(size_t /*position*/) { return false; } + + /** Seeks to an relative offset in the stream. If this cannot be done, returns false. + * If an attempt is made to move to a position outside the stream, the position will be set + * to the closest point within the stream (beginning or end). + */ + virtual bool move(long /*offset*/) { return false; } + +//SkStreamAsset + /** Returns true if this stream can report it's total length. */ + virtual bool hasLength() const { return false; } + /** Returns the total length of the stream. If this cannot be done, returns 0. */ + virtual size_t getLength() const { return 0; } + +//SkStreamMemory + /** Returns the starting address for the data. If this cannot be done, returns NULL. */ + //TODO: replace with virtual const SkData* getData() + virtual const void* getMemoryBase() { return nullptr; } + +private: + virtual SkStream* onDuplicate() const { return nullptr; } + virtual SkStream* onFork() const { return nullptr; } + + SkStream(SkStream&&) = delete; + SkStream(const SkStream&) = delete; + SkStream& operator=(SkStream&&) = delete; + SkStream& operator=(const SkStream&) = delete; +}; + +/** SkStreamRewindable is a SkStream for which rewind and duplicate are required. */ +class SK_API SkStreamRewindable : public SkStream { +public: + bool rewind() override = 0; + std::unique_ptr<SkStreamRewindable> duplicate() const { + return std::unique_ptr<SkStreamRewindable>(this->onDuplicate()); + } +private: + SkStreamRewindable* onDuplicate() const override = 0; +}; + +/** SkStreamSeekable is a SkStreamRewindable for which position, seek, move, and fork are required. 
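// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): reading a whole asset, leaning on the
// "short read means done" contract described above; no retry loop is needed.
#include "include/core/SkStream.h"
#include <memory>
#include <vector>

std::vector<uint8_t> read_all_sketch(const char path[]) {
    std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
    if (!stream) {
        return {};
    }
    std::vector<uint8_t> bytes(stream->getLength());
    size_t got = stream->read(bytes.data(), bytes.size());
    bytes.resize(got);      // fewer bytes back means EOF or error, not "try again"
    return bytes;
}
// ---------------------------------------------------------------------------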
*/ +class SK_API SkStreamSeekable : public SkStreamRewindable { +public: + std::unique_ptr<SkStreamSeekable> duplicate() const { + return std::unique_ptr<SkStreamSeekable>(this->onDuplicate()); + } + + bool hasPosition() const override { return true; } + size_t getPosition() const override = 0; + bool seek(size_t position) override = 0; + bool move(long offset) override = 0; + + std::unique_ptr<SkStreamSeekable> fork() const { + return std::unique_ptr<SkStreamSeekable>(this->onFork()); + } +private: + SkStreamSeekable* onDuplicate() const override = 0; + SkStreamSeekable* onFork() const override = 0; +}; + +/** SkStreamAsset is a SkStreamSeekable for which getLength is required. */ +class SK_API SkStreamAsset : public SkStreamSeekable { +public: + bool hasLength() const override { return true; } + size_t getLength() const override = 0; + + std::unique_ptr<SkStreamAsset> duplicate() const { + return std::unique_ptr<SkStreamAsset>(this->onDuplicate()); + } + std::unique_ptr<SkStreamAsset> fork() const { + return std::unique_ptr<SkStreamAsset>(this->onFork()); + } +private: + SkStreamAsset* onDuplicate() const override = 0; + SkStreamAsset* onFork() const override = 0; +}; + +/** SkStreamMemory is a SkStreamAsset for which getMemoryBase is required. */ +class SK_API SkStreamMemory : public SkStreamAsset { +public: + const void* getMemoryBase() override = 0; + + std::unique_ptr<SkStreamMemory> duplicate() const { + return std::unique_ptr<SkStreamMemory>(this->onDuplicate()); + } + std::unique_ptr<SkStreamMemory> fork() const { + return std::unique_ptr<SkStreamMemory>(this->onFork()); + } +private: + SkStreamMemory* onDuplicate() const override = 0; + SkStreamMemory* onFork() const override = 0; +}; + +class SK_API SkWStream { +public: + virtual ~SkWStream(); + SkWStream() {} + + /** Called to write bytes to a SkWStream. Returns true on success + @param buffer the address of at least size bytes to be written to the stream + @param size The number of bytes in buffer to write to the stream + @return true on success + */ + virtual bool write(const void* buffer, size_t size) = 0; + virtual void flush(); + + virtual size_t bytesWritten() const = 0; + + // helpers + + bool write8(U8CPU value) { + uint8_t v = SkToU8(value); + return this->write(&v, 1); + } + bool write16(U16CPU value) { + uint16_t v = SkToU16(value); + return this->write(&v, 2); + } + bool write32(uint32_t v) { + return this->write(&v, 4); + } + + bool writeText(const char text[]) { + SkASSERT(text); + return this->write(text, strlen(text)); + } + + bool newline() { return this->write("\n", strlen("\n")); } + + bool writeDecAsText(int32_t); + bool writeBigDecAsText(int64_t, int minDigits = 0); + bool writeHexAsText(uint32_t, int minDigits = 0); + bool writeScalarAsText(SkScalar); + + bool writeBool(bool v) { return this->write8(v); } + bool writeScalar(SkScalar); + bool writePackedUInt(size_t); + + bool writeStream(SkStream* input, size_t length); + + /** + * This returns the number of bytes in the stream required to store + * 'value'. 
+ */ + static int SizeOfPackedUInt(size_t value); + +private: + SkWStream(const SkWStream&) = delete; + SkWStream& operator=(const SkWStream&) = delete; +}; + +class SK_API SkNullWStream : public SkWStream { +public: + SkNullWStream() : fBytesWritten(0) {} + + bool write(const void* , size_t n) override { fBytesWritten += n; return true; } + void flush() override {} + size_t bytesWritten() const override { return fBytesWritten; } + +private: + size_t fBytesWritten; +}; + +//////////////////////////////////////////////////////////////////////////////////////// + +#include <stdio.h> + +/** A stream that wraps a C FILE* file stream. */ +class SK_API SkFILEStream : public SkStreamAsset { +public: + /** Initialize the stream by calling sk_fopen on the specified path. + * This internal stream will be closed in the destructor. + */ + explicit SkFILEStream(const char path[] = nullptr); + + /** Initialize the stream with an existing C FILE stream. + * The current position of the C FILE stream will be considered the + * beginning of the SkFILEStream and the current seek end of the FILE will be the end. + * The C FILE stream will be closed in the destructor. + */ + explicit SkFILEStream(FILE* file); + + /** Initialize the stream with an existing C FILE stream. + * The current position of the C FILE stream will be considered the + * beginning of the SkFILEStream and size bytes later will be the end. + * The C FILE stream will be closed in the destructor. + */ + explicit SkFILEStream(FILE* file, size_t size); + + ~SkFILEStream() override; + + static std::unique_ptr<SkFILEStream> Make(const char path[]) { + std::unique_ptr<SkFILEStream> stream(new SkFILEStream(path)); + return stream->isValid() ? std::move(stream) : nullptr; + } + + /** Returns true if the current path could be opened. */ + bool isValid() const { return fFILE != nullptr; } + + /** Close this SkFILEStream. */ + void close(); + + size_t read(void* buffer, size_t size) override; + bool isAtEnd() const override; + + bool rewind() override; + std::unique_ptr<SkStreamAsset> duplicate() const { + return std::unique_ptr<SkStreamAsset>(this->onDuplicate()); + } + + size_t getPosition() const override; + bool seek(size_t position) override; + bool move(long offset) override; + + std::unique_ptr<SkStreamAsset> fork() const { + return std::unique_ptr<SkStreamAsset>(this->onFork()); + } + + size_t getLength() const override; + +private: + explicit SkFILEStream(FILE*, size_t size, size_t start); + explicit SkFILEStream(std::shared_ptr<FILE>, size_t end, size_t start); + explicit SkFILEStream(std::shared_ptr<FILE>, size_t end, size_t start, size_t current); + + SkStreamAsset* onDuplicate() const override; + SkStreamAsset* onFork() const override; + + std::shared_ptr<FILE> fFILE; + // My own council will I keep on sizes and offsets. + // These are seek positions in the underling FILE, not offsets into the stream. + size_t fEnd; + size_t fStart; + size_t fCurrent; + + using INHERITED = SkStreamAsset; +}; + +class SK_API SkMemoryStream : public SkStreamMemory { +public: + SkMemoryStream(); + + /** We allocate (and free) the memory. Write to it via getMemoryBase() */ + SkMemoryStream(size_t length); + + /** If copyData is true, the stream makes a private copy of the data. */ + SkMemoryStream(const void* data, size_t length, bool copyData = false); + + /** Creates the stream to read from the specified data */ + SkMemoryStream(sk_sp<SkData> data); + + /** Returns a stream with a copy of the input data. 
*/ + static std::unique_ptr<SkMemoryStream> MakeCopy(const void* data, size_t length); + + /** Returns a stream with a bare pointer reference to the input data. */ + static std::unique_ptr<SkMemoryStream> MakeDirect(const void* data, size_t length); + + /** Returns a stream with a shared reference to the input data. */ + static std::unique_ptr<SkMemoryStream> Make(sk_sp<SkData> data); + + /** Resets the stream to the specified data and length, + just like the constructor. + if copyData is true, the stream makes a private copy of the data + */ + virtual void setMemory(const void* data, size_t length, + bool copyData = false); + /** Replace any memory buffer with the specified buffer. The caller + must have allocated data with sk_malloc or sk_realloc, since it + will be freed with sk_free. + */ + void setMemoryOwned(const void* data, size_t length); + + sk_sp<SkData> asData() const { return fData; } + void setData(sk_sp<SkData> data); + + void skipToAlign4(); + const void* getAtPos(); + + size_t read(void* buffer, size_t size) override; + bool isAtEnd() const override; + + size_t peek(void* buffer, size_t size) const override; + + bool rewind() override; + + std::unique_ptr<SkMemoryStream> duplicate() const { + return std::unique_ptr<SkMemoryStream>(this->onDuplicate()); + } + + size_t getPosition() const override; + bool seek(size_t position) override; + bool move(long offset) override; + + std::unique_ptr<SkMemoryStream> fork() const { + return std::unique_ptr<SkMemoryStream>(this->onFork()); + } + + size_t getLength() const override; + + const void* getMemoryBase() override; + +private: + SkMemoryStream* onDuplicate() const override; + SkMemoryStream* onFork() const override; + + sk_sp<SkData> fData; + size_t fOffset; + + using INHERITED = SkStreamMemory; +}; + +///////////////////////////////////////////////////////////////////////////////////////////// + +class SK_API SkFILEWStream : public SkWStream { +public: + SkFILEWStream(const char path[]); + ~SkFILEWStream() override; + + /** Returns true if the current path could be opened. + */ + bool isValid() const { return fFILE != nullptr; } + + bool write(const void* buffer, size_t size) override; + void flush() override; + void fsync(); + size_t bytesWritten() const override; + +private: + FILE* fFILE; + + using INHERITED = SkWStream; +}; + +class SK_API SkDynamicMemoryWStream : public SkWStream { +public: + SkDynamicMemoryWStream() = default; + SkDynamicMemoryWStream(SkDynamicMemoryWStream&&); + SkDynamicMemoryWStream& operator=(SkDynamicMemoryWStream&&); + ~SkDynamicMemoryWStream() override; + + bool write(const void* buffer, size_t size) override; + size_t bytesWritten() const override; + + bool read(void* buffer, size_t offset, size_t size); + + /** More efficient version of read(dst, 0, bytesWritten()). */ + void copyTo(void* dst) const; + bool writeToStream(SkWStream* dst) const; + + /** Equivalent to copyTo() followed by reset(), but may save memory use. */ + void copyToAndReset(void* dst); + + /** Equivalent to writeToStream() followed by reset(), but may save memory use. */ + bool writeToAndReset(SkWStream* dst); + + /** Equivalent to writeToStream() followed by reset(), but may save memory use. + When the dst is also a SkDynamicMemoryWStream, the implementation is constant time. */ + bool writeToAndReset(SkDynamicMemoryWStream* dst); + + /** Prepend this stream to dst, resetting this. */ + void prependToAndReset(SkDynamicMemoryWStream* dst); + + /** Return the contents as SkData, and then reset the stream. 
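// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): write into a growable memory stream and
// read the bytes back. detachAsData() is declared just below this point; the rest
// (writeText, write32, readU32, SkMemoryStream::Make) appears earlier in the hunk.
#include "include/core/SkStream.h"
#include <memory>
#include <utility>

void memory_stream_roundtrip_sketch() {
    SkDynamicMemoryWStream w;
    w.writeText("skia");
    w.write32(42);

    sk_sp<SkData> data = w.detachAsData();          // hands back the bytes, resets w
    std::unique_ptr<SkMemoryStream> r = SkMemoryStream::Make(std::move(data));

    char tag[4];
    uint32_t value = 0;
    if (r->read(tag, 4) == 4 && r->readU32(&value)) {
        SkASSERT(value == 42);                      // write32/readU32 are native-endian
    }
}
// ---------------------------------------------------------------------------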
*/ + sk_sp<SkData> detachAsData(); + + /** Reset, returning a reader stream with the current content. */ + std::unique_ptr<SkStreamAsset> detachAsStream(); + + /** Reset the stream to its original, empty, state. */ + void reset(); + void padToAlign4(); +private: + struct Block; + Block* fHead = nullptr; + Block* fTail = nullptr; + size_t fBytesWrittenBeforeTail = 0; + +#ifdef SK_DEBUG + void validate() const; +#else + void validate() const {} +#endif + + // For access to the Block type. + friend class SkBlockMemoryStream; + friend class SkBlockMemoryRefCnt; + + using INHERITED = SkWStream; +}; + +#endif diff --git a/src/deps/skia/include/core/SkString.h b/src/deps/skia/include/core/SkString.h new file mode 100644 index 000000000..5576e7422 --- /dev/null +++ b/src/deps/skia/include/core/SkString.h @@ -0,0 +1,302 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkString_DEFINED +#define SkString_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkScalar.h" +#include "include/core/SkTypes.h" +#include "include/private/SkMalloc.h" +#include "include/private/SkTArray.h" +#include "include/private/SkTo.h" + +#include <stdarg.h> +#include <string.h> +#include <atomic> +#include <string> + +namespace skstd { + class string_view; +} + +/* Some helper functions for C strings */ +static inline bool SkStrStartsWith(const char string[], const char prefixStr[]) { + SkASSERT(string); + SkASSERT(prefixStr); + return !strncmp(string, prefixStr, strlen(prefixStr)); +} +static inline bool SkStrStartsWith(const char string[], const char prefixChar) { + SkASSERT(string); + return (prefixChar == *string); +} + +bool SkStrEndsWith(const char string[], const char suffixStr[]); +bool SkStrEndsWith(const char string[], const char suffixChar); + +int SkStrStartsWithOneOf(const char string[], const char prefixes[]); + +static inline int SkStrFind(const char string[], const char substring[]) { + const char *first = strstr(string, substring); + if (nullptr == first) return -1; + return SkToInt(first - &string[0]); +} + +static inline int SkStrFindLastOf(const char string[], const char subchar) { + const char* last = strrchr(string, subchar); + if (nullptr == last) return -1; + return SkToInt(last - &string[0]); +} + +static inline bool SkStrContains(const char string[], const char substring[]) { + SkASSERT(string); + SkASSERT(substring); + return (-1 != SkStrFind(string, substring)); +} +static inline bool SkStrContains(const char string[], const char subchar) { + SkASSERT(string); + char tmp[2]; + tmp[0] = subchar; + tmp[1] = '\0'; + return (-1 != SkStrFind(string, tmp)); +} + +/* + * The SkStrAppend... methods will write into the provided buffer, assuming it is large enough. + * Each method has an associated const (e.g. kSkStrAppendU32_MaxSize) which will be the largest + * value needed for that method's buffer. + * + * char storage[kSkStrAppendU32_MaxSize]; + * SkStrAppendU32(storage, value); + * + * Note : none of the SkStrAppend... methods write a terminating 0 to their buffers. Instead, + * the methods return the ptr to the end of the written part of the buffer. This can be used + * to compute the length, and/or know where to write a 0 if that is desired. + * + * char storage[kSkStrAppendU32_MaxSize + 1]; + * char* stop = SkStrAppendU32(storage, value); + * size_t len = stop - storage; + * *stop = 0; // valid, since storage was 1 byte larger than the max. 
+ */ + +static constexpr int kSkStrAppendU32_MaxSize = 10; +char* SkStrAppendU32(char buffer[], uint32_t); +static constexpr int kSkStrAppendU64_MaxSize = 20; +char* SkStrAppendU64(char buffer[], uint64_t, int minDigits); + +static constexpr int kSkStrAppendS32_MaxSize = kSkStrAppendU32_MaxSize + 1; +char* SkStrAppendS32(char buffer[], int32_t); +static constexpr int kSkStrAppendS64_MaxSize = kSkStrAppendU64_MaxSize + 1; +char* SkStrAppendS64(char buffer[], int64_t, int minDigits); + +/** + * Floats have at most 8 significant digits, so we limit our %g to that. + * However, the total string could be 15 characters: -1.2345678e-005 + * + * In theory we should only expect up to 2 digits for the exponent, but on + * some platforms we have seen 3 (as in the example above). + */ +static constexpr int kSkStrAppendScalar_MaxSize = 15; + +/** + * Write the scalar in decimal format into buffer, and return a pointer to + * the next char after the last one written. Note: a terminating 0 is not + * written into buffer, which must be at least kSkStrAppendScalar_MaxSize. + * Thus if the caller wants to add a 0 at the end, buffer must be at least + * kSkStrAppendScalar_MaxSize + 1 bytes large. + */ +char* SkStrAppendScalar(char buffer[], SkScalar); + +/** \class SkString + + Light weight class for managing strings. Uses reference + counting to make string assignments and copies very fast + with no extra RAM cost. Assumes UTF8 encoding. +*/ +class SK_API SkString { +public: + SkString(); + explicit SkString(size_t len); + explicit SkString(const char text[]); + SkString(const char text[], size_t len); + SkString(const SkString&); + SkString(SkString&&); + explicit SkString(const std::string&); + explicit SkString(skstd::string_view); + ~SkString(); + + bool isEmpty() const { return 0 == fRec->fLength; } + size_t size() const { return (size_t) fRec->fLength; } + const char* c_str() const { return fRec->data(); } + char operator[](size_t n) const { return this->c_str()[n]; } + + bool equals(const SkString&) const; + bool equals(const char text[]) const; + bool equals(const char text[], size_t len) const; + + bool startsWith(const char prefixStr[]) const { + return SkStrStartsWith(fRec->data(), prefixStr); + } + bool startsWith(const char prefixChar) const { + return SkStrStartsWith(fRec->data(), prefixChar); + } + bool endsWith(const char suffixStr[]) const { + return SkStrEndsWith(fRec->data(), suffixStr); + } + bool endsWith(const char suffixChar) const { + return SkStrEndsWith(fRec->data(), suffixChar); + } + bool contains(const char substring[]) const { + return SkStrContains(fRec->data(), substring); + } + bool contains(const char subchar) const { + return SkStrContains(fRec->data(), subchar); + } + int find(const char substring[]) const { + return SkStrFind(fRec->data(), substring); + } + int findLastOf(const char subchar) const { + return SkStrFindLastOf(fRec->data(), subchar); + } + + friend bool operator==(const SkString& a, const SkString& b) { + return a.equals(b); + } + friend bool operator!=(const SkString& a, const SkString& b) { + return !a.equals(b); + } + + // these methods edit the string + + SkString& operator=(const SkString&); + SkString& operator=(SkString&&); + SkString& operator=(const char text[]); + + char* writable_str(); + char& operator[](size_t n) { return this->writable_str()[n]; } + + void reset(); + /** String contents are preserved on resize. (For destructive resize, `set(nullptr, length)`.) 
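+     *
+     * Illustrative sketch (editor's addition, not upstream Skia documentation):
+     * growing a string in place and filling the new tail through writable_str().
+     *
+     *     SkString s("abc");
+     *     s.resize(6);                            // "abc" is preserved
+     *     memcpy(s.writable_str() + 3, "def", 3); // s.equals("abcdef") should now hold
+     *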
+ * `resize` automatically reserves an extra byte at the end of the buffer for a null terminator. + */ + void resize(size_t len); + void set(const SkString& src) { *this = src; } + void set(const char text[]); + void set(const char text[], size_t len); + + void insert(size_t offset, const SkString& src) { this->insert(offset, src.c_str(), src.size()); } + void insert(size_t offset, const char text[]); + void insert(size_t offset, const char text[], size_t len); + void insertUnichar(size_t offset, SkUnichar); + void insertS32(size_t offset, int32_t value); + void insertS64(size_t offset, int64_t value, int minDigits = 0); + void insertU32(size_t offset, uint32_t value); + void insertU64(size_t offset, uint64_t value, int minDigits = 0); + void insertHex(size_t offset, uint32_t value, int minDigits = 0); + void insertScalar(size_t offset, SkScalar); + + void append(const SkString& str) { this->insert((size_t)-1, str); } + void append(const char text[]) { this->insert((size_t)-1, text); } + void append(const char text[], size_t len) { this->insert((size_t)-1, text, len); } + void appendUnichar(SkUnichar uni) { this->insertUnichar((size_t)-1, uni); } + void appendS32(int32_t value) { this->insertS32((size_t)-1, value); } + void appendS64(int64_t value, int minDigits = 0) { this->insertS64((size_t)-1, value, minDigits); } + void appendU32(uint32_t value) { this->insertU32((size_t)-1, value); } + void appendU64(uint64_t value, int minDigits = 0) { this->insertU64((size_t)-1, value, minDigits); } + void appendHex(uint32_t value, int minDigits = 0) { this->insertHex((size_t)-1, value, minDigits); } + void appendScalar(SkScalar value) { this->insertScalar((size_t)-1, value); } + + void prepend(const SkString& str) { this->insert(0, str); } + void prepend(const char text[]) { this->insert(0, text); } + void prepend(const char text[], size_t len) { this->insert(0, text, len); } + void prependUnichar(SkUnichar uni) { this->insertUnichar(0, uni); } + void prependS32(int32_t value) { this->insertS32(0, value); } + void prependS64(int32_t value, int minDigits = 0) { this->insertS64(0, value, minDigits); } + void prependHex(uint32_t value, int minDigits = 0) { this->insertHex(0, value, minDigits); } + void prependScalar(SkScalar value) { this->insertScalar((size_t)-1, value); } + + void printf(const char format[], ...) SK_PRINTF_LIKE(2, 3); + void printVAList(const char format[], va_list); + void appendf(const char format[], ...) SK_PRINTF_LIKE(2, 3); + void appendVAList(const char format[], va_list); + void prependf(const char format[], ...) SK_PRINTF_LIKE(2, 3); + void prependVAList(const char format[], va_list); + + void remove(size_t offset, size_t length); + + SkString& operator+=(const SkString& s) { this->append(s); return *this; } + SkString& operator+=(const char text[]) { this->append(text); return *this; } + SkString& operator+=(const char c) { this->append(&c, 1); return *this; } + + /** + * Swap contents between this and other. This function is guaranteed + * to never fail or throw. 
+ */ + void swap(SkString& other); + +private: + struct Rec { + public: + constexpr Rec(uint32_t len, int32_t refCnt) : fLength(len), fRefCnt(refCnt) {} + static sk_sp<Rec> Make(const char text[], size_t len); + char* data() { return fBeginningOfData; } + const char* data() const { return fBeginningOfData; } + void ref() const; + void unref() const; + bool unique() const; +#ifdef SK_DEBUG + int32_t getRefCnt() const; +#endif + uint32_t fLength; // logically size_t, but we want it to stay 32 bits + + private: + mutable std::atomic<int32_t> fRefCnt; + char fBeginningOfData[1] = {'\0'}; + + // Ensure the unsized delete is called. + void operator delete(void* p) { ::operator delete(p); } + }; + sk_sp<Rec> fRec; + +#ifdef SK_DEBUG + const SkString& validate() const; +#else + const SkString& validate() const { return *this; } +#endif + + static const Rec gEmptyRec; +}; + +/// Creates a new string and writes into it using a printf()-style format. +SkString SkStringPrintf(const char* format, ...) SK_PRINTF_LIKE(1, 2); +/// This makes it easier to write a caller as a VAR_ARGS function where the format string is +/// optional. +static inline SkString SkStringPrintf() { return SkString(); } + +static inline void swap(SkString& a, SkString& b) { + a.swap(b); +} + +enum SkStrSplitMode { + // Strictly return all results. If the input is ",," and the separator is ',' this will return + // an array of three empty strings. + kStrict_SkStrSplitMode, + + // Only nonempty results will be added to the results. Multiple separators will be + // coalesced. Separators at the beginning and end of the input will be ignored. If the input is + // ",," and the separator is ',', this will return an empty vector. + kCoalesce_SkStrSplitMode +}; + +// Split str on any characters in delimiters into out. (Think, strtok with a sane API.) +void SkStrSplit(const char* str, const char* delimiters, SkStrSplitMode splitMode, + SkTArray<SkString>* out); +inline void SkStrSplit(const char* str, const char* delimiters, SkTArray<SkString>* out) { + SkStrSplit(str, delimiters, kCoalesce_SkStrSplitMode, out); +} + +#endif diff --git a/src/deps/skia/include/core/SkStringView.h b/src/deps/skia/include/core/SkStringView.h new file mode 100644 index 000000000..184e681d2 --- /dev/null +++ b/src/deps/skia/include/core/SkStringView.h @@ -0,0 +1,185 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkStringView_DEFINED +#define SkStringView_DEFINED + +#include <algorithm> +#include <cstring> +#include <string> + +namespace skstd { + +class string_view { +public: + using value_type = char; + using traits_type = std::char_traits<value_type>; + using const_pointer = const value_type*; + using const_reference = const value_type&; + using iterator = const_pointer; + using const_iterator = iterator; + using size_type = size_t; + static constexpr size_type npos = size_type(-1); + + constexpr string_view() + : fData(nullptr) + , fLength(0) {} + + constexpr string_view(const string_view&) = default; + + constexpr string_view(const_pointer data, size_type length) + : fData(data) + , fLength(length) {} + + string_view(const_pointer data) + : string_view(data, strlen(data)) {} + + string_view(const std::string& str) + : string_view(str.data(), str.length()) {} + + constexpr string_view& operator=(const string_view&) = default; + + constexpr iterator begin() const { + return fData; + } + + constexpr iterator end() const { + return fData + fLength; + } + + constexpr const_reference operator[](size_type idx) const { + return fData[idx]; + } + + constexpr const_reference front() const { + return fData[0]; + } + + constexpr const_reference back() const { + return fData[fLength - 1]; + } + + constexpr const_pointer data() const { + return fData; + } + + constexpr size_type size() const { + return fLength; + } + + constexpr size_type length() const { + return fLength; + } + + constexpr bool empty() const { + return fLength == 0; + } + + constexpr bool starts_with(string_view s) const { + if (s.length() > fLength) { + return false; + } + return s.length() == 0 || !memcmp(fData, s.fData, s.length()); + } + + constexpr bool starts_with(value_type c) const { + return !this->empty() && this->front() == c; + } + + constexpr bool ends_with(string_view s) const { + if (s.length() > fLength) { + return false; + } + return s.length() == 0 || !memcmp(this->end() - s.length(), s.fData, s.length()); + } + + constexpr bool ends_with(value_type c) const { + return !this->empty() && this->back() == c; + } + + size_type find(string_view needle, size_type pos = 0) const { + if (needle.length() == 0) { + return 0; + } + if (this->length() < needle.length()) { + return npos; + } + const char* match = nullptr; + const char* start = this->data() + pos; + const char* end = start + this->length() - needle.length() + 1; + while ((match = (const char*)(memchr(start, needle[0], (size_t)(end - start))))) { + if (!memcmp(match, needle.data(), needle.length())) { + return (size_type)(match - this->data()); + } else { + start = match + 1; + } + } + return npos; + } + + bool contains(string_view needle) const { + return this->find(needle) != npos; + } + + constexpr string_view substr(size_type pos = 0, size_type count = npos) const { + if (pos > fLength) { + return {}; + } + return string_view{fData + pos, std::min(count, fLength - pos)}; + } + + constexpr void swap(string_view& other) { + const_pointer tempData = fData; + fData = other.fData; + other.fData = tempData; + + size_type tempLength = fLength; + fLength = other.fLength; + other.fLength = tempLength; + } + + constexpr void remove_prefix(size_type n) { + fData += n; + fLength -= n; + } + + constexpr void remove_suffix(size_type n) { + fLength -= n; + } + +private: + const_pointer fData; + size_type fLength; +}; + +bool operator==(string_view left, string_view right); + +bool operator!=(string_view left, string_view right); + +bool operator<(string_view left, 
string_view right); + +bool operator<=(string_view left, string_view right); + +bool operator>(string_view left, string_view right); + +bool operator>=(string_view left, string_view right); + +} // namespace skstd + +namespace std { + template<> struct hash<skstd::string_view> { + size_t operator()(const skstd::string_view& s) const { + size_t result = 0; + for (auto iter = s.begin(); iter != s.end(); ++iter) { + result = result * 101 + (size_t) *iter; + } + return result; + } + }; +} // namespace std + +#endif diff --git a/src/deps/skia/include/core/SkStrokeRec.h b/src/deps/skia/include/core/SkStrokeRec.h new file mode 100644 index 000000000..b4796fcbf --- /dev/null +++ b/src/deps/skia/include/core/SkStrokeRec.h @@ -0,0 +1,154 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkStrokeRec_DEFINED +#define SkStrokeRec_DEFINED + +#include "include/core/SkPaint.h" +#include "include/private/SkMacros.h" + +class SkPath; + +SK_BEGIN_REQUIRE_DENSE +class SK_API SkStrokeRec { +public: + enum InitStyle { + kHairline_InitStyle, + kFill_InitStyle + }; + SkStrokeRec(InitStyle style); + SkStrokeRec(const SkPaint&, SkPaint::Style, SkScalar resScale = 1); + explicit SkStrokeRec(const SkPaint&, SkScalar resScale = 1); + + enum Style { + kHairline_Style, + kFill_Style, + kStroke_Style, + kStrokeAndFill_Style + }; + + static constexpr int kStyleCount = kStrokeAndFill_Style + 1; + + Style getStyle() const; + SkScalar getWidth() const { return fWidth; } + SkScalar getMiter() const { return fMiterLimit; } + SkPaint::Cap getCap() const { return (SkPaint::Cap)fCap; } + SkPaint::Join getJoin() const { return (SkPaint::Join)fJoin; } + + bool isHairlineStyle() const { + return kHairline_Style == this->getStyle(); + } + + bool isFillStyle() const { + return kFill_Style == this->getStyle(); + } + + void setFillStyle(); + void setHairlineStyle(); + /** + * Specify the strokewidth, and optionally if you want stroke + fill. + * Note, if width==0, then this request is taken to mean: + * strokeAndFill==true -> new style will be Fill + * strokeAndFill==false -> new style will be Hairline + */ + void setStrokeStyle(SkScalar width, bool strokeAndFill = false); + + void setStrokeParams(SkPaint::Cap cap, SkPaint::Join join, SkScalar miterLimit) { + fCap = cap; + fJoin = join; + fMiterLimit = miterLimit; + } + + SkScalar getResScale() const { + return fResScale; + } + + void setResScale(SkScalar rs) { + SkASSERT(rs > 0 && SkScalarIsFinite(rs)); + fResScale = rs; + } + + /** + * Returns true if this specifes any thick stroking, i.e. applyToPath() + * will return true. + */ + bool needToApply() const { + Style style = this->getStyle(); + return (kStroke_Style == style) || (kStrokeAndFill_Style == style); + } + + /** + * Apply these stroke parameters to the src path, returning the result + * in dst. + * + * If there was no change (i.e. style == hairline or fill) this returns + * false and dst is unchanged. Otherwise returns true and the result is + * stored in dst. + * + * src and dst may be the same path. + */ + bool applyToPath(SkPath* dst, const SkPath& src) const; + + /** + * Apply these stroke parameters to a paint. + */ + void applyToPaint(SkPaint* paint) const; + + /** + * Gives a conservative value for the outset that should applied to a + * geometries bounds to account for any inflation due to applying this + * strokeRec to the geometry. 
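+     *
+     * Illustrative sketch (editor's addition, not upstream Skia documentation):
+     * padding a bounding rect before stroking, where `paint` and `bounds` are
+     * assumed to exist in the caller.
+     *
+     *     SkStrokeRec rec(paint, SkPaint::kStroke_Style);
+     *     SkScalar pad = rec.getInflationRadius();
+     *     bounds.outset(pad, pad);   // conservative bounds for the stroked result
+     *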
+ */ + SkScalar getInflationRadius() const; + + /** + * Equivalent to: + * SkStrokeRec rec(paint, style); + * rec.getInflationRadius(); + * This does not account for other effects on the paint (i.e. path + * effect). + */ + static SkScalar GetInflationRadius(const SkPaint&, SkPaint::Style); + + static SkScalar GetInflationRadius(SkPaint::Join, SkScalar miterLimit, SkPaint::Cap, + SkScalar strokeWidth); + + /** + * Compare if two SkStrokeRecs have an equal effect on a path. + * Equal SkStrokeRecs produce equal paths. Equality of produced + * paths does not take the ResScale parameter into account. + */ + bool hasEqualEffect(const SkStrokeRec& other) const { + if (!this->needToApply()) { + return this->getStyle() == other.getStyle(); + } + return fWidth == other.fWidth && + (fJoin != SkPaint::kMiter_Join || fMiterLimit == other.fMiterLimit) && + fCap == other.fCap && + fJoin == other.fJoin && + fStrokeAndFill == other.fStrokeAndFill; + } + +private: + void init(const SkPaint&, SkPaint::Style, SkScalar resScale); + + SkScalar fResScale; + SkScalar fWidth; + SkScalar fMiterLimit; + // The following three members are packed together into a single u32. + // This is to avoid unnecessary padding and ensure binary equality for + // hashing (because the padded areas might contain garbage values). + // + // fCap and fJoin are larger than needed to avoid having to initialize + // any pad values + uint32_t fCap : 16; // SkPaint::Cap + uint32_t fJoin : 15; // SkPaint::Join + uint32_t fStrokeAndFill : 1; // bool +}; +SK_END_REQUIRE_DENSE + +#endif diff --git a/src/deps/skia/include/core/SkSurface.h b/src/deps/skia/include/core/SkSurface.h new file mode 100644 index 000000000..3718aaaab --- /dev/null +++ b/src/deps/skia/include/core/SkSurface.h @@ -0,0 +1,1079 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSurface_DEFINED +#define SkSurface_DEFINED + +#include "include/core/SkImage.h" +#include "include/core/SkPixmap.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkSurfaceProps.h" + +#if SK_SUPPORT_GPU +#include "include/gpu/GrTypes.h" +#endif + +#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26 +#include <android/hardware_buffer.h> +#endif + +#ifdef SK_METAL +#include "include/gpu/mtl/GrMtlTypes.h" +#endif + +class SkCanvas; +class SkDeferredDisplayList; +class SkPaint; +class SkSurfaceCharacterization; +class GrBackendRenderTarget; +class GrBackendSemaphore; +class GrBackendSurfaceMutableState; +class GrBackendTexture; +class GrDirectContext; +class GrRecordingContext; +class GrRenderTarget; +enum GrSurfaceOrigin: int; + +/** \class SkSurface + SkSurface is responsible for managing the pixels that a canvas draws into. The pixels can be + allocated either in CPU memory (a raster surface) or on the GPU (a GrRenderTarget surface). + SkSurface takes care of allocating a SkCanvas that will draw into the surface. Call + surface->getCanvas() to use that canvas (but don't delete it, it is owned by the surface). + SkSurface always has non-zero dimensions. If there is a request for a new surface, and either + of the requested dimensions are zero, then nullptr will be returned. +*/ +class SK_API SkSurface : public SkRefCnt { +public: + + /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels. + + SkSurface is returned if all parameters are valid. 
+ Valid parameters include: + info dimensions are greater than zero; + info contains SkColorType and SkAlphaType supported by raster surface; + pixels is not nullptr; + rowBytes is large enough to contain info width pixels of SkColorType. + + Pixel buffer size should be info height times computed rowBytes. + Pixels are not initialized. + To access pixels after drawing, peekPixels() or readPixels(). + + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace, + of raster surface; width and height must be greater than zero + @param pixels pointer to destination pixels buffer + @param rowBytes interval from one SkSurface row to the next + @param surfaceProps LCD striping orientation and setting for device independent fonts; + may be nullptr + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRasterDirect(const SkImageInfo& imageInfo, void* pixels, + size_t rowBytes, + const SkSurfaceProps* surfaceProps = nullptr); + + static sk_sp<SkSurface> MakeRasterDirect(const SkPixmap& pm, + const SkSurfaceProps* props = nullptr) { + return MakeRasterDirect(pm.info(), pm.writable_addr(), pm.rowBytes(), props); + } + + /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels. + releaseProc is called with pixels and context when SkSurface is deleted. + + SkSurface is returned if all parameters are valid. + Valid parameters include: + info dimensions are greater than zero; + info contains SkColorType and SkAlphaType supported by raster surface; + pixels is not nullptr; + rowBytes is large enough to contain info width pixels of SkColorType. + + Pixel buffer size should be info height times computed rowBytes. + Pixels are not initialized. + To access pixels after drawing, call flush() or peekPixels(). + + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace, + of raster surface; width and height must be greater than zero + @param pixels pointer to destination pixels buffer + @param rowBytes interval from one SkSurface row to the next + @param releaseProc called when SkSurface is deleted; may be nullptr + @param context passed to releaseProc; may be nullptr + @param surfaceProps LCD striping orientation and setting for device independent fonts; + may be nullptr + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRasterDirectReleaseProc(const SkImageInfo& imageInfo, void* pixels, + size_t rowBytes, + void (*releaseProc)(void* pixels, void* context), + void* context, const SkSurfaceProps* surfaceProps = nullptr); + + /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels. + Allocates and zeroes pixel memory. Pixel memory size is imageInfo.height() times + rowBytes, or times imageInfo.minRowBytes() if rowBytes is zero. + Pixel memory is deleted when SkSurface is deleted. + + SkSurface is returned if all parameters are valid. + Valid parameters include: + info dimensions are greater than zero; + info contains SkColorType and SkAlphaType supported by raster surface; + rowBytes is large enough to contain info width pixels of SkColorType, or is zero. + + If rowBytes is zero, a suitable value will be chosen internally. 
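+
+        Illustrative sketch (editor's addition, not upstream Skia documentation):
+        a small CPU-backed surface; passing 0 for rowBytes lets Skia choose it.
+
+            SkImageInfo info = SkImageInfo::MakeN32Premul(256, 256);
+            sk_sp<SkSurface> surface = SkSurface::MakeRaster(info, 0, nullptr);
+            SkCanvas* canvas = surface->getCanvas();
+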
+ + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace, + of raster surface; width and height must be greater than zero + @param rowBytes interval from one SkSurface row to the next; may be zero + @param surfaceProps LCD striping orientation and setting for device independent fonts; + may be nullptr + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRaster(const SkImageInfo& imageInfo, size_t rowBytes, + const SkSurfaceProps* surfaceProps); + + /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels. + Allocates and zeroes pixel memory. Pixel memory size is imageInfo.height() times + imageInfo.minRowBytes(). + Pixel memory is deleted when SkSurface is deleted. + + SkSurface is returned if all parameters are valid. + Valid parameters include: + info dimensions are greater than zero; + info contains SkColorType and SkAlphaType supported by raster surface. + + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace, + of raster surface; width and height must be greater than zero + @param props LCD striping orientation and setting for device independent fonts; + may be nullptr + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRaster(const SkImageInfo& imageInfo, + const SkSurfaceProps* props = nullptr) { + return MakeRaster(imageInfo, 0, props); + } + + /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels. + Allocates and zeroes pixel memory. Pixel memory size is height times width times + four. Pixel memory is deleted when SkSurface is deleted. + + Internally, sets SkImageInfo to width, height, native color type, and + kPremul_SkAlphaType. + + SkSurface is returned if width and height are greater than zero. + + Use to create SkSurface that matches SkPMColor, the native pixel arrangement on + the platform. SkSurface drawn to output device skips converting its pixel format. + + @param width pixel column count; must be greater than zero + @param height pixel row count; must be greater than zero + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRasterN32Premul(int width, int height, + const SkSurfaceProps* surfaceProps = nullptr); + + /** Caller data passed to RenderTarget/TextureReleaseProc; may be nullptr. */ + typedef void* ReleaseContext; + + /** User function called when supplied render target may be deleted. */ + typedef void (*RenderTargetReleaseProc)(ReleaseContext releaseContext); + + /** User function called when supplied texture may be deleted. */ + typedef void (*TextureReleaseProc)(ReleaseContext releaseContext); + + /** Wraps a GPU-backed texture into SkSurface. Caller must ensure the texture is + valid for the lifetime of returned SkSurface. If sampleCnt greater than zero, + creates an intermediate MSAA SkSurface which is used for drawing backendTexture. + + SkSurface is returned if all parameters are valid. backendTexture is valid if + its pixel configuration agrees with colorSpace and context; for instance, if + backendTexture has an sRGB configuration, then context must support sRGB, + and colorSpace must be present. Further, backendTexture width and height must + not exceed context capabilities, and the context must be able to support + back-end textures. 
+ + Upon success textureReleaseProc is called when it is safe to delete the texture in the + backend API (accounting only for use of the texture by this surface). If SkSurface creation + fails textureReleaseProc is called before this function returns. + + If SK_SUPPORT_GPU is defined as zero, has no effect and returns nullptr. + + @param context GPU context + @param backendTexture texture residing on GPU + @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing + @param colorSpace range of colors; may be nullptr + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @param textureReleaseProc function called when texture can be released + @param releaseContext state passed to textureReleaseProc + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeFromBackendTexture(GrRecordingContext* context, + const GrBackendTexture& backendTexture, + GrSurfaceOrigin origin, int sampleCnt, + SkColorType colorType, + sk_sp<SkColorSpace> colorSpace, + const SkSurfaceProps* surfaceProps, + TextureReleaseProc textureReleaseProc = nullptr, + ReleaseContext releaseContext = nullptr); + + /** Wraps a GPU-backed buffer into SkSurface. Caller must ensure backendRenderTarget + is valid for the lifetime of returned SkSurface. + + SkSurface is returned if all parameters are valid. backendRenderTarget is valid if + its pixel configuration agrees with colorSpace and context; for instance, if + backendRenderTarget has an sRGB configuration, then context must support sRGB, + and colorSpace must be present. Further, backendRenderTarget width and height must + not exceed context capabilities, and the context must be able to support + back-end render targets. + + Upon success releaseProc is called when it is safe to delete the render target in the + backend API (accounting only for use of the render target by this surface). If SkSurface + creation fails releaseProc is called before this function returns. + + If SK_SUPPORT_GPU is defined as zero, has no effect and returns nullptr. + + @param context GPU context + @param backendRenderTarget GPU intermediate memory buffer + @param colorSpace range of colors + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @param releaseProc function called when backendRenderTarget can be released + @param releaseContext state passed to releaseProc + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeFromBackendRenderTarget(GrRecordingContext* context, + const GrBackendRenderTarget& backendRenderTarget, + GrSurfaceOrigin origin, + SkColorType colorType, + sk_sp<SkColorSpace> colorSpace, + const SkSurfaceProps* surfaceProps, + RenderTargetReleaseProc releaseProc = nullptr, + ReleaseContext releaseContext = nullptr); + + /** Returns SkSurface on GPU indicated by context. Allocates memory for + pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted + selects whether allocation for pixels is tracked by context. imageInfo + describes the pixel format in SkColorType, and transparency in + SkAlphaType, and color matching in SkColorSpace. + + sampleCount requests the number of samples per pixel. + Pass zero to disable multi-sample anti-aliasing. The request is rounded + up to the next supported count, or rounded down if it is larger than the + maximum supported count. + + surfaceOrigin pins either the top-left or the bottom-left corner to the origin. 
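+
+        Illustrative sketch (editor's addition, not upstream Skia documentation),
+        assuming `ctx` is a valid GrDirectContext* obtained elsewhere:
+
+            sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(
+                    ctx, SkBudgeted::kNo, SkImageInfo::MakeN32Premul(256, 256),
+                    /*sampleCount=*/0, kBottomLeft_GrSurfaceOrigin, /*surfaceProps=*/nullptr);
+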
+ + shouldCreateWithMips hints that SkImage returned by makeImageSnapshot() is mip map. + + If SK_SUPPORT_GPU is defined as zero, has no effect and returns nullptr. + + @param context GPU context + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace; + width, or height, or both, may be zero + @param sampleCount samples per pixel, or 0 to disable full scene anti-aliasing + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @param shouldCreateWithMips hint that SkSurface will host mip map images + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context, SkBudgeted budgeted, + const SkImageInfo& imageInfo, + int sampleCount, GrSurfaceOrigin surfaceOrigin, + const SkSurfaceProps* surfaceProps, + bool shouldCreateWithMips = false); + + /** Returns SkSurface on GPU indicated by context. Allocates memory for + pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted + selects whether allocation for pixels is tracked by context. imageInfo + describes the pixel format in SkColorType, and transparency in + SkAlphaType, and color matching in SkColorSpace. + + sampleCount requests the number of samples per pixel. + Pass zero to disable multi-sample anti-aliasing. The request is rounded + up to the next supported count, or rounded down if it is larger than the + maximum supported count. + + SkSurface bottom-left corner is pinned to the origin. + + @param context GPU context + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace, + of raster surface; width, or height, or both, may be zero + @param sampleCount samples per pixel, or 0 to disable multi-sample anti-aliasing + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context, SkBudgeted budgeted, + const SkImageInfo& imageInfo, int sampleCount, + const SkSurfaceProps* surfaceProps) { +#if SK_SUPPORT_GPU + return MakeRenderTarget(context, budgeted, imageInfo, sampleCount, + kBottomLeft_GrSurfaceOrigin, surfaceProps); +#else + // TODO(kjlubick, scroggo) Remove this once Android is updated. + return nullptr; +#endif + } + + /** Returns SkSurface on GPU indicated by context. Allocates memory for + pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted + selects whether allocation for pixels is tracked by context. imageInfo + describes the pixel format in SkColorType, and transparency in + SkAlphaType, and color matching in SkColorSpace. + + SkSurface bottom-left corner is pinned to the origin. + + @param context GPU context + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace, + of raster surface; width, or height, or both, may be zero + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context, SkBudgeted budgeted, + const SkImageInfo& imageInfo) { +#if SK_SUPPORT_GPU + if (!imageInfo.width() || !imageInfo.height()) { + return nullptr; + } + return MakeRenderTarget(context, budgeted, imageInfo, 0, kBottomLeft_GrSurfaceOrigin, + nullptr); +#else + // TODO(kjlubick, scroggo) Remove this once Android is updated. + return nullptr; +#endif + } + + /** Returns SkSurface on GPU indicated by context that is compatible with the provided + characterization. 
budgeted selects whether allocation for pixels is tracked by context. + + @param context GPU context + @param characterization description of the desired SkSurface + @return SkSurface if all parameters are valid; otherwise, nullptr + */ + static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context, + const SkSurfaceCharacterization& characterization, + SkBudgeted budgeted); + + +#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26 + /** Private. + Creates SkSurface from Android hardware buffer. + Returned SkSurface takes a reference on the buffer. The ref on the buffer will be released + when the SkSurface is destroyed and there is no pending work on the GPU involving the + buffer. + + Only available on Android, when __ANDROID_API__ is defined to be 26 or greater. + + Currently this is only supported for buffers that can be textured as well as rendered to. + In other words that must have both AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT and + AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE usage bits. + + @param context GPU context + @param hardwareBuffer AHardwareBuffer Android hardware buffer + @param colorSpace range of colors; may be nullptr + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @param fromWindow Whether or not the AHardwareBuffer is part of an Android Window. + Currently only used with Vulkan backend. + @return created SkSurface, or nullptr + */ + static sk_sp<SkSurface> MakeFromAHardwareBuffer(GrDirectContext* context, + AHardwareBuffer* hardwareBuffer, + GrSurfaceOrigin origin, + sk_sp<SkColorSpace> colorSpace, + const SkSurfaceProps* surfaceProps +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + , bool fromWindow = false +#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK + ); +#endif + +#ifdef SK_METAL + /** Creates SkSurface from CAMetalLayer. + Returned SkSurface takes a reference on the CAMetalLayer. The ref on the layer will be + released when the SkSurface is destroyed. + + Only available when Metal API is enabled. + + Will grab the current drawable from the layer and use its texture as a backendRT to + create a renderable surface. + + @param context GPU context + @param layer GrMTLHandle (expected to be a CAMetalLayer*) + @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing + @param colorSpace range of colors; may be nullptr + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @param drawable Pointer to drawable to be filled in when this surface is + instantiated; may not be nullptr + @return created SkSurface, or nullptr + */ + static sk_sp<SkSurface> MakeFromCAMetalLayer(GrRecordingContext* context, + GrMTLHandle layer, + GrSurfaceOrigin origin, + int sampleCnt, + SkColorType colorType, + sk_sp<SkColorSpace> colorSpace, + const SkSurfaceProps* surfaceProps, + GrMTLHandle* drawable) + SK_API_AVAILABLE_CA_METAL_LAYER; + + /** Creates SkSurface from MTKView. + Returned SkSurface takes a reference on the MTKView. The ref on the layer will be + released when the SkSurface is destroyed. + + Only available when Metal API is enabled. + + Will grab the current drawable from the layer and use its texture as a backendRT to + create a renderable surface. 
+ + @param context GPU context + @param layer GrMTLHandle (expected to be a MTKView*) + @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing + @param colorSpace range of colors; may be nullptr + @param surfaceProps LCD striping orientation and setting for device independent + fonts; may be nullptr + @return created SkSurface, or nullptr + */ + static sk_sp<SkSurface> MakeFromMTKView(GrRecordingContext* context, + GrMTLHandle mtkView, + GrSurfaceOrigin origin, + int sampleCnt, + SkColorType colorType, + sk_sp<SkColorSpace> colorSpace, + const SkSurfaceProps* surfaceProps) + SK_API_AVAILABLE(macos(10.11), ios(9.0)); +#endif + + /** Is this surface compatible with the provided characterization? + + This method can be used to determine if an existing SkSurface is a viable destination + for an SkDeferredDisplayList. + + @param characterization The characterization for which a compatibility check is desired + @return true if this surface is compatible with the characterization; + false otherwise + */ + bool isCompatible(const SkSurfaceCharacterization& characterization) const; + + /** Returns SkSurface without backing pixels. Drawing to SkCanvas returned from SkSurface + has no effect. Calling makeImageSnapshot() on returned SkSurface returns nullptr. + + @param width one or greater + @param height one or greater + @return SkSurface if width and height are positive; otherwise, nullptr + + example: https://fiddle.skia.org/c/@Surface_MakeNull + */ + static sk_sp<SkSurface> MakeNull(int width, int height); + + /** Returns pixel count in each row; may be zero or greater. + + @return number of pixel columns + */ + int width() const { return fWidth; } + + /** Returns pixel row count; may be zero or greater. + + @return number of pixel rows + */ + int height() const { return fHeight; } + + /** Returns an ImageInfo describing the surface. + */ + SkImageInfo imageInfo(); + + /** Returns unique value identifying the content of SkSurface. Returned value changes + each time the content changes. Content is changed by drawing, or by calling + notifyContentWillChange(). + + @return unique content identifier + + example: https://fiddle.skia.org/c/@Surface_notifyContentWillChange + */ + uint32_t generationID(); + + /** \enum SkSurface::ContentChangeMode + ContentChangeMode members are parameters to notifyContentWillChange(). + */ + enum ContentChangeMode { + kDiscard_ContentChangeMode, //!< discards surface on change + kRetain_ContentChangeMode, //!< preserves surface on change + }; + + /** Notifies that SkSurface contents will be changed by code outside of Skia. + Subsequent calls to generationID() return a different value. + + TODO: Can kRetain_ContentChangeMode be deprecated? + + example: https://fiddle.skia.org/c/@Surface_notifyContentWillChange + */ + void notifyContentWillChange(ContentChangeMode mode); + + /** Returns the recording context being used by the SkSurface. + + @return the recording context, if available; nullptr otherwise + */ + GrRecordingContext* recordingContext(); + +#if SK_SUPPORT_GPU + enum BackendHandleAccess { + kFlushRead_BackendHandleAccess, //!< back-end object is readable + kFlushWrite_BackendHandleAccess, //!< back-end object is writable + kDiscardWrite_BackendHandleAccess, //!< back-end object must be overwritten + }; + + /** Deprecated. + */ + static const BackendHandleAccess kFlushRead_TextureHandleAccess = + kFlushRead_BackendHandleAccess; + + /** Deprecated. 
+ */ + static const BackendHandleAccess kFlushWrite_TextureHandleAccess = + kFlushWrite_BackendHandleAccess; + + /** Deprecated. + */ + static const BackendHandleAccess kDiscardWrite_TextureHandleAccess = + kDiscardWrite_BackendHandleAccess; + + /** Retrieves the back-end texture. If SkSurface has no back-end texture, an invalid + object is returned. Call GrBackendTexture::isValid to determine if the result + is valid. + + The returned GrBackendTexture should be discarded if the SkSurface is drawn to or deleted. + + @return GPU texture reference; invalid on failure + */ + GrBackendTexture getBackendTexture(BackendHandleAccess backendHandleAccess); + + /** Retrieves the back-end render target. If SkSurface has no back-end render target, an invalid + object is returned. Call GrBackendRenderTarget::isValid to determine if the result + is valid. + + The returned GrBackendRenderTarget should be discarded if the SkSurface is drawn to + or deleted. + + @return GPU render target reference; invalid on failure + */ + GrBackendRenderTarget getBackendRenderTarget(BackendHandleAccess backendHandleAccess); + + /** If the surface was made via MakeFromBackendTexture then it's backing texture may be + substituted with a different texture. The contents of the previous backing texture are + copied into the new texture. SkCanvas state is preserved. The original sample count is + used. The GrBackendFormat and dimensions of replacement texture must match that of + the original. + + Upon success textureReleaseProc is called when it is safe to delete the texture in the + backend API (accounting only for use of the texture by this surface). If SkSurface creation + fails textureReleaseProc is called before this function returns. + + @param backendTexture the new backing texture for the surface + @param mode Retain or discard current Content + @param textureReleaseProc function called when texture can be released + @param releaseContext state passed to textureReleaseProc + */ + bool replaceBackendTexture(const GrBackendTexture& backendTexture, + GrSurfaceOrigin origin, + ContentChangeMode mode = kRetain_ContentChangeMode, + TextureReleaseProc textureReleaseProc = nullptr, + ReleaseContext releaseContext = nullptr); +#endif + + /** Returns SkCanvas that draws into SkSurface. Subsequent calls return the same SkCanvas. + SkCanvas returned is managed and owned by SkSurface, and is deleted when SkSurface + is deleted. + + @return drawing SkCanvas for SkSurface + + example: https://fiddle.skia.org/c/@Surface_getCanvas + */ + SkCanvas* getCanvas(); + + /** Returns a compatible SkSurface, or nullptr. Returned SkSurface contains + the same raster, GPU, or null properties as the original. Returned SkSurface + does not share the same pixels. + + Returns nullptr if imageInfo width or height are zero, or if imageInfo + is incompatible with SkSurface. + + @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace, + of SkSurface; width and height must be greater than zero + @return compatible SkSurface or nullptr + + example: https://fiddle.skia.org/c/@Surface_makeSurface + */ + sk_sp<SkSurface> makeSurface(const SkImageInfo& imageInfo); + + /** Calls makeSurface(ImageInfo) with the same ImageInfo as this surface, but with the + * specified width and height. + */ + sk_sp<SkSurface> makeSurface(int width, int height); + + /** Returns SkImage capturing SkSurface contents. Subsequent drawing to SkSurface contents + are not captured. SkImage allocation is accounted for if SkSurface was created with + SkBudgeted::kYes. 
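+
+        Illustrative sketch (editor's addition, not upstream Skia documentation):
+        snapshotting and then encoding to PNG bytes; encodeToData() lives on
+        SkImage rather than in this header, and its availability in this Skia
+        snapshot is assumed.
+
+            sk_sp<SkImage> image = surface->makeImageSnapshot();
+            sk_sp<SkData> png = image->encodeToData(SkEncodedImageFormat::kPNG, 100);
+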
+ + @return SkImage initialized with SkSurface contents + + example: https://fiddle.skia.org/c/@Surface_makeImageSnapshot + */ + sk_sp<SkImage> makeImageSnapshot(); + + /** + * Like the no-parameter version, this returns an image of the current surface contents. + * This variant takes a rectangle specifying the subset of the surface that is of interest. + * These bounds will be sanitized before being used. + * - If bounds extends beyond the surface, it will be trimmed to just the intersection of + * it and the surface. + * - If bounds does not intersect the surface, then this returns nullptr. + * - If bounds == the surface, then this is the same as calling the no-parameter variant. + + example: https://fiddle.skia.org/c/@Surface_makeImageSnapshot_2 + */ + sk_sp<SkImage> makeImageSnapshot(const SkIRect& bounds); + + /** Draws SkSurface contents to canvas, with its top-left corner at (x, y). + + If SkPaint paint is not nullptr, apply SkColorFilter, alpha, SkImageFilter, and SkBlendMode. + + @param canvas SkCanvas drawn into + @param x horizontal offset in SkCanvas + @param y vertical offset in SkCanvas + @param sampling what technique to use when sampling the surface pixels + @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter, + and so on; or nullptr + + example: https://fiddle.skia.org/c/@Surface_draw + */ + void draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkSamplingOptions& sampling, + const SkPaint* paint); + + void draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint = nullptr) { + this->draw(canvas, x, y, SkSamplingOptions(), paint); + } + + /** Copies SkSurface pixel address, row bytes, and SkImageInfo to SkPixmap, if address + is available, and returns true. If pixel address is not available, return + false and leave SkPixmap unchanged. + + pixmap contents become invalid on any future change to SkSurface. + + @param pixmap storage for pixel state if pixels are readable; otherwise, ignored + @return true if SkSurface has direct access to pixels + + example: https://fiddle.skia.org/c/@Surface_peekPixels + */ + bool peekPixels(SkPixmap* pixmap); + + /** Copies SkRect of pixels to dst. + + Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()). + Destination SkRect corners are (0, 0) and (dst.width(), dst.height()). + Copies each readable pixel intersecting both rectangles, without scaling, + converting to dst.colorType() and dst.alphaType() if required. + + Pixels are readable when SkSurface is raster, or backed by a GPU. + + The destination pixel storage must be allocated by the caller. + + Pixel values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination rectangles + are copied. dst contents outside SkRect intersection are unchanged. + + Pass negative values for srcX or srcY to offset pixels across or down destination. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - SkPixmap pixels could not be allocated. + - dst.rowBytes() is too small to contain one row of pixels. + + @param dst storage for pixels copied from SkSurface + @param srcX offset into readable pixels on x-axis; may be negative + @param srcY offset into readable pixels on y-axis; may be negative + @return true if pixels were copied + + example: https://fiddle.skia.org/c/@Surface_readPixels + */ + bool readPixels(const SkPixmap& dst, int srcX, int srcY); + + /** Copies SkRect of pixels from SkCanvas into dstPixels. 
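+
+        Illustrative sketch (editor's addition, not upstream Skia documentation):
+        reading the surface back into a caller-owned RGBA buffer (std::vector
+        assumed available via <vector>).
+
+            SkImageInfo dstInfo = SkImageInfo::Make(surface->width(), surface->height(),
+                                                    kRGBA_8888_SkColorType, kUnpremul_SkAlphaType);
+            std::vector<uint8_t> pixels(dstInfo.computeMinByteSize());
+            bool ok = surface->readPixels(dstInfo, pixels.data(), dstInfo.minRowBytes(), 0, 0);
+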
+ + Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()). + Destination SkRect corners are (0, 0) and (dstInfo.width(), dstInfo.height()). + Copies each readable pixel intersecting both rectangles, without scaling, + converting to dstInfo.colorType() and dstInfo.alphaType() if required. + + Pixels are readable when SkSurface is raster, or backed by a GPU. + + The destination pixel storage must be allocated by the caller. + + Pixel values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination rectangles + are copied. dstPixels contents outside SkRect intersection are unchanged. + + Pass negative values for srcX or srcY to offset pixels across or down destination. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - SkSurface pixels could not be converted to dstInfo.colorType() or dstInfo.alphaType(). + - dstRowBytes is too small to contain one row of pixels. + + @param dstInfo width, height, SkColorType, and SkAlphaType of dstPixels + @param dstPixels storage for pixels; dstInfo.height() times dstRowBytes, or larger + @param dstRowBytes size of one destination row; dstInfo.width() times pixel size, or larger + @param srcX offset into readable pixels on x-axis; may be negative + @param srcY offset into readable pixels on y-axis; may be negative + @return true if pixels were copied + */ + bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, + int srcX, int srcY); + + /** Copies SkRect of pixels from SkSurface into bitmap. + + Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()). + Destination SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()). + Copies each readable pixel intersecting both rectangles, without scaling, + converting to bitmap.colorType() and bitmap.alphaType() if required. + + Pixels are readable when SkSurface is raster, or backed by a GPU. + + The destination pixel storage must be allocated by the caller. + + Pixel values are converted only if SkColorType and SkAlphaType + do not match. Only pixels within both source and destination rectangles + are copied. dst contents outside SkRect intersection are unchanged. + + Pass negative values for srcX or srcY to offset pixels across or down destination. + + Does not copy, and returns false if: + - Source and destination rectangles do not intersect. + - SkSurface pixels could not be converted to dst.colorType() or dst.alphaType(). + - dst pixels could not be allocated. + - dst.rowBytes() is too small to contain one row of pixels. + + @param dst storage for pixels copied from SkSurface + @param srcX offset into readable pixels on x-axis; may be negative + @param srcY offset into readable pixels on y-axis; may be negative + @return true if pixels were copied + + example: https://fiddle.skia.org/c/@Surface_readPixels_3 + */ + bool readPixels(const SkBitmap& dst, int srcX, int srcY); + + using AsyncReadResult = SkImage::AsyncReadResult; + + /** Client-provided context that is passed to client-provided ReadPixelsContext. */ + using ReadPixelsContext = void*; + + /** Client-provided callback to asyncRescaleAndReadPixels() or + asyncRescaleAndReadPixelsYUV420() that is called when read result is ready or on failure. + */ + using ReadPixelsCallback = void(ReadPixelsContext, std::unique_ptr<const AsyncReadResult>); + + /** Controls the gamma that rescaling occurs in for asyncRescaleAndReadPixels() and + asyncRescaleAndReadPixelsYUV420(). 
+ */ + using RescaleGamma = SkImage::RescaleGamma; + using RescaleMode = SkImage::RescaleMode; + + /** Makes surface pixel data available to caller, possibly asynchronously. It can also rescale + the surface pixels. + + Currently asynchronous reads are only supported on the GPU backend and only when the + underlying 3D API supports transfer buffers and CPU/GPU synchronization primitives. In all + other cases this operates synchronously. + + Data is read from the source sub-rectangle, is optionally converted to a linear gamma, is + rescaled to the size indicated by 'info', is then converted to the color space, color type, + and alpha type of 'info'. A 'srcRect' that is not contained by the bounds of the surface + causes failure. + + When the pixel data is ready the caller's ReadPixelsCallback is called with a + AsyncReadResult containing pixel data in the requested color type, alpha type, and color + space. The AsyncReadResult will have count() == 1. Upon failure the callback is called + with nullptr for AsyncReadResult. For a GPU surface this flushes work but a submit must + occur to guarantee a finite time before the callback is called. + + The data is valid for the lifetime of AsyncReadResult with the exception that if the + SkSurface is GPU-backed the data is immediately invalidated if the context is abandoned + or destroyed. + + @param info info of the requested pixels + @param srcRect subrectangle of surface to read + @param rescaleGamma controls whether rescaling is done in the surface's gamma or whether + the source data is transformed to a linear gamma before rescaling. + @param rescaleMode controls the technique of the rescaling + @param callback function to call with result of the read + @param context passed to callback + */ + void asyncRescaleAndReadPixels(const SkImageInfo& info, + const SkIRect& srcRect, + RescaleGamma rescaleGamma, + RescaleMode rescaleMode, + ReadPixelsCallback callback, + ReadPixelsContext context); + + /** + Similar to asyncRescaleAndReadPixels but performs an additional conversion to YUV. The + RGB->YUV conversion is controlled by 'yuvColorSpace'. The YUV data is returned as three + planes ordered y, u, v. The u and v planes are half the width and height of the resized + rectangle. The y, u, and v values are single bytes. Currently this fails if 'dstSize' + width and height are not even. A 'srcRect' that is not contained by the bounds of the + surface causes failure. + + When the pixel data is ready the caller's ReadPixelsCallback is called with a + AsyncReadResult containing the planar data. The AsyncReadResult will have count() == 3. + Upon failure the callback is called with nullptr for AsyncReadResult. For a GPU surface this + flushes work but a submit must occur to guarantee a finite time before the callback is + called. + + The data is valid for the lifetime of AsyncReadResult with the exception that if the + SkSurface is GPU-backed the data is immediately invalidated if the context is abandoned + or destroyed. + + @param yuvColorSpace The transformation from RGB to YUV. Applied to the resized image + after it is converted to dstColorSpace. + @param dstColorSpace The color space to convert the resized image to, after rescaling. + @param srcRect The portion of the surface to rescale and convert to YUV planes. + @param dstSize The size to rescale srcRect to + @param rescaleGamma controls whether rescaling is done in the surface's gamma or whether + the source data is transformed to a linear gamma before rescaling. 
+ @param rescaleMode controls the sampling technique of the rescaling + @param callback function to call with the planar read result + @param context passed to callback + */ + void asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace, + sk_sp<SkColorSpace> dstColorSpace, + const SkIRect& srcRect, + const SkISize& dstSize, + RescaleGamma rescaleGamma, + RescaleMode rescaleMode, + ReadPixelsCallback callback, + ReadPixelsContext context); + + /** Copies SkRect of pixels from the src SkPixmap to the SkSurface. + + Source SkRect corners are (0, 0) and (src.width(), src.height()). + Destination SkRect corners are (dstX, dstY) and + (dstX + Surface width(), dstY + Surface height()). + + Copies each readable pixel intersecting both rectangles, without scaling, + converting to SkSurface colorType() and SkSurface alphaType() if required. + + @param src storage for pixels to copy to SkSurface + @param dstX x-axis position relative to SkSurface to begin copy; may be negative + @param dstY y-axis position relative to SkSurface to begin copy; may be negative + + example: https://fiddle.skia.org/c/@Surface_writePixels + */ + void writePixels(const SkPixmap& src, int dstX, int dstY); + + /** Copies SkRect of pixels from the src SkBitmap to the SkSurface. + + Source SkRect corners are (0, 0) and (src.width(), src.height()). + Destination SkRect corners are (dstX, dstY) and + (dstX + Surface width(), dstY + Surface height()). + + Copies each readable pixel intersecting both rectangles, without scaling, + converting to SkSurface colorType() and SkSurface alphaType() if required. + + @param src storage for pixels to copy to SkSurface + @param dstX x-axis position relative to SkSurface to begin copy; may be negative + @param dstY y-axis position relative to SkSurface to begin copy; may be negative + + example: https://fiddle.skia.org/c/@Surface_writePixels_2 + */ + void writePixels(const SkBitmap& src, int dstX, int dstY); + + /** Returns SkSurfaceProps for surface. + + @return LCD striping orientation and setting for device independent fonts + */ + const SkSurfaceProps& props() const { return fProps; } + + /** Call to ensure all reads/writes of the surface have been issued to the underlying 3D API. + Skia will correctly order its own draws and pixel operations. This must to be used to ensure + correct ordering when the surface backing store is accessed outside Skia (e.g. direct use of + the 3D API or a windowing system). GrDirectContext has additional flush and submit methods + that apply to all surfaces and images created from a GrDirectContext. This is equivalent to + calling SkSurface::flush with a default GrFlushInfo followed by + GrDirectContext::submit(syncCpu). + */ + void flushAndSubmit(bool syncCpu = false); + + enum class BackendSurfaceAccess { + kNoAccess, //!< back-end object will not be used by client + kPresent, //!< back-end surface will be used for presenting to screen + }; + +#if SK_SUPPORT_GPU + /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface + MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent + to the gpu. Some specific API details: + GL: Commands are actually sent to the driver, but glFlush is never called. Thus some + sync objects from the flush will not be valid until a submission occurs. + + Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command + buffer or encoder objects. However, these objects are not sent to the gpu until a + submission occurs. 
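+
+        Illustrative sketch (editor's addition, not upstream Skia documentation):
+        flush followed by an explicit submit, assuming `directContext` is the
+        GrDirectContext that created this surface.
+
+            surface->flush(SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
+            directContext->submit();
+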
+ enum class BackendSurfaceAccess {
+ kNoAccess, //!< back-end object will not be used by client
+ kPresent, //!< back-end surface will be used for presenting to screen
+ };
+
+#if SK_SUPPORT_GPU
+ /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface
+ MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent
+ to the gpu. Some specific API details:
+ GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
+ sync objects from the flush will not be valid until a submission occurs.
+
+ Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
+ buffer or encoder objects. However, these objects are not sent to the gpu until a
+ submission occurs.
+
+ The work that is submitted to the GPU will be dependent on the BackendSurfaceAccess that is
+ passed in.
+
+ If BackendSurfaceAccess::kNoAccess is passed in all commands will be issued to the GPU.
+
+ If BackendSurfaceAccess::kPresent is passed in and the backend API is not Vulkan, it is
+ treated the same as kNoAccess. If the backend API is Vulkan, the VkImage that backs the
+ SkSurface will be transferred back to its original queue. If the SkSurface was created by
+ wrapping a VkImage, the queue will be set to the queue which was originally passed in on
+ the GrVkImageInfo. Additionally, if the original queue was not external or foreign the
+ layout of the VkImage will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.
+
+ The GrFlushInfo describes additional options to flush. Please see documentation at
+ GrFlushInfo for more info.
+
+ If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
+ submitted to the gpu during the next submit call (it is possible Skia failed to create a
+ subset of the semaphores). The client should not wait on these semaphores until after submit
+ has been called, but must keep them alive until then. If a submit flag was passed in with
+ the flush these valid semaphores can be waited on immediately. If this call returns
+ GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
+ the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
+ with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
+ client is still responsible for deleting any initialized semaphores.
+ Regardless of semaphore submission the context will still be flushed. It should be
+ emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
+ happen. It simply means there were no semaphores submitted to the GPU. A caller should only
+ take this as a failure if they passed in semaphores to be submitted.
+
+ Pending surface commands are flushed regardless of the return result.
+
+ @param access type of access the call will do on the backend object after flush
+ @param info flush options
+ */
+ GrSemaphoresSubmitted flush(BackendSurfaceAccess access, const GrFlushInfo& info);
+
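A hedged sketch of the flush-then-submit flow this overload implies, assuming the caller still holds the GrDirectContext that created the surface (GPU builds only; names are illustrative):

    #include "include/core/SkSurface.h"
    #include "include/gpu/GrDirectContext.h"
    #include "include/gpu/GrTypes.h"

    static void presentFrame(GrDirectContext* direct, SkSurface* surface) {
        GrFlushInfo flushInfo;  // defaults: no semaphores, no finished proc
        surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);
        direct->submit();       // nothing reaches the GPU until submit is called
    }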
+ /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface
+ MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent
+ to the gpu. Some specific API details:
+ GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
+ sync objects from the flush will not be valid until a submission occurs.
+
+ Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
+ buffer or encoder objects. However, these objects are not sent to the gpu until a
+ submission occurs.
+
+ The GrFlushInfo describes additional options to flush. Please see documentation at
+ GrFlushInfo for more info.
+
+ If a GrBackendSurfaceMutableState is passed in, at the end of the flush we will transition
+ the surface to be in the state requested by the GrBackendSurfaceMutableState. If the surface
+ (or SkImage or GrBackendSurface wrapping the same backend object) is used again after this
+ flush the state may be changed and no longer match what is requested here. This is often
+ used if the surface will be used for presenting or external use and the client wants backend
+ object to be prepped for that use. A finishedProc or semaphore on the GrFlushInfo will also
+ include the work for any requested state change.
+
+ If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
+ VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
+ tell Skia to not change those respective states.
+
+ If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
+ submitted to the gpu during the next submit call (it is possible Skia failed to create a
+ subset of the semaphores). The client should not wait on these semaphores until after submit
+ has been called, but must keep them alive until then. If a submit flag was passed in with
+ the flush these valid semaphores can be waited on immediately. If this call returns
+ GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
+ the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
+ with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
+ client is still responsible for deleting any initialized semaphores.
+ Regardless of semaphore submission the context will still be flushed. It should be
+ emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
+ happen. It simply means there were no semaphores submitted to the GPU. A caller should only
+ take this as a failure if they passed in semaphores to be submitted.
+
+ Pending surface commands are flushed regardless of the return result.
+
+ @param info flush options
+ @param newState optional state change request after flush
+ */
+ GrSemaphoresSubmitted flush(const GrFlushInfo& info,
+ const GrBackendSurfaceMutableState* newState = nullptr);
+#endif // SK_SUPPORT_GPU
+
+ void flush();
+
+ /** Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
+ executing any more commands on the GPU for this surface. If this call returns false, then
+ the GPU back-end will not wait on any passed in semaphores, and the client will still own
+ the semaphores, regardless of the value of deleteSemaphoresAfterWait.
+
+ If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
+ it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
+ knows that Skia has finished waiting on them. This can be done by using finishedProcs
+ on flush calls.
+
+ @param numSemaphores size of waitSemaphores array
+ @param waitSemaphores array of semaphore containers
+ @param deleteSemaphoresAfterWait who owns and should delete the semaphores
+ @return true if GPU is waiting on semaphores
+ */
+ bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait = true);
+
+ /** Initializes SkSurfaceCharacterization that can be used to perform GPU back-end
+ processing in a separate thread. Typically this is used to divide drawing
+ into multiple tiles. SkDeferredDisplayListRecorder records the drawing commands
+ for each tile.
+
+ Returns true if SkSurface supports characterization. A raster surface returns false.
+
+ @param characterization properties for parallel drawing
+ @return true if supported
+
+ example: https://fiddle.skia.org/c/@Surface_characterize
+ */
+ bool characterize(SkSurfaceCharacterization* characterization) const;
+
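A hedged sketch of the deferred-display-list round trip that characterize() and draw() enable; recording would normally happen on a separate thread, and the names here are illustrative:

    #include "include/core/SkCanvas.h"
    #include "include/core/SkDeferredDisplayListRecorder.h"
    #include "include/core/SkSurface.h"
    #include "include/core/SkSurfaceCharacterization.h"

    static sk_sp<SkDeferredDisplayList> recordTile(SkSurface* surface) {
        SkSurfaceCharacterization characterization;
        if (!surface->characterize(&characterization)) {
            return nullptr;  // raster surfaces cannot be characterized
        }
        SkDeferredDisplayListRecorder recorder(characterization);
        recorder.getCanvas()->clear(SK_ColorWHITE);  // draws are recorded, not executed
        return recorder.detach();
    }

    // Later, on the thread that owns the GrDirectContext:
    //   surface->draw(recordTile(surface));  // returns false if the DDL is incompatible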
+ /** Draws the deferred display list created via a SkDeferredDisplayListRecorder.
+ If the deferred display list is not compatible with this SkSurface, the draw is skipped
+ and false is returned.
+
+ The xOffset and yOffset parameters are experimental and, if not both zero, will cause
+ the draw to be ignored.
+ When implemented, if xOffset or yOffset are non-zero, the DDL will be drawn offset by that
+ amount into the surface.
+
+ @param deferredDisplayList drawing commands
+ @param xOffset x-offset at which to draw the DDL
+ @param yOffset y-offset at which to draw the DDL
+ @return false if deferredDisplayList is not compatible
+
+ example: https://fiddle.skia.org/c/@Surface_draw_2
+ */
+ bool draw(sk_sp<const SkDeferredDisplayList> deferredDisplayList,
+ int xOffset = 0,
+ int yOffset = 0);
+
+protected:
+ SkSurface(int width, int height, const SkSurfaceProps* surfaceProps);
+ SkSurface(const SkImageInfo& imageInfo, const SkSurfaceProps* surfaceProps);
+
+ // called by subclass if their contents have changed
+ void dirtyGenerationID() {
+ fGenerationID = 0;
+ }
+
+private:
+ const SkSurfaceProps fProps;
+ const int fWidth;
+ const int fHeight;
+ uint32_t fGenerationID;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/src/deps/skia/include/core/SkSurfaceCharacterization.h b/src/deps/skia/include/core/SkSurfaceCharacterization.h
new file mode 100644
index 000000000..91b10e87b
--- /dev/null
+++ b/src/deps/skia/include/core/SkSurfaceCharacterization.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfaceCharacterization_DEFINED
+#define SkSurfaceCharacterization_DEFINED
+
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+
+class SkColorSpace;
+
+
+#if SK_SUPPORT_GPU
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "include/gpu/GrTypes.h"
+
+/** \class SkSurfaceCharacterization
+ A surface characterization contains all the information Ganesh requires to make its internal
+ rendering decisions. When passed into a SkDeferredDisplayListRecorder it will copy the
+ data and pass it on to the SkDeferredDisplayList if/when it is created. Note that both of
+ those objects (the Recorder and the DisplayList) will take a ref on the
+ GrContextThreadSafeProxy and SkColorSpace objects.
+*/
+class SK_API SkSurfaceCharacterization {
+public:
+ enum class Textureable : bool { kNo = false, kYes = true };
+ enum class MipMapped : bool { kNo = false, kYes = true };
+ enum class UsesGLFBO0 : bool { kNo = false, kYes = true };
+ // This flag indicates that the backing VkImage for this Vulkan surface will have the
+ // VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT set. This bit allows skia to handle advanced blends
+ // more optimally in a shader by being able to directly read the dst values.
+ enum class VkRTSupportsInputAttachment : bool { kNo = false, kYes = true };
+ // This flag indicates if the surface is wrapping a raw Vulkan secondary command buffer.
+ enum class VulkanSecondaryCBCompatible : bool { kNo = false, kYes = true }; + + SkSurfaceCharacterization() + : fCacheMaxResourceBytes(0) + , fOrigin(kBottomLeft_GrSurfaceOrigin) + , fSampleCnt(0) + , fIsTextureable(Textureable::kYes) + , fIsMipMapped(MipMapped::kYes) + , fUsesGLFBO0(UsesGLFBO0::kNo) + , fVulkanSecondaryCBCompatible(VulkanSecondaryCBCompatible::kNo) + , fIsProtected(GrProtected::kNo) + , fSurfaceProps(0, kUnknown_SkPixelGeometry) { + } + + SkSurfaceCharacterization(SkSurfaceCharacterization&&) = default; + SkSurfaceCharacterization& operator=(SkSurfaceCharacterization&&) = default; + + SkSurfaceCharacterization(const SkSurfaceCharacterization&) = default; + SkSurfaceCharacterization& operator=(const SkSurfaceCharacterization& other) = default; + bool operator==(const SkSurfaceCharacterization& other) const; + bool operator!=(const SkSurfaceCharacterization& other) const { + return !(*this == other); + } + + /* + * Return a new surface characterization with the only difference being a different width + * and height + */ + SkSurfaceCharacterization createResized(int width, int height) const; + + /* + * Return a new surface characterization with only a replaced color space + */ + SkSurfaceCharacterization createColorSpace(sk_sp<SkColorSpace>) const; + + /* + * Return a new surface characterization with the backend format replaced. A colorType + * must also be supplied to indicate the interpretation of the new format. + */ + SkSurfaceCharacterization createBackendFormat(SkColorType colorType, + const GrBackendFormat& backendFormat) const; + + /* + * Return a new surface characterization with just a different use of FBO0 (in GL) + */ + SkSurfaceCharacterization createFBO0(bool usesGLFBO0) const; + + GrContextThreadSafeProxy* contextInfo() const { return fContextInfo.get(); } + sk_sp<GrContextThreadSafeProxy> refContextInfo() const { return fContextInfo; } + size_t cacheMaxResourceBytes() const { return fCacheMaxResourceBytes; } + + bool isValid() const { return kUnknown_SkColorType != fImageInfo.colorType(); } + + const SkImageInfo& imageInfo() const { return fImageInfo; } + const GrBackendFormat& backendFormat() const { return fBackendFormat; } + GrSurfaceOrigin origin() const { return fOrigin; } + SkISize dimensions() const { return fImageInfo.dimensions(); } + int width() const { return fImageInfo.width(); } + int height() const { return fImageInfo.height(); } + SkColorType colorType() const { return fImageInfo.colorType(); } + int sampleCount() const { return fSampleCnt; } + bool isTextureable() const { return Textureable::kYes == fIsTextureable; } + bool isMipMapped() const { return MipMapped::kYes == fIsMipMapped; } + bool usesGLFBO0() const { return UsesGLFBO0::kYes == fUsesGLFBO0; } + bool vkRTSupportsInputAttachment() const { + return VkRTSupportsInputAttachment::kYes == fVkRTSupportsInputAttachment; + } + bool vulkanSecondaryCBCompatible() const { + return VulkanSecondaryCBCompatible::kYes == fVulkanSecondaryCBCompatible; + } + GrProtected isProtected() const { return fIsProtected; } + SkColorSpace* colorSpace() const { return fImageInfo.colorSpace(); } + sk_sp<SkColorSpace> refColorSpace() const { return fImageInfo.refColorSpace(); } + const SkSurfaceProps& surfaceProps()const { return fSurfaceProps; } + + // Is the provided backend texture compatible with this surface characterization? 
+ bool isCompatible(const GrBackendTexture&) const; + +private: + friend class SkSurface_Gpu; // for 'set' & 'config' + friend class GrVkSecondaryCBDrawContext; // for 'set' & 'config' + friend class GrContextThreadSafeProxy; // for private ctor + friend class SkDeferredDisplayListRecorder; // for 'config' + friend class SkSurface; // for 'config' + + SkDEBUGCODE(void validate() const;) + + SkSurfaceCharacterization(sk_sp<GrContextThreadSafeProxy> contextInfo, + size_t cacheMaxResourceBytes, + const SkImageInfo& ii, + const GrBackendFormat& backendFormat, + GrSurfaceOrigin origin, + int sampleCnt, + Textureable isTextureable, + MipMapped isMipMapped, + UsesGLFBO0 usesGLFBO0, + VkRTSupportsInputAttachment vkRTSupportsInputAttachment, + VulkanSecondaryCBCompatible vulkanSecondaryCBCompatible, + GrProtected isProtected, + const SkSurfaceProps& surfaceProps) + : fContextInfo(std::move(contextInfo)) + , fCacheMaxResourceBytes(cacheMaxResourceBytes) + , fImageInfo(ii) + , fBackendFormat(backendFormat) + , fOrigin(origin) + , fSampleCnt(sampleCnt) + , fIsTextureable(isTextureable) + , fIsMipMapped(isMipMapped) + , fUsesGLFBO0(usesGLFBO0) + , fVkRTSupportsInputAttachment(vkRTSupportsInputAttachment) + , fVulkanSecondaryCBCompatible(vulkanSecondaryCBCompatible) + , fIsProtected(isProtected) + , fSurfaceProps(surfaceProps) { + if (fSurfaceProps.flags() & SkSurfaceProps::kDynamicMSAA_Flag) { + // Dynamic MSAA is not currently supported with DDL. + *this = {}; + } + SkDEBUGCODE(this->validate()); + } + + void set(sk_sp<GrContextThreadSafeProxy> contextInfo, + size_t cacheMaxResourceBytes, + const SkImageInfo& ii, + const GrBackendFormat& backendFormat, + GrSurfaceOrigin origin, + int sampleCnt, + Textureable isTextureable, + MipMapped isMipMapped, + UsesGLFBO0 usesGLFBO0, + VkRTSupportsInputAttachment vkRTSupportsInputAttachment, + VulkanSecondaryCBCompatible vulkanSecondaryCBCompatible, + GrProtected isProtected, + const SkSurfaceProps& surfaceProps) { + if (surfaceProps.flags() & SkSurfaceProps::kDynamicMSAA_Flag) { + // Dynamic MSAA is not currently supported with DDL. 
+ *this = {}; + } else { + fContextInfo = contextInfo; + fCacheMaxResourceBytes = cacheMaxResourceBytes; + + fImageInfo = ii; + fBackendFormat = backendFormat; + fOrigin = origin; + fSampleCnt = sampleCnt; + fIsTextureable = isTextureable; + fIsMipMapped = isMipMapped; + fUsesGLFBO0 = usesGLFBO0; + fVkRTSupportsInputAttachment = vkRTSupportsInputAttachment; + fVulkanSecondaryCBCompatible = vulkanSecondaryCBCompatible; + fIsProtected = isProtected; + fSurfaceProps = surfaceProps; + } + SkDEBUGCODE(this->validate()); + } + + sk_sp<GrContextThreadSafeProxy> fContextInfo; + size_t fCacheMaxResourceBytes; + + SkImageInfo fImageInfo; + GrBackendFormat fBackendFormat; + GrSurfaceOrigin fOrigin; + int fSampleCnt; + Textureable fIsTextureable; + MipMapped fIsMipMapped; + UsesGLFBO0 fUsesGLFBO0; + VkRTSupportsInputAttachment fVkRTSupportsInputAttachment; + VulkanSecondaryCBCompatible fVulkanSecondaryCBCompatible; + GrProtected fIsProtected; + SkSurfaceProps fSurfaceProps; +}; + +#else// !SK_SUPPORT_GPU +class GrBackendFormat; + +class SK_API SkSurfaceCharacterization { +public: + SkSurfaceCharacterization() : fSurfaceProps(0, kUnknown_SkPixelGeometry) { } + + SkSurfaceCharacterization createResized(int width, int height) const { + return *this; + } + + SkSurfaceCharacterization createColorSpace(sk_sp<SkColorSpace>) const { + return *this; + } + + SkSurfaceCharacterization createBackendFormat(SkColorType, const GrBackendFormat&) const { + return *this; + } + + SkSurfaceCharacterization createFBO0(bool usesGLFBO0) const { + return *this; + } + + bool operator==(const SkSurfaceCharacterization& other) const { return false; } + bool operator!=(const SkSurfaceCharacterization& other) const { + return !(*this == other); + } + + size_t cacheMaxResourceBytes() const { return 0; } + + bool isValid() const { return false; } + + int width() const { return 0; } + int height() const { return 0; } + int stencilCount() const { return 0; } + bool isTextureable() const { return false; } + bool isMipMapped() const { return false; } + bool usesGLFBO0() const { return false; } + bool vkRTSupportsAttachmentInput() const { return false; } + bool vulkanSecondaryCBCompatible() const { return false; } + SkColorSpace* colorSpace() const { return nullptr; } + sk_sp<SkColorSpace> refColorSpace() const { return nullptr; } + const SkSurfaceProps& surfaceProps()const { return fSurfaceProps; } + +private: + SkSurfaceProps fSurfaceProps; +}; + +#endif + +#endif diff --git a/src/deps/skia/include/core/SkSurfaceProps.h b/src/deps/skia/include/core/SkSurfaceProps.h new file mode 100644 index 000000000..7b07554e5 --- /dev/null +++ b/src/deps/skia/include/core/SkSurfaceProps.h @@ -0,0 +1,92 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSurfaceProps_DEFINED +#define SkSurfaceProps_DEFINED + +#include "include/core/SkTypes.h" + +/** + * Description of how the LCD strips are arranged for each pixel. If this is unknown, or the + * pixels are meant to be "portable" and/or transformed before showing (e.g. rotated, scaled) + * then use kUnknown_SkPixelGeometry. + */ +enum SkPixelGeometry { + kUnknown_SkPixelGeometry, + kRGB_H_SkPixelGeometry, + kBGR_H_SkPixelGeometry, + kRGB_V_SkPixelGeometry, + kBGR_V_SkPixelGeometry, +}; + +// Returns true iff geo is a known geometry and is RGB. 
+static inline bool SkPixelGeometryIsRGB(SkPixelGeometry geo) { + return kRGB_H_SkPixelGeometry == geo || kRGB_V_SkPixelGeometry == geo; +} + +// Returns true iff geo is a known geometry and is BGR. +static inline bool SkPixelGeometryIsBGR(SkPixelGeometry geo) { + return kBGR_H_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo; +} + +// Returns true iff geo is a known geometry and is horizontal. +static inline bool SkPixelGeometryIsH(SkPixelGeometry geo) { + return kRGB_H_SkPixelGeometry == geo || kBGR_H_SkPixelGeometry == geo; +} + +// Returns true iff geo is a known geometry and is vertical. +static inline bool SkPixelGeometryIsV(SkPixelGeometry geo) { + return kRGB_V_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo; +} + +/** + * Describes properties and constraints of a given SkSurface. The rendering engine can parse these + * during drawing, and can sometimes optimize its performance (e.g. disabling an expensive + * feature). + */ +class SK_API SkSurfaceProps { +public: + enum Flags { + kUseDeviceIndependentFonts_Flag = 1 << 0, + // Use internal MSAA to render to non-MSAA GPU surfaces. + kDynamicMSAA_Flag = 1 << 1 + }; + /** Deprecated alias used by Chromium. Will be removed. */ + static const Flags kUseDistanceFieldFonts_Flag = kUseDeviceIndependentFonts_Flag; + + /** No flags, unknown pixel geometry. */ + SkSurfaceProps(); + SkSurfaceProps(uint32_t flags, SkPixelGeometry); + + SkSurfaceProps(const SkSurfaceProps&); + SkSurfaceProps& operator=(const SkSurfaceProps&); + + SkSurfaceProps cloneWithPixelGeometry(SkPixelGeometry newPixelGeometry) const { + return SkSurfaceProps(fFlags, newPixelGeometry); + } + + uint32_t flags() const { return fFlags; } + SkPixelGeometry pixelGeometry() const { return fPixelGeometry; } + + bool isUseDeviceIndependentFonts() const { + return SkToBool(fFlags & kUseDeviceIndependentFonts_Flag); + } + + bool operator==(const SkSurfaceProps& that) const { + return fFlags == that.fFlags && fPixelGeometry == that.fPixelGeometry; + } + + bool operator!=(const SkSurfaceProps& that) const { + return !(*this == that); + } + +private: + uint32_t fFlags; + SkPixelGeometry fPixelGeometry; +}; + +#endif diff --git a/src/deps/skia/include/core/SkSwizzle.h b/src/deps/skia/include/core/SkSwizzle.h new file mode 100644 index 000000000..61e93b2da --- /dev/null +++ b/src/deps/skia/include/core/SkSwizzle.h @@ -0,0 +1,19 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSwizzle_DEFINED +#define SkSwizzle_DEFINED + +#include "include/core/SkTypes.h" + +/** + Swizzles byte order of |count| 32-bit pixels, swapping R and B. + (RGBA <-> BGRA) +*/ +SK_API void SkSwapRB(uint32_t* dest, const uint32_t* src, int count); + +#endif diff --git a/src/deps/skia/include/core/SkTextBlob.h b/src/deps/skia/include/core/SkTextBlob.h new file mode 100644 index 000000000..d6cda3b27 --- /dev/null +++ b/src/deps/skia/include/core/SkTextBlob.h @@ -0,0 +1,503 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkTextBlob_DEFINED +#define SkTextBlob_DEFINED + +#include "include/core/SkFont.h" +#include "include/core/SkPaint.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkString.h" +#include "include/private/SkTemplates.h" + +#include <atomic> + +struct SkRSXform; +struct SkSerialProcs; +struct SkDeserialProcs; + +/** \class SkTextBlob + SkTextBlob combines multiple text runs into an immutable container. Each text + run consists of glyphs, SkPaint, and position. Only parts of SkPaint related to + fonts and text rendering are used by run. +*/ +class SK_API SkTextBlob final : public SkNVRefCnt<SkTextBlob> { +private: + class RunRecord; + +public: + + /** Returns conservative bounding box. Uses SkPaint associated with each glyph to + determine glyph bounds, and unions all bounds. Returned bounds may be + larger than the bounds of all glyphs in runs. + + @return conservative bounding box + */ + const SkRect& bounds() const { return fBounds; } + + /** Returns a non-zero value unique among all text blobs. + + @return identifier for SkTextBlob + */ + uint32_t uniqueID() const { return fUniqueID; } + + /** Returns the number of intervals that intersect bounds. + bounds describes a pair of lines parallel to the text advance. + The return count is zero or a multiple of two, and is at most twice the number of glyphs in + the the blob. + + Pass nullptr for intervals to determine the size of the interval array. + + Runs within the blob that contain SkRSXform are ignored when computing intercepts. + + @param bounds lower and upper line parallel to the advance + @param intervals returned intersections; may be nullptr + @param paint specifies stroking, SkPathEffect that affects the result; may be nullptr + @return number of intersections; may be zero + */ + int getIntercepts(const SkScalar bounds[2], SkScalar intervals[], + const SkPaint* paint = nullptr) const; + + /** Creates SkTextBlob with a single run. + + font contains attributes used to define the run text. + + When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or + SkTextEncoding::kUTF32, this function uses the default + character-to-glyph mapping from the SkTypeface in font. It does not + perform typeface fallback for characters not found in the SkTypeface. + It does not perform kerning or other complex shaping; glyphs are + positioned based on their default advances. + + @param text character code points or glyphs drawn + @param byteLength byte length of text array + @param font text size, typeface, text scale, and so on, used to draw + @param encoding text encoding used in the text array + @return SkTextBlob constructed from one run + */ + static sk_sp<SkTextBlob> MakeFromText(const void* text, size_t byteLength, const SkFont& font, + SkTextEncoding encoding = SkTextEncoding::kUTF8); + + /** Creates SkTextBlob with a single run. string meaning depends on SkTextEncoding; + by default, string is encoded as UTF-8. + + font contains attributes used to define the run text. + + When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or + SkTextEncoding::kUTF32, this function uses the default + character-to-glyph mapping from the SkTypeface in font. It does not + perform typeface fallback for characters not found in the SkTypeface. + It does not perform kerning or other complex shaping; glyphs are + positioned based on their default advances. 
+ + @param string character code points or glyphs drawn + @param font text size, typeface, text scale, and so on, used to draw + @param encoding text encoding used in the text array + @return SkTextBlob constructed from one run + */ + static sk_sp<SkTextBlob> MakeFromString(const char* string, const SkFont& font, + SkTextEncoding encoding = SkTextEncoding::kUTF8) { + if (!string) { + return nullptr; + } + return MakeFromText(string, strlen(string), font, encoding); + } + + /** Returns a textblob built from a single run of text with x-positions and a single y value. + This is equivalent to using SkTextBlobBuilder and calling allocRunPosH(). + Returns nullptr if byteLength is zero. + + @param text character code points or glyphs drawn (based on encoding) + @param byteLength byte length of text array + @param xpos array of x-positions, must contain values for all of the character points. + @param constY shared y-position for each character point, to be paired with each xpos. + @param font SkFont used for this run + @param encoding specifies the encoding of the text array. + @return new textblob or nullptr + */ + static sk_sp<SkTextBlob> MakeFromPosTextH(const void* text, size_t byteLength, + const SkScalar xpos[], SkScalar constY, const SkFont& font, + SkTextEncoding encoding = SkTextEncoding::kUTF8); + + /** Returns a textblob built from a single run of text with positions. + This is equivalent to using SkTextBlobBuilder and calling allocRunPos(). + Returns nullptr if byteLength is zero. + + @param text character code points or glyphs drawn (based on encoding) + @param byteLength byte length of text array + @param pos array of positions, must contain values for all of the character points. + @param font SkFont used for this run + @param encoding specifies the encoding of the text array. + @return new textblob or nullptr + */ + static sk_sp<SkTextBlob> MakeFromPosText(const void* text, size_t byteLength, + const SkPoint pos[], const SkFont& font, + SkTextEncoding encoding = SkTextEncoding::kUTF8); + + static sk_sp<SkTextBlob> MakeFromRSXform(const void* text, size_t byteLength, + const SkRSXform xform[], const SkFont& font, + SkTextEncoding encoding = SkTextEncoding::kUTF8); + + /** Writes data to allow later reconstruction of SkTextBlob. memory points to storage + to receive the encoded data, and memory_size describes the size of storage. + Returns bytes used if provided storage is large enough to hold all data; + otherwise, returns zero. + + procs.fTypefaceProc permits supplying a custom function to encode SkTypeface. + If procs.fTypefaceProc is nullptr, default encoding is used. procs.fTypefaceCtx + may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc + is called with a pointer to SkTypeface and user context. + + @param procs custom serial data encoders; may be nullptr + @param memory storage for data + @param memory_size size of storage + @return bytes written, or zero if required storage is larger than memory_size + + example: https://fiddle.skia.org/c/@TextBlob_serialize + */ + size_t serialize(const SkSerialProcs& procs, void* memory, size_t memory_size) const; + + /** Returns storage containing SkData describing SkTextBlob, using optional custom + encoders. + + procs.fTypefaceProc permits supplying a custom function to encode SkTypeface. + If procs.fTypefaceProc is nullptr, default encoding is used. procs.fTypefaceCtx + may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc + is called with a pointer to SkTypeface and user context. 
+ + @param procs custom serial data encoders; may be nullptr + @return storage containing serialized SkTextBlob + + example: https://fiddle.skia.org/c/@TextBlob_serialize_2 + */ + sk_sp<SkData> serialize(const SkSerialProcs& procs) const; + + /** Recreates SkTextBlob that was serialized into data. Returns constructed SkTextBlob + if successful; otherwise, returns nullptr. Fails if size is smaller than + required data length, or if data does not permit constructing valid SkTextBlob. + + procs.fTypefaceProc permits supplying a custom function to decode SkTypeface. + If procs.fTypefaceProc is nullptr, default decoding is used. procs.fTypefaceCtx + may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc + is called with a pointer to SkTypeface data, data byte length, and user context. + + @param data pointer for serial data + @param size size of data + @param procs custom serial data decoders; may be nullptr + @return SkTextBlob constructed from data in memory + */ + static sk_sp<SkTextBlob> Deserialize(const void* data, size_t size, + const SkDeserialProcs& procs); + + class SK_API Iter { + public: + struct Run { + SkTypeface* fTypeface; + int fGlyphCount; + const uint16_t* fGlyphIndices; +#ifdef SK_UNTIL_CRBUG_1187654_IS_FIXED + const uint32_t* fClusterIndex_forTest; + int fUtf8Size_forTest; + const char* fUtf8_forTest; +#endif + }; + + Iter(const SkTextBlob&); + + /** + * Returns true for each "run" inside the textblob, setting the Run fields (if not null). + * If this returns false, there are no more runs, and the Run parameter will be ignored. + */ + bool next(Run*); + + // Experimental, DO NO USE, will change/go-away + struct ExperimentalRun { + SkFont font; + int count; + const uint16_t* glyphs; + const SkPoint* positions; + }; + bool experimentalNext(ExperimentalRun*); + + private: + const RunRecord* fRunRecord; + }; + +private: + friend class SkNVRefCnt<SkTextBlob>; + + enum GlyphPositioning : uint8_t; + + explicit SkTextBlob(const SkRect& bounds); + + ~SkTextBlob(); + + // Memory for objects of this class is created with sk_malloc rather than operator new and must + // be freed with sk_free. + void operator delete(void* p); + void* operator new(size_t); + void* operator new(size_t, void* p); + + static unsigned ScalarsPerGlyph(GlyphPositioning pos); + + // Call when this blob is part of the key to a cache entry. This allows the cache + // to know automatically those entries can be purged when this SkTextBlob is deleted. + void notifyAddedToCache(uint32_t cacheID) const { + fCacheID.store(cacheID); + } + + friend class SkGlyphRunList; + friend class GrTextBlobCache; + friend class SkTextBlobBuilder; + friend class SkTextBlobPriv; + friend class SkTextBlobRunIterator; + + const SkRect fBounds; + const uint32_t fUniqueID; + mutable std::atomic<uint32_t> fCacheID; + + SkDEBUGCODE(size_t fStorageSize;) + + // The actual payload resides in externally-managed storage, following the object. + // (see the .cpp for more details) + + using INHERITED = SkRefCnt; +}; + +/** \class SkTextBlobBuilder + Helper class for constructing SkTextBlob. +*/ +class SK_API SkTextBlobBuilder { +public: + + /** Constructs empty SkTextBlobBuilder. By default, SkTextBlobBuilder has no runs. + + @return empty SkTextBlobBuilder + + example: https://fiddle.skia.org/c/@TextBlobBuilder_empty_constructor + */ + SkTextBlobBuilder(); + + /** Deletes data allocated internally by SkTextBlobBuilder. + */ + ~SkTextBlobBuilder(); + + /** Returns SkTextBlob built from runs of glyphs added by builder. 
Returned + SkTextBlob is immutable; it may be copied, but its contents may not be altered. + Returns nullptr if no runs of glyphs were added by builder. + + Resets SkTextBlobBuilder to its initial empty state, allowing it to be + reused to build a new set of runs. + + @return SkTextBlob or nullptr + + example: https://fiddle.skia.org/c/@TextBlobBuilder_make + */ + sk_sp<SkTextBlob> make(); + + /** \struct SkTextBlobBuilder::RunBuffer + RunBuffer supplies storage for glyphs and positions within a run. + + A run is a sequence of glyphs sharing font metrics and positioning. + Each run may position its glyphs in one of three ways: + by specifying where the first glyph is drawn, and allowing font metrics to + determine the advance to subsequent glyphs; by specifying a baseline, and + the position on that baseline for each glyph in run; or by providing SkPoint + array, one per glyph. + */ + struct RunBuffer { + SkGlyphID* glyphs; //!< storage for glyph indexes in run + SkScalar* pos; //!< storage for glyph positions in run + char* utf8text; //!< storage for text UTF-8 code units in run + uint32_t* clusters; //!< storage for glyph clusters (index of UTF-8 code unit) + + // Helpers, since the "pos" field can be different types (always some number of floats). + SkPoint* points() const { return reinterpret_cast<SkPoint*>(pos); } + SkRSXform* xforms() const { return reinterpret_cast<SkRSXform*>(pos); } + }; + + /** Returns run with storage for glyphs. Caller must write count glyphs to + RunBuffer::glyphs before next call to SkTextBlobBuilder. + + RunBuffer::pos, RunBuffer::utf8text, and RunBuffer::clusters should be ignored. + + Glyphs share metrics in font. + + Glyphs are positioned on a baseline at (x, y), using font metrics to + determine their relative placement. + + bounds defines an optional bounding box, used to suppress drawing when SkTextBlob + bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds + is computed from (x, y) and RunBuffer::glyphs metrics. + + @param font SkFont used for this run + @param count number of glyphs + @param x horizontal offset within the blob + @param y vertical offset within the blob + @param bounds optional run bounding box + @return writable glyph buffer + */ + const RunBuffer& allocRun(const SkFont& font, int count, SkScalar x, SkScalar y, + const SkRect* bounds = nullptr); + + /** Returns run with storage for glyphs and positions along baseline. Caller must + write count glyphs to RunBuffer::glyphs and count scalars to RunBuffer::pos + before next call to SkTextBlobBuilder. + + RunBuffer::utf8text and RunBuffer::clusters should be ignored. + + Glyphs share metrics in font. + + Glyphs are positioned on a baseline at y, using x-axis positions written by + caller to RunBuffer::pos. + + bounds defines an optional bounding box, used to suppress drawing when SkTextBlob + bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds + is computed from y, RunBuffer::pos, and RunBuffer::glyphs metrics. + + @param font SkFont used for this run + @param count number of glyphs + @param y vertical offset within the blob + @param bounds optional run bounding box + @return writable glyph buffer and x-axis position buffer + */ + const RunBuffer& allocRunPosH(const SkFont& font, int count, SkScalar y, + const SkRect* bounds = nullptr); + + /** Returns run with storage for glyphs and SkPoint positions. 
Caller must + write count glyphs to RunBuffer::glyphs and count SkPoint to RunBuffer::pos + before next call to SkTextBlobBuilder. + + RunBuffer::utf8text and RunBuffer::clusters should be ignored. + + Glyphs share metrics in font. + + Glyphs are positioned using SkPoint written by caller to RunBuffer::pos, using + two scalar values for each SkPoint. + + bounds defines an optional bounding box, used to suppress drawing when SkTextBlob + bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds + is computed from RunBuffer::pos, and RunBuffer::glyphs metrics. + + @param font SkFont used for this run + @param count number of glyphs + @param bounds optional run bounding box + @return writable glyph buffer and SkPoint buffer + */ + const RunBuffer& allocRunPos(const SkFont& font, int count, + const SkRect* bounds = nullptr); + + // RunBuffer.pos points to SkRSXform array + const RunBuffer& allocRunRSXform(const SkFont& font, int count); + + /** Returns run with storage for glyphs, text, and clusters. Caller must + write count glyphs to RunBuffer::glyphs, textByteCount UTF-8 code units + into RunBuffer::utf8text, and count monotonic indexes into utf8text + into RunBuffer::clusters before next call to SkTextBlobBuilder. + + RunBuffer::pos should be ignored. + + Glyphs share metrics in font. + + Glyphs are positioned on a baseline at (x, y), using font metrics to + determine their relative placement. + + bounds defines an optional bounding box, used to suppress drawing when SkTextBlob + bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds + is computed from (x, y) and RunBuffer::glyphs metrics. + + @param font SkFont used for this run + @param count number of glyphs + @param x horizontal offset within the blob + @param y vertical offset within the blob + @param textByteCount number of UTF-8 code units + @param bounds optional run bounding box + @return writable glyph buffer, text buffer, and cluster buffer + */ + const RunBuffer& allocRunText(const SkFont& font, int count, SkScalar x, SkScalar y, + int textByteCount, const SkRect* bounds = nullptr); + + /** Returns run with storage for glyphs, positions along baseline, text, + and clusters. Caller must write count glyphs to RunBuffer::glyphs, + count scalars to RunBuffer::pos, textByteCount UTF-8 code units into + RunBuffer::utf8text, and count monotonic indexes into utf8text into + RunBuffer::clusters before next call to SkTextBlobBuilder. + + Glyphs share metrics in font. + + Glyphs are positioned on a baseline at y, using x-axis positions written by + caller to RunBuffer::pos. + + bounds defines an optional bounding box, used to suppress drawing when SkTextBlob + bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds + is computed from y, RunBuffer::pos, and RunBuffer::glyphs metrics. + + @param font SkFont used for this run + @param count number of glyphs + @param y vertical offset within the blob + @param textByteCount number of UTF-8 code units + @param bounds optional run bounding box + @return writable glyph buffer, x-axis position buffer, text buffer, and cluster buffer + */ + const RunBuffer& allocRunTextPosH(const SkFont& font, int count, SkScalar y, int textByteCount, + const SkRect* bounds = nullptr); + + /** Returns run with storage for glyphs, SkPoint positions, text, and + clusters. 
Caller must write count glyphs to RunBuffer::glyphs, count + SkPoint to RunBuffer::pos, textByteCount UTF-8 code units into + RunBuffer::utf8text, and count monotonic indexes into utf8text into + RunBuffer::clusters before next call to SkTextBlobBuilder. + + Glyphs share metrics in font. + + Glyphs are positioned using SkPoint written by caller to RunBuffer::pos, using + two scalar values for each SkPoint. + + bounds defines an optional bounding box, used to suppress drawing when SkTextBlob + bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds + is computed from RunBuffer::pos, and RunBuffer::glyphs metrics. + + @param font SkFont used for this run + @param count number of glyphs + @param textByteCount number of UTF-8 code units + @param bounds optional run bounding box + @return writable glyph buffer, SkPoint buffer, text buffer, and cluster buffer + */ + const RunBuffer& allocRunTextPos(const SkFont& font, int count, int textByteCount, + const SkRect* bounds = nullptr); + + // RunBuffer.pos points to SkRSXform array + const RunBuffer& allocRunTextRSXform(const SkFont& font, int count, int textByteCount, + const SkRect* bounds = nullptr); + +private: + void reserve(size_t size); + void allocInternal(const SkFont& font, SkTextBlob::GlyphPositioning positioning, + int count, int textBytes, SkPoint offset, const SkRect* bounds); + bool mergeRun(const SkFont& font, SkTextBlob::GlyphPositioning positioning, + uint32_t count, SkPoint offset); + void updateDeferredBounds(); + + static SkRect ConservativeRunBounds(const SkTextBlob::RunRecord&); + static SkRect TightRunBounds(const SkTextBlob::RunRecord&); + + friend class SkTextBlobPriv; + friend class SkTextBlobBuilderPriv; + + SkAutoTMalloc<uint8_t> fStorage; + size_t fStorageSize; + size_t fStorageUsed; + + SkRect fBounds; + int fRunCount; + bool fDeferredBounds; + size_t fLastRun; // index into fStorage + + RunBuffer fCurrentRunBuffer; +}; + +#endif // SkTextBlob_DEFINED diff --git a/src/deps/skia/include/core/SkTileMode.h b/src/deps/skia/include/core/SkTileMode.h new file mode 100644 index 000000000..8a9d02095 --- /dev/null +++ b/src/deps/skia/include/core/SkTileMode.h @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTileModes_DEFINED +#define SkTileModes_DEFINED + +#include "include/core/SkTypes.h" + +enum class SkTileMode { + /** + * Replicate the edge color if the shader draws outside of its + * original bounds. + */ + kClamp, + + /** + * Repeat the shader's image horizontally and vertically. + */ + kRepeat, + + /** + * Repeat the shader's image horizontally and vertically, alternating + * mirror images so that adjacent images always seam. + */ + kMirror, + + /** + * Only draw within the original domain, return transparent-black everywhere else. + */ + kDecal, + + kLastTileMode = kDecal, +}; + +static constexpr int kSkTileModeCount = static_cast<int>(SkTileMode::kLastTileMode) + 1; + +#endif diff --git a/src/deps/skia/include/core/SkTime.h b/src/deps/skia/include/core/SkTime.h new file mode 100644 index 000000000..3da2c8c77 --- /dev/null +++ b/src/deps/skia/include/core/SkTime.h @@ -0,0 +1,63 @@ + +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + + +#ifndef SkTime_DEFINED +#define SkTime_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkMacros.h" + +#include <cinttypes> + +class SkString; + +/** \class SkTime + Platform-implemented utilities to return time of day, and millisecond counter. +*/ +class SK_API SkTime { +public: + struct DateTime { + int16_t fTimeZoneMinutes; // The number of minutes that GetDateTime() + // is ahead of or behind UTC. + uint16_t fYear; //!< e.g. 2005 + uint8_t fMonth; //!< 1..12 + uint8_t fDayOfWeek; //!< 0..6, 0==Sunday + uint8_t fDay; //!< 1..31 + uint8_t fHour; //!< 0..23 + uint8_t fMinute; //!< 0..59 + uint8_t fSecond; //!< 0..59 + + void toISO8601(SkString* dst) const; + }; + static void GetDateTime(DateTime*); + + static double GetSecs() { return GetNSecs() * 1e-9; } + static double GetMSecs() { return GetNSecs() * 1e-6; } + static double GetNSecs(); +}; + +/////////////////////////////////////////////////////////////////////////////// + +class SkAutoTime { +public: + // The label is not deep-copied, so its address must remain valid for the + // lifetime of this object + SkAutoTime(const char* label = nullptr) + : fLabel(label) + , fNow(SkTime::GetMSecs()) {} + ~SkAutoTime() { + uint64_t dur = static_cast<uint64_t>(SkTime::GetMSecs() - fNow); + SkDebugf("%s %" PRIu64 "\n", fLabel ? fLabel : "", dur); + } +private: + const char* fLabel; + double fNow; +}; + +#endif diff --git a/src/deps/skia/include/core/SkTraceMemoryDump.h b/src/deps/skia/include/core/SkTraceMemoryDump.h new file mode 100644 index 000000000..7837bfbd8 --- /dev/null +++ b/src/deps/skia/include/core/SkTraceMemoryDump.h @@ -0,0 +1,99 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTraceMemoryDump_DEFINED +#define SkTraceMemoryDump_DEFINED + +#include "include/core/SkTypes.h" + +class SkDiscardableMemory; + +/** + * Interface for memory tracing. + * This interface is meant to be passed as argument to the memory dump methods of Skia objects. + * The implementation of this interface is provided by the embedder. + */ +class SK_API SkTraceMemoryDump { +public: + /** + * Enum to specify the level of the requested details for the dump from the Skia objects. + */ + enum LevelOfDetail { + // Dump only the minimal details to get the total memory usage (Usually just the totals). + kLight_LevelOfDetail, + + // Dump the detailed breakdown of the objects in the caches. + kObjectsBreakdowns_LevelOfDetail + }; + + /** + * Appends a new memory dump (i.e. a row) to the trace memory infrastructure. + * If dumpName does not exist yet, a new one is created. Otherwise, a new column is appended to + * the previously created dump. + * Arguments: + * dumpName: an absolute, slash-separated, name for the item being dumped + * e.g., "skia/CacheX/EntryY". + * valueName: a string indicating the name of the column. + * e.g., "size", "active_size", "number_of_objects". + * This string is supposed to be long lived and is NOT copied. + * units: a string indicating the units for the value. + * e.g., "bytes", "objects". + * This string is supposed to be long lived and is NOT copied. + * value: the actual value being dumped. + */ + virtual void dumpNumericValue(const char* dumpName, + const char* valueName, + const char* units, + uint64_t value) = 0; + + virtual void dumpStringValue(const char* /*dumpName*/, + const char* /*valueName*/, + const char* /*value*/) { } + + /** + * Sets the memory backing for an existing dump. 
+ * backingType and backingObjectId are used by the embedder to associate the memory dumped via + * dumpNumericValue with the corresponding dump that backs the memory. + */ + virtual void setMemoryBacking(const char* dumpName, + const char* backingType, + const char* backingObjectId) = 0; + + /** + * Specialization for memory backed by discardable memory. + */ + virtual void setDiscardableMemoryBacking( + const char* dumpName, + const SkDiscardableMemory& discardableMemoryObject) = 0; + + /** + * Returns the type of details requested in the dump. The granularity of the dump is supposed to + * match the LevelOfDetail argument. The level of detail must not affect the total size + * reported, but only granularity of the child entries. + */ + virtual LevelOfDetail getRequestedDetails() const = 0; + + /** + * Returns true if we should dump wrapped objects. Wrapped objects come from outside Skia, and + * may be independently tracked there. + */ + virtual bool shouldDumpWrappedObjects() const { return true; } + + /** + * If shouldDumpWrappedObjects() returns true then this function will be called to populate + * the output with information on whether the item being dumped is a wrapped object. + */ + virtual void dumpWrappedState(const char* /*dumpName*/, bool /*isWrappedObject*/) {} + +protected: + virtual ~SkTraceMemoryDump() = default; + SkTraceMemoryDump() = default; + SkTraceMemoryDump(const SkTraceMemoryDump&) = delete; + SkTraceMemoryDump& operator=(const SkTraceMemoryDump&) = delete; +}; + +#endif diff --git a/src/deps/skia/include/core/SkTypeface.h b/src/deps/skia/include/core/SkTypeface.h new file mode 100644 index 000000000..ad49a544b --- /dev/null +++ b/src/deps/skia/include/core/SkTypeface.h @@ -0,0 +1,454 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTypeface_DEFINED +#define SkTypeface_DEFINED + +#include "include/core/SkFontArguments.h" +#include "include/core/SkFontParameters.h" +#include "include/core/SkFontStyle.h" +#include "include/core/SkFontTypes.h" +#include "include/core/SkRect.h" +#include "include/core/SkString.h" +#include "include/private/SkOnce.h" +#include "include/private/SkWeakRefCnt.h" + +class SkData; +class SkDescriptor; +class SkFontData; +class SkFontDescriptor; +class SkScalerContext; +class SkStream; +class SkStreamAsset; +class SkWStream; +struct SkAdvancedTypefaceMetrics; +struct SkScalerContextEffects; +struct SkScalerContextRec; + +typedef uint32_t SkFontID; +/** Machine endian. */ +typedef uint32_t SkFontTableTag; + +/** \class SkTypeface + + The SkTypeface class specifies the typeface and intrinsic style of a font. + This is used in the paint, along with optionally algorithmic settings like + textSize, textSkewX, textScaleX, kFakeBoldText_Mask, to specify + how text appears when drawn (and measured). + + Typeface objects are immutable, and so they can be shared between threads. +*/ +class SK_API SkTypeface : public SkWeakRefCnt { +public: + /** Returns the typeface's intrinsic style attributes. */ + SkFontStyle fontStyle() const { + return fStyle; + } + + /** Returns true if style() has the kBold bit set. */ + bool isBold() const { return fStyle.weight() >= SkFontStyle::kSemiBold_Weight; } + + /** Returns true if style() has the kItalic bit set. */ + bool isItalic() const { return fStyle.slant() != SkFontStyle::kUpright_Slant; } + + /** Returns true if the typeface claims to be fixed-pitch. 
+ * This is a style bit, advance widths may vary even if this returns true.
+ */
+ bool isFixedPitch() const { return fIsFixedPitch; }
+
+ /** Copy into 'coordinates' (allocated by the caller) the design variation coordinates.
+ *
+ * @param coordinates the buffer into which to write the design variation coordinates.
+ * @param coordinateCount the number of entries available through 'coordinates'.
+ *
+ * @return The number of axes, or -1 if there is an error.
+ * If 'coordinates != nullptr' and 'coordinateCount >= numAxes' then 'coordinates' will be
+ * filled with the variation coordinates describing the position of this typeface in design
+ * variation space. It is possible the number of axes can be retrieved but actual position
+ * cannot.
+ */
+ int getVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const;
+
+ /** Copy into 'parameters' (allocated by the caller) the design variation parameters.
+ *
+ * @param parameters the buffer into which to write the design variation parameters.
+ * @param parameterCount the number of entries available through 'parameters'.
+ *
+ * @return The number of axes, or -1 if there is an error.
+ * If 'parameters != nullptr' and 'parameterCount >= numAxes' then 'parameters' will be
+ * filled with the variation parameters describing the position of this typeface in design
+ * variation space. It is possible the number of axes can be retrieved but actual parameters
+ * cannot.
+ */
+ int getVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const;
+
+ /** Return a 32bit value for this typeface, unique for the underlying font
+ data. Will never return 0.
+ */
+ SkFontID uniqueID() const { return fUniqueID; }
+
+ /** Return the uniqueID for the specified typeface. If the face is null,
+ resolve it to the default font and return its uniqueID. Will never
+ return 0.
+ */
+ static SkFontID UniqueID(const SkTypeface* face);
+
+ /** Returns true if the two typefaces reference the same underlying font,
+ handling either being null (treating null as the default font)
+ */
+ static bool Equal(const SkTypeface* facea, const SkTypeface* faceb);
+
+ /** Returns the default normal typeface, which is never nullptr. */
+ static sk_sp<SkTypeface> MakeDefault();
+
+ /** Creates a new reference to the typeface that most closely matches the
+ requested familyName and fontStyle. This method allows extended font
+ face specifiers as in the SkFontStyle type. Will never return null.
+
+ @param familyName May be NULL. The name of the font family.
+ @param fontStyle The style of the typeface.
+ @return reference to the closest-matching typeface. Caller must call
+ unref() when they are done.
+ */
+ static sk_sp<SkTypeface> MakeFromName(const char familyName[], SkFontStyle fontStyle);
+
+ /** Return a new typeface given a file. If the file does not exist, or is
+ not a valid font file, returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromFile(const char path[], int index = 0);
+
+ /** Return a new typeface given a stream. If the stream is
+ not a valid font file, returns nullptr. Ownership of the stream is
+ transferred, so the caller must not reference it again.
+ */
+ static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset> stream, int index = 0);
+
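A hedged usage sketch of the factories above; the family name and file path are placeholders:

    #include "include/core/SkFontStyle.h"
    #include "include/core/SkTypeface.h"

    static sk_sp<SkTypeface> pickTypeface() {
        // Closest installed match to the request; documented above as never null.
        sk_sp<SkTypeface> face = SkTypeface::MakeFromName("Helvetica", SkFontStyle::Bold());
        // Loading from a file instead returns nullptr if the path is not a valid font:
        // sk_sp<SkTypeface> fromFile = SkTypeface::MakeFromFile("/path/to/font.ttf");
        return face ? face : SkTypeface::MakeDefault();
    }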
+ /** Return a new typeface given a SkData. If the data is null, or is not a valid font file,
+ * returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromData(sk_sp<SkData>, int index = 0);
+
+ /** Return a new typeface based on this typeface but parameterized as specified in the
+ SkFontArguments. If the SkFontArguments does not supply an argument for a parameter
+ in the font then the value from this typeface will be used as the value for that
+ argument. If the cloned typeface would be exactly the same as this typeface then
+ this typeface may be ref'ed and returned. May return nullptr on failure.
+ */
+ sk_sp<SkTypeface> makeClone(const SkFontArguments&) const;
+
+ /**
+ * A typeface can serialize just a descriptor (names, etc.), or it can also include the
+ * actual font data (which can be large). This enum controls how serialize() decides what
+ * to serialize.
+ */
+ enum class SerializeBehavior {
+ kDoIncludeData,
+ kDontIncludeData,
+ kIncludeDataIfLocal,
+ };
+
+ /** Write a unique signature to a stream, sufficient to reconstruct a
+ typeface referencing the same font when Deserialize is called.
+ */
+ void serialize(SkWStream*, SerializeBehavior = SerializeBehavior::kIncludeDataIfLocal) const;
+
+ /**
+ * Same as serialize(SkWStream*, ...) but returns the serialized data in SkData, instead of
+ * writing it to a stream.
+ */
+ sk_sp<SkData> serialize(SerializeBehavior = SerializeBehavior::kIncludeDataIfLocal) const;
+
+ /** Given the data previously written by serialize(), return a new instance
+ of a typeface referring to the same font. If that font is not available,
+ return nullptr.
+ Does not affect ownership of SkStream.
+ */
+ static sk_sp<SkTypeface> MakeDeserialize(SkStream*);
+
+ /**
+ * Given an array of UTF32 character codes, return their corresponding glyph IDs.
+ *
+ * @param uni pointer to the array of UTF32 chars
+ * @param count number of chars and glyphs
+ * @param glyphs returns the corresponding glyph IDs for each character.
+ */
+ void unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const;
+
+ int textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkGlyphID glyphs[], int maxGlyphCount) const;
+
+ /**
+ * Return the glyphID that corresponds to the specified unicode code-point
+ * (in UTF32 encoding). If the unichar is not supported, returns 0.
+ *
+ * This is a short-cut for calling unicharsToGlyphs().
+ */
+ SkGlyphID unicharToGlyph(SkUnichar unichar) const;
+
+ /**
+ * Return the number of glyphs in the typeface.
+ */
+ int countGlyphs() const;
+
+ // Table getters -- may fail if the underlying font format is not organized
+ // as 4-byte tables.
+
+ /** Return the number of tables in the font. */
+ int countTables() const;
+
+ /** Copy into tags[] (allocated by the caller) the list of table tags in
+ * the font, and return the number. This will be the same as CountTables()
+ * or 0 if an error occurred. If tags == NULL, this only returns the count
+ * (the same as calling countTables()).
+ */
+ int getTableTags(SkFontTableTag tags[]) const;
+
+ /** Given a table tag, return the size of its contents, or 0 if not present
+ */
+ size_t getTableSize(SkFontTableTag) const;
+
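A hedged sketch of the glyph-mapping and table queries above, with minimal error handling; the function name is illustrative:

    #include <vector>
    #include "include/core/SkTypeface.h"

    static void inspectTypeface(const sk_sp<SkTypeface>& face) {
        SkGlyphID glyph = face->unicharToGlyph('A');  // 0 means the character is unsupported
        SkDebugf("glyph for 'A': %d of %d\n", (int)glyph, face->countGlyphs());

        std::vector<SkFontTableTag> tags(face->countTables());
        face->getTableTags(tags.data());              // e.g. 'cmap', 'glyf', ...
        for (SkFontTableTag tag : tags) {
            SkDebugf("table 0x%08x: %zu bytes\n", (unsigned)tag, face->getTableSize(tag));
        }
    }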
+ * + * @param tag The table tag whose contents are to be copied + * @param offset The offset in bytes into the table's contents where the + * copy should start from. + * @param length The number of bytes, starting at offset, of table data + * to copy. + * @param data storage address where the table contents are copied to + * @return the number of bytes actually copied into data. If offset+length + * exceeds the table's size, then only the bytes up to the table's + * size are actually copied, and this is the value returned. If + * offset > the table's size, or tag is not a valid table, + * then 0 is returned. + */ + size_t getTableData(SkFontTableTag tag, size_t offset, size_t length, + void* data) const; + + /** + * Return an immutable copy of the requested font table, or nullptr if that table was + * not found. This can sometimes be faster than calling getTableData() twice: once to find + * the length, and then again to copy the data. + * + * @param tag The table tag whose contents are to be copied + * @return an immutable copy of the table's data, or nullptr. + */ + sk_sp<SkData> copyTableData(SkFontTableTag tag) const; + + /** + * Return the units-per-em value for this typeface, or zero if there is an + * error. + */ + int getUnitsPerEm() const; + + /** + * Given a run of glyphs, return the associated horizontal adjustments. + * Adjustments are in "design units", which are integers relative to the + * typeface's units per em (see getUnitsPerEm). + * + * Some typefaces are known to never support kerning. Calling this method + * with all zeros (e.g. getKerningPairAdustments(NULL, 0, NULL)) returns + * a boolean indicating if the typeface might support kerning. If it + * returns false, then it will always return false (no kerning) for all + * possible glyph runs. If it returns true, then it *may* return true for + * somne glyph runs. + * + * If count is non-zero, then the glyphs parameter must point to at least + * [count] valid glyph IDs, and the adjustments parameter must be + * sized to at least [count - 1] entries. If the method returns true, then + * [count-1] entries in the adjustments array will be set. If the method + * returns false, then no kerning should be applied, and the adjustments + * array will be in an undefined state (possibly some values may have been + * written, but none of them should be interpreted as valid values). + */ + bool getKerningPairAdjustments(const SkGlyphID glyphs[], int count, + int32_t adjustments[]) const; + + struct LocalizedString { + SkString fString; + SkString fLanguage; + }; + class LocalizedStrings { + public: + LocalizedStrings() = default; + virtual ~LocalizedStrings() { } + virtual bool next(LocalizedString* localizedString) = 0; + void unref() { delete this; } + + private: + LocalizedStrings(const LocalizedStrings&) = delete; + LocalizedStrings& operator=(const LocalizedStrings&) = delete; + }; + /** + * Returns an iterator which will attempt to enumerate all of the + * family names specified by the font. + * It is the caller's responsibility to unref() the returned pointer. + */ + LocalizedStrings* createFamilyNameIterator() const; + + /** + * Return the family name for this typeface. It will always be returned + * encoded as UTF8, but the language of the name is whatever the host + * platform chooses. + */ + void getFamilyName(SkString* name) const; + + /** + * Return the PostScript name for this typeface. + * Value may change based on variation parameters. + * Returns false if no PostScript name is available. 
+ */ + bool getPostScriptName(SkString* name) const; + + /** + * Return a stream for the contents of the font data, or NULL on failure. + * If ttcIndex is not null, it is set to the TrueTypeCollection index + * of this typeface within the stream, or 0 if the stream is not a + * collection. + * The caller is responsible for deleting the stream. + */ + std::unique_ptr<SkStreamAsset> openStream(int* ttcIndex) const; + + /** + * Return a scalercontext for the given descriptor. It may return a + * stub scalercontext that will not crash, but will draw nothing. + */ + std::unique_ptr<SkScalerContext> createScalerContext(const SkScalerContextEffects&, + const SkDescriptor*) const; + + /** + * Return a rectangle (scaled to 1-pt) that represents the union of the bounds of all + * of the glyphs, but each one positioned at (0,). This may be conservatively large, and + * will not take into account any hinting or other size-specific adjustments. + */ + SkRect getBounds() const; + + // PRIVATE / EXPERIMENTAL -- do not call + void filterRec(SkScalerContextRec* rec) const { + this->onFilterRec(rec); + } + // PRIVATE / EXPERIMENTAL -- do not call + void getFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const { + this->onGetFontDescriptor(desc, isLocal); + } + // PRIVATE / EXPERIMENTAL -- do not call + void* internal_private_getCTFontRef() const { + return this->onGetCTFontRef(); + } + +protected: + explicit SkTypeface(const SkFontStyle& style, bool isFixedPitch = false); + ~SkTypeface() override; + + virtual sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const = 0; + + /** Sets the fixedPitch bit. If used, must be called in the constructor. */ + void setIsFixedPitch(bool isFixedPitch) { fIsFixedPitch = isFixedPitch; } + /** Sets the font style. If used, must be called in the constructor. */ + void setFontStyle(SkFontStyle style) { fStyle = style; } + + // Must return a valid scaler context. It can not return nullptr. + virtual std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects&, + const SkDescriptor*) const = 0; + virtual void onFilterRec(SkScalerContextRec*) const = 0; + friend class SkScalerContext; // onFilterRec + + // Subclasses *must* override this method to work with the PDF backend. + virtual std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const = 0; + // For type1 postscript fonts only, set the glyph names for each glyph. + // destination array is non-null, and points to an array of size this->countGlyphs(). + // Backends that do not suport type1 fonts should not override. + virtual void getPostScriptGlyphNames(SkString*) const = 0; + + // The mapping from glyph to Unicode; array indices are glyph ids. + // For each glyph, give the default Unicode value, if it exists. + // dstArray is non-null, and points to an array of size this->countGlyphs(). 
+ virtual void getGlyphToUnicodeMap(SkUnichar* dstArray) const = 0; + + virtual std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const = 0; + + virtual bool onGlyphMaskNeedsCurrentColor() const = 0; + + virtual int onGetVariationDesignPosition( + SkFontArguments::VariationPosition::Coordinate coordinates[], + int coordinateCount) const = 0; + + virtual int onGetVariationDesignParameters( + SkFontParameters::Variation::Axis parameters[], int parameterCount) const = 0; + + virtual void onGetFontDescriptor(SkFontDescriptor*, bool* isLocal) const = 0; + + virtual void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const = 0; + virtual int onCountGlyphs() const = 0; + + virtual int onGetUPEM() const = 0; + virtual bool onGetKerningPairAdjustments(const SkGlyphID glyphs[], int count, + int32_t adjustments[]) const; + + /** Returns the family name of the typeface as known by its font manager. + * This name may or may not be produced by the family name iterator. + */ + virtual void onGetFamilyName(SkString* familyName) const = 0; + virtual bool onGetPostScriptName(SkString*) const = 0; + + /** Returns an iterator over the family names in the font. */ + virtual LocalizedStrings* onCreateFamilyNameIterator() const = 0; + + virtual int onGetTableTags(SkFontTableTag tags[]) const = 0; + virtual size_t onGetTableData(SkFontTableTag, size_t offset, + size_t length, void* data) const = 0; + virtual sk_sp<SkData> onCopyTableData(SkFontTableTag) const; + + virtual bool onComputeBounds(SkRect*) const; + + virtual void* onGetCTFontRef() const { return nullptr; } + +private: + /** Returns true if the typeface's glyph masks may refer to the foreground + * paint foreground color. This is needed to determine caching requirements. Usually true for + * typefaces that contain a COLR table. + */ + bool glyphMaskNeedsCurrentColor() const; + friend class SkStrikeServerImpl; // glyphMaskNeedsCurrentColor + + /** Retrieve detailed typeface metrics. Used by the PDF backend. */ + std::unique_ptr<SkAdvancedTypefaceMetrics> getAdvancedMetrics() const; + friend class SkRandomTypeface; // getAdvancedMetrics + friend class SkPDFFont; // getAdvancedMetrics + + /** Style specifies the intrinsic style attributes of a given typeface */ + enum Style { + kNormal = 0, + kBold = 0x01, + kItalic = 0x02, + + // helpers + kBoldItalic = 0x03 + }; + static SkFontStyle FromOldStyle(Style oldStyle); + static SkTypeface* GetDefaultTypeface(Style style = SkTypeface::kNormal); + + friend class SkFontPriv; // GetDefaultTypeface + friend class SkPaintPriv; // GetDefaultTypeface + friend class SkFont; // getGlyphToUnicodeMap + +private: + SkFontID fUniqueID; + SkFontStyle fStyle; + mutable SkRect fBounds; + mutable SkOnce fBoundsOnce; + bool fIsFixedPitch; + + using INHERITED = SkWeakRefCnt; +}; +#endif diff --git a/src/deps/skia/include/core/SkTypes.h b/src/deps/skia/include/core/SkTypes.h new file mode 100644 index 000000000..1d94990d6 --- /dev/null +++ b/src/deps/skia/include/core/SkTypes.h @@ -0,0 +1,621 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTypes_DEFINED +#define SkTypes_DEFINED + +/** \file SkTypes.h +*/ + +// Pre-SkUserConfig.h setup. 
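Before the SkTypes.h configuration machinery continues below, a hedged sketch of exercising the SkTypeface public API vendored just above (not part of the vendored sources): the "Arial" family name and the printf logging are placeholders, and SkFontStyle, SkDynamicMemoryWStream and SkData come from other core headers added elsewhere in this commit.

#include "include/core/SkData.h"
#include "include/core/SkFontStyle.h"
#include "include/core/SkStream.h"
#include "include/core/SkString.h"
#include "include/core/SkTypeface.h"

#include <cstdio>

// Load a typeface by family name (falling back to the default), query a few
// properties, and serialize a descriptor-only copy into a memory stream.
static void inspectTypeface() {
    sk_sp<SkTypeface> face = SkTypeface::MakeFromName("Arial", SkFontStyle::Normal());
    if (!face) {
        face = SkTypeface::MakeDefault();  // MakeDefault() never returns nullptr.
    }

    SkString family;
    face->getFamilyName(&family);
    std::printf("family=%s glyphs=%d upem=%d fixedPitch=%d\n",
                family.c_str(), face->countGlyphs(), face->getUnitsPerEm(),
                face->isFixedPitch());

    // Map a code point to a glyph ID (0 means the face has no glyph for it).
    SkGlyphID g = face->unicharToGlyph(0x41 /* 'A' */);
    std::printf("glyph for 'A' = %d\n", (int)g);

    // Serialize just the descriptor; the font data stays with the platform font.
    SkDynamicMemoryWStream out;
    face->serialize(&out, SkTypeface::SerializeBehavior::kDontIncludeData);
    sk_sp<SkData> blob = out.detachAsData();
    std::printf("serialized descriptor: %zu bytes\n", blob->size());
}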
+ +// Allows embedders that want to disable macros that take arguments to just +// define that symbol to be one of these +#define SK_NOTHING_ARG1(arg1) +#define SK_NOTHING_ARG2(arg1, arg2) +#define SK_NOTHING_ARG3(arg1, arg2, arg3) + +#if !defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_WIN) && \ + !defined(SK_BUILD_FOR_UNIX) && !defined(SK_BUILD_FOR_MAC) + + #ifdef __APPLE__ + #include <TargetConditionals.h> + #endif + + #if defined(_WIN32) || defined(__SYMBIAN32__) + #define SK_BUILD_FOR_WIN + #elif defined(ANDROID) || defined(__ANDROID__) + #define SK_BUILD_FOR_ANDROID + #elif defined(linux) || defined(__linux) || defined(__FreeBSD__) || \ + defined(__OpenBSD__) || defined(__sun) || defined(__NetBSD__) || \ + defined(__DragonFly__) || defined(__Fuchsia__) || \ + defined(__GLIBC__) || defined(__GNU__) || defined(__unix__) + #define SK_BUILD_FOR_UNIX + #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR + #define SK_BUILD_FOR_IOS + #else + #define SK_BUILD_FOR_MAC + #endif + +#endif + +#if defined(SK_BUILD_FOR_WIN) && !defined(__clang__) + #if !defined(SK_RESTRICT) + #define SK_RESTRICT __restrict + #endif + #if !defined(SK_WARN_UNUSED_RESULT) + #define SK_WARN_UNUSED_RESULT + #endif +#endif + +#if !defined(SK_RESTRICT) + #define SK_RESTRICT __restrict__ +#endif + +#if !defined(SK_WARN_UNUSED_RESULT) + #define SK_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#endif + +#if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN) + #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) + #define SK_CPU_BENDIAN + #elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + #define SK_CPU_LENDIAN + #elif defined(__sparc) || defined(__sparc__) || \ + defined(_POWER) || defined(__powerpc__) || \ + defined(__ppc__) || defined(__hppa) || \ + defined(__PPC__) || defined(__PPC64__) || \ + defined(_MIPSEB) || defined(__ARMEB__) || \ + defined(__s390__) || \ + (defined(__sh__) && defined(__BIG_ENDIAN__)) || \ + (defined(__ia64) && defined(__BIG_ENDIAN__)) + #define SK_CPU_BENDIAN + #else + #define SK_CPU_LENDIAN + #endif +#endif + +#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) + #define SK_CPU_X86 1 +#endif + +/** + * SK_CPU_SSE_LEVEL + * + * If defined, SK_CPU_SSE_LEVEL should be set to the highest supported level. + * On non-intel CPU this should be undefined. + */ +#define SK_CPU_SSE_LEVEL_SSE1 10 +#define SK_CPU_SSE_LEVEL_SSE2 20 +#define SK_CPU_SSE_LEVEL_SSE3 30 +#define SK_CPU_SSE_LEVEL_SSSE3 31 +#define SK_CPU_SSE_LEVEL_SSE41 41 +#define SK_CPU_SSE_LEVEL_SSE42 42 +#define SK_CPU_SSE_LEVEL_AVX 51 +#define SK_CPU_SSE_LEVEL_AVX2 52 +#define SK_CPU_SSE_LEVEL_SKX 60 + +// Are we in GCC/Clang? +#ifndef SK_CPU_SSE_LEVEL + // These checks must be done in descending order to ensure we set the highest + // available SSE level. 
+ #if defined(__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512CD__) && \ + defined(__AVX512BW__) && defined(__AVX512VL__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SKX + #elif defined(__AVX2__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2 + #elif defined(__AVX__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX + #elif defined(__SSE4_2__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE42 + #elif defined(__SSE4_1__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE41 + #elif defined(__SSSE3__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSSE3 + #elif defined(__SSE3__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE3 + #elif defined(__SSE2__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2 + #endif +#endif + +// Are we in VisualStudio? +#ifndef SK_CPU_SSE_LEVEL + // These checks must be done in descending order to ensure we set the highest + // available SSE level. 64-bit intel guarantees at least SSE2 support. + #if defined(__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512CD__) && \ + defined(__AVX512BW__) && defined(__AVX512VL__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SKX + #elif defined(__AVX2__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2 + #elif defined(__AVX__) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX + #elif defined(_M_X64) || defined(_M_AMD64) + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2 + #elif defined(_M_IX86_FP) + #if _M_IX86_FP >= 2 + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2 + #elif _M_IX86_FP == 1 + #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE1 + #endif + #endif +#endif + +// ARM defines +#if defined(__arm__) && (!defined(__APPLE__) || !TARGET_IPHONE_SIMULATOR) + #define SK_CPU_ARM32 +#elif defined(__aarch64__) + #define SK_CPU_ARM64 +#endif + +// All 64-bit ARM chips have NEON. Many 32-bit ARM chips do too. +#if !defined(SK_ARM_HAS_NEON) && defined(__ARM_NEON) + #define SK_ARM_HAS_NEON +#endif + +#if defined(__ARM_FEATURE_CRC32) + #define SK_ARM_HAS_CRC32 +#endif + + +// DLL/.so exports. +#if !defined(SKIA_IMPLEMENTATION) + #define SKIA_IMPLEMENTATION 0 +#endif +#if !defined(SK_API) + #if defined(SKIA_DLL) + #if defined(_MSC_VER) + #if SKIA_IMPLEMENTATION + #define SK_API __declspec(dllexport) + #else + #define SK_API __declspec(dllimport) + #endif + #else + #define SK_API __attribute__((visibility("default"))) + #endif + #else + #define SK_API + #endif +#endif + +// SK_SPI is functionally identical to SK_API, but used within src to clarify that it's less stable +#if !defined(SK_SPI) + #define SK_SPI SK_API +#endif + +// IWYU pragma: begin_exports +#if defined (SK_USER_CONFIG_HEADER) + #include SK_USER_CONFIG_HEADER +#else + #include "include/config/SkUserConfig.h" +#endif +#include <stddef.h> +#include <stdint.h> +// IWYU pragma: end_exports + +// Post SkUserConfig.h checks and such. 
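The sections above only define detection macros (SK_BUILD_FOR_*, SK_CPU_SSE_LEVEL, SK_ARM_HAS_NEON, SK_API); they do nothing until guarding code consumes them at compile time. A small illustrative sketch of how embedding code such as Bun's bindings could branch on them; skiaSimdTier and skiaTargetOS are made-up helper names.

#include "include/core/SkTypes.h"

// Report which SIMD tier this translation unit was compiled for, based on the
// SK_CPU_SSE_LEVEL / SK_ARM_HAS_NEON values resolved by the header above.
static const char* skiaSimdTier() {
#if defined(SK_CPU_SSE_LEVEL) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    return "avx2+";
#elif defined(SK_CPU_SSE_LEVEL) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
    return "sse2";
#elif defined(SK_ARM_HAS_NEON)
    return "neon";
#else
    return "portable";
#endif
}

// Platform guards work the same way: exactly one SK_BUILD_FOR_* macro is
// defined once the detection block above has run.
static const char* skiaTargetOS() {
#if defined(SK_BUILD_FOR_MAC)
    return "macos";
#elif defined(SK_BUILD_FOR_UNIX)
    return "linux/unix";
#elif defined(SK_BUILD_FOR_WIN)
    return "windows";
#else
    return "other";
#endif
}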
+#if !defined(SK_DEBUG) && !defined(SK_RELEASE) + #ifdef NDEBUG + #define SK_RELEASE + #else + #define SK_DEBUG + #endif +#endif + +#if defined(SK_DEBUG) && defined(SK_RELEASE) +# error "cannot define both SK_DEBUG and SK_RELEASE" +#elif !defined(SK_DEBUG) && !defined(SK_RELEASE) +# error "must define either SK_DEBUG or SK_RELEASE" +#endif + +#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN) +# error "cannot define both SK_CPU_LENDIAN and SK_CPU_BENDIAN" +#elif !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN) +# error "must define either SK_CPU_LENDIAN or SK_CPU_BENDIAN" +#endif + +#if defined(SK_CPU_BENDIAN) && !defined(I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN) + #error "The Skia team is not endian-savvy enough to support big-endian CPUs." + #error "If you still want to use Skia," + #error "please define I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN." +#endif + +#if !defined(SK_ATTRIBUTE) +# if defined(__clang__) || defined(__GNUC__) +# define SK_ATTRIBUTE(attr) __attribute__((attr)) +# else +# define SK_ATTRIBUTE(attr) +# endif +#endif + +#if !defined(SK_SUPPORT_GPU) +# define SK_SUPPORT_GPU 1 +#endif + +#if SK_SUPPORT_GPU || SK_GRAPHITE_ENABLED +# if !defined(SK_ENABLE_SKSL) +# define SK_ENABLE_SKSL +# endif +#else +# undef SK_GL +# undef SK_VULKAN +# undef SK_METAL +# undef SK_DAWN +# undef SK_DIRECT3D +#endif + +#if !defined(SkUNREACHABLE) +# if defined(_MSC_VER) && !defined(__clang__) +# include <intrin.h> +# define FAST_FAIL_INVALID_ARG 5 +// See https://developercommunity.visualstudio.com/content/problem/1128631/code-flow-doesnt-see-noreturn-with-extern-c.html +// for why this is wrapped. Hopefully removable after msvc++ 19.27 is no longer supported. +[[noreturn]] static inline void sk_fast_fail() { __fastfail(FAST_FAIL_INVALID_ARG); } +# define SkUNREACHABLE sk_fast_fail() +# else +# define SkUNREACHABLE __builtin_trap() +# endif +#endif + +#if defined(SK_BUILD_FOR_GOOGLE3) + void SkDebugfForDumpStackTrace(const char* data, void* unused); + void DumpStackTrace(int skip_count, void w(const char*, void*), void* arg); +# define SK_DUMP_GOOGLE3_STACK() DumpStackTrace(0, SkDebugfForDumpStackTrace, nullptr) +#else +# define SK_DUMP_GOOGLE3_STACK() +#endif + +#ifndef SK_ABORT +# ifdef SK_BUILD_FOR_WIN + // This style lets Visual Studio follow errors back to the source file. +# define SK_DUMP_LINE_FORMAT "%s(%d)" +# else +# define SK_DUMP_LINE_FORMAT "%s:%d" +# endif +# define SK_ABORT(message, ...) \ + do { \ + SkDebugf(SK_DUMP_LINE_FORMAT ": fatal error: \"" message "\"\n", \ + __FILE__, __LINE__, ##__VA_ARGS__); \ + SK_DUMP_GOOGLE3_STACK(); \ + sk_abort_no_print(); \ + } while (false) +#endif + +// If SK_R32_SHIFT is set, we'll use that to choose RGBA or BGRA. +// If not, we'll default to RGBA everywhere except BGRA on Windows. +#if defined(SK_R32_SHIFT) + static_assert(SK_R32_SHIFT == 0 || SK_R32_SHIFT == 16, ""); +#elif defined(SK_BUILD_FOR_WIN) + #define SK_R32_SHIFT 16 +#else + #define SK_R32_SHIFT 0 +#endif + +#if defined(SK_B32_SHIFT) + static_assert(SK_B32_SHIFT == (16-SK_R32_SHIFT), ""); +#else + #define SK_B32_SHIFT (16-SK_R32_SHIFT) +#endif + +#define SK_G32_SHIFT 8 +#define SK_A32_SHIFT 24 + + +/** + * SK_PMCOLOR_BYTE_ORDER can be used to query the byte order of SkPMColor at compile time. 
+ */ +#ifdef SK_CPU_BENDIAN +# define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \ + (SK_ ## C3 ## 32_SHIFT == 0 && \ + SK_ ## C2 ## 32_SHIFT == 8 && \ + SK_ ## C1 ## 32_SHIFT == 16 && \ + SK_ ## C0 ## 32_SHIFT == 24) +#else +# define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \ + (SK_ ## C0 ## 32_SHIFT == 0 && \ + SK_ ## C1 ## 32_SHIFT == 8 && \ + SK_ ## C2 ## 32_SHIFT == 16 && \ + SK_ ## C3 ## 32_SHIFT == 24) +#endif + +#if defined SK_DEBUG && defined SK_BUILD_FOR_WIN + #ifdef free + #undef free + #endif + #include <crtdbg.h> + #undef free +#endif + +#if !defined(SK_UNUSED) +# if !defined(__clang__) && defined(_MSC_VER) +# define SK_UNUSED __pragma(warning(suppress:4189)) +# else +# define SK_UNUSED SK_ATTRIBUTE(unused) +# endif +#endif + +#if !defined(SK_MAYBE_UNUSED) +# if defined(__clang__) || defined(__GNUC__) +# define SK_MAYBE_UNUSED [[maybe_unused]] +# else +# define SK_MAYBE_UNUSED +# endif +#endif + +/** + * If your judgment is better than the compiler's (i.e. you've profiled it), + * you can use SK_ALWAYS_INLINE to force inlining. E.g. + * inline void someMethod() { ... } // may not be inlined + * SK_ALWAYS_INLINE void someMethod() { ... } // should always be inlined + */ +#if !defined(SK_ALWAYS_INLINE) +# if defined(SK_BUILD_FOR_WIN) +# define SK_ALWAYS_INLINE __forceinline +# else +# define SK_ALWAYS_INLINE SK_ATTRIBUTE(always_inline) inline +# endif +#endif + +/** + * If your judgment is better than the compiler's (i.e. you've profiled it), + * you can use SK_NEVER_INLINE to prevent inlining. + */ +#if !defined(SK_NEVER_INLINE) +# if defined(SK_BUILD_FOR_WIN) +# define SK_NEVER_INLINE __declspec(noinline) +# else +# define SK_NEVER_INLINE SK_ATTRIBUTE(noinline) +# endif +#endif + +#ifndef SK_PRINTF_LIKE +# if defined(__clang__) || defined(__GNUC__) +# define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B)))) +# else +# define SK_PRINTF_LIKE(A, B) +# endif +#endif + +#ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS + #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 0 +#endif + +#if !defined(SK_GAMMA_EXPONENT) + #define SK_GAMMA_EXPONENT (0.0f) // SRGB +#endif + +#ifndef GR_TEST_UTILS +# define GR_TEST_UTILS 0 +#endif + +#ifndef SK_GPU_V1 +# define SK_GPU_V1 1 +#endif + +#if defined(SK_HISTOGRAM_ENUMERATION) || \ + defined(SK_HISTOGRAM_BOOLEAN) || \ + defined(SK_HISTOGRAM_EXACT_LINEAR) || \ + defined(SK_HISTOGRAM_MEMORY_KB) +# define SK_HISTOGRAMS_ENABLED 1 +#else +# define SK_HISTOGRAMS_ENABLED 0 +#endif + +#ifndef SK_HISTOGRAM_BOOLEAN +# define SK_HISTOGRAM_BOOLEAN(name, sample) +#endif + +#ifndef SK_HISTOGRAM_ENUMERATION +# define SK_HISTOGRAM_ENUMERATION(name, sample, enum_size) +#endif + +#ifndef SK_HISTOGRAM_EXACT_LINEAR +# define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) +#endif + +#ifndef SK_HISTOGRAM_MEMORY_KB +# define SK_HISTOGRAM_MEMORY_KB(name, sample) +#endif + +#define SK_HISTOGRAM_PERCENTAGE(name, percent_as_int) \ + SK_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101) + +#ifndef SK_DISABLE_LEGACY_SHADERCONTEXT +#define SK_ENABLE_LEGACY_SHADERCONTEXT +#endif + +#ifdef SK_ENABLE_API_AVAILABLE +#define SK_API_AVAILABLE API_AVAILABLE +#else +#define SK_API_AVAILABLE(...) +#endif + +#if defined(SK_BUILD_FOR_LIBFUZZER) || defined(SK_BUILD_FOR_AFL_FUZZ) + #define SK_BUILD_FOR_FUZZER +#endif + +/** Called internally if we hit an unrecoverable error. + The platform implementation must not return, but should either throw + an exception or otherwise exit. 
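The macros in this stretch are mostly annotations (SK_ALWAYS_INLINE, SK_PRINTF_LIKE, SK_UNUSED) plus SK_PMCOLOR_BYTE_ORDER, a compile-time predicate over the SK_*32_SHIFT values defined above. A hedged sketch of how client code can consume them; logPixelFormat and packOpaqueGray are made-up names for illustration.

#include "include/core/SkTypes.h"

#include <cstdarg>
#include <cstdio>

// SK_PMCOLOR_BYTE_ORDER expands to a constant expression, so layout
// assumptions can be checked at compile time.
static_assert(SK_PMCOLOR_BYTE_ORDER(R, G, B, A) || SK_PMCOLOR_BYTE_ORDER(B, G, R, A),
              "expected RGBA or BGRA packing for premultiplied colors");

// SK_PRINTF_LIKE on the declaration lets the compiler type-check callers'
// format strings, the same way SkDebugf is annotated below.
static void logPixelFormat(const char fmt[], ...) SK_PRINTF_LIKE(1, 2);

static void logPixelFormat(const char fmt[], ...) {
    va_list args;
    va_start(args, fmt);
    std::vfprintf(stderr, fmt, args);
    va_end(args);
}

// Packing a gray value with the shift macros works for either byte order.
SK_ALWAYS_INLINE static uint32_t packOpaqueGray(uint8_t v) {
    return ((uint32_t)v << SK_R32_SHIFT) | ((uint32_t)v << SK_G32_SHIFT) |
           ((uint32_t)v << SK_B32_SHIFT) | (0xFFu << SK_A32_SHIFT);
}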
+*/ +[[noreturn]] SK_API extern void sk_abort_no_print(void); + +#ifndef SkDebugf + SK_API void SkDebugf(const char format[], ...) SK_PRINTF_LIKE(1, 2); +#endif + +// SkASSERT, SkASSERTF and SkASSERT_RELEASE can be used as stand alone assertion expressions, e.g. +// uint32_t foo(int x) { +// SkASSERT(x > 4); +// return x - 4; +// } +// and are also written to be compatible with constexpr functions: +// constexpr uint32_t foo(int x) { +// return SkASSERT(x > 4), +// x - 4; +// } +#define SkASSERT_RELEASE(cond) \ + static_cast<void>( (cond) ? (void)0 : []{ SK_ABORT("assert(%s)", #cond); }() ) + +#ifdef SK_DEBUG + #define SkASSERT(cond) SkASSERT_RELEASE(cond) + #define SkASSERTF(cond, fmt, ...) static_cast<void>( (cond) ? (void)0 : [&]{ \ + SkDebugf(fmt"\n", ##__VA_ARGS__); \ + SK_ABORT("assert(%s)", #cond); \ + }() ) + #define SkDEBUGFAIL(message) SK_ABORT("%s", message) + #define SkDEBUGFAILF(fmt, ...) SK_ABORT(fmt, ##__VA_ARGS__) + #define SkDEBUGCODE(...) __VA_ARGS__ + #define SkDEBUGF(...) SkDebugf(__VA_ARGS__) + #define SkAssertResult(cond) SkASSERT(cond) +#else + #define SkASSERT(cond) static_cast<void>(0) + #define SkASSERTF(cond, fmt, ...) static_cast<void>(0) + #define SkDEBUGFAIL(message) + #define SkDEBUGFAILF(fmt, ...) + #define SkDEBUGCODE(...) + #define SkDEBUGF(...) + + // unlike SkASSERT, this macro executes its condition in the non-debug build. + // The if is present so that this can be used with functions marked SK_WARN_UNUSED_RESULT. + #define SkAssertResult(cond) if (cond) {} do {} while(false) +#endif + +//////////////////////////////////////////////////////////////////////////////// + +/** Fast type for unsigned 8 bits. Use for parameter passing and local + variables, not for storage +*/ +typedef unsigned U8CPU; + +/** Fast type for unsigned 16 bits. 
Use for parameter passing and local + variables, not for storage +*/ +typedef unsigned U16CPU; + +/** @return false or true based on the condition +*/ +template <typename T> static constexpr bool SkToBool(const T& x) { + return 0 != x; // NOLINT(modernize-use-nullptr) +} + +static constexpr int16_t SK_MaxS16 = INT16_MAX; +static constexpr int16_t SK_MinS16 = -SK_MaxS16; + +static constexpr int32_t SK_MaxS32 = INT32_MAX; +static constexpr int32_t SK_MinS32 = -SK_MaxS32; +static constexpr int32_t SK_NaN32 = INT32_MIN; + +static constexpr int64_t SK_MaxS64 = INT64_MAX; +static constexpr int64_t SK_MinS64 = -SK_MaxS64; + +static inline constexpr int32_t SkLeftShift(int32_t value, int32_t shift) { + return (int32_t) ((uint32_t) value << shift); +} + +static inline constexpr int64_t SkLeftShift(int64_t value, int32_t shift) { + return (int64_t) ((uint64_t) value << shift); +} + +//////////////////////////////////////////////////////////////////////////////// + +/** @return the number of entries in an array (not a pointer) +*/ +template <typename T, size_t N> char (&SkArrayCountHelper(T (&array)[N]))[N]; +#define SK_ARRAY_COUNT(array) (sizeof(SkArrayCountHelper(array))) + +//////////////////////////////////////////////////////////////////////////////// + +template <typename T> static constexpr T SkAlign2(T x) { return (x + 1) >> 1 << 1; } +template <typename T> static constexpr T SkAlign4(T x) { return (x + 3) >> 2 << 2; } +template <typename T> static constexpr T SkAlign8(T x) { return (x + 7) >> 3 << 3; } + +template <typename T> static constexpr bool SkIsAlign2(T x) { return 0 == (x & 1); } +template <typename T> static constexpr bool SkIsAlign4(T x) { return 0 == (x & 3); } +template <typename T> static constexpr bool SkIsAlign8(T x) { return 0 == (x & 7); } + +template <typename T> static constexpr T SkAlignPtr(T x) { + return sizeof(void*) == 8 ? SkAlign8(x) : SkAlign4(x); +} +template <typename T> static constexpr bool SkIsAlignPtr(T x) { + return sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x); +} + +/** + * align up to a power of 2 + */ +static inline constexpr size_t SkAlignTo(size_t x, size_t alignment) { + // The same as alignment && SkIsPow2(value), w/o a dependency cycle. + SkASSERT(alignment && (alignment & (alignment - 1)) == 0); + return (x + alignment - 1) & ~(alignment - 1); +} + +typedef uint32_t SkFourByteTag; +static inline constexpr SkFourByteTag SkSetFourByteTag(char a, char b, char c, char d) { + return (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d); +} + +//////////////////////////////////////////////////////////////////////////////// + +/** 32 bit integer to hold a unicode value +*/ +typedef int32_t SkUnichar; + +/** 16 bit unsigned integer to hold a glyph index +*/ +typedef uint16_t SkGlyphID; + +/** 32 bit value to hold a millisecond duration + Note that SK_MSecMax is about 25 days. +*/ +typedef uint32_t SkMSec; + +/** Maximum representable milliseconds; 24d 20h 31m 23.647s. +*/ +static constexpr SkMSec SK_MSecMax = INT32_MAX; + +/** The generation IDs in Skia reserve 0 has an invalid marker. +*/ +static constexpr uint32_t SK_InvalidGenID = 0; + +/** The unique IDs in Skia reserve 0 has an invalid marker. +*/ +static constexpr uint32_t SK_InvalidUniqueID = 0; + +static inline int32_t SkAbs32(int32_t value) { + SkASSERT(value != SK_NaN32); // The most negative int32_t can't be negated. 
+ if (value < 0) { + value = -value; + } + return value; +} + +template <typename T> static inline T SkTAbs(T value) { + if (value < 0) { + value = -value; + } + return value; +} + +//////////////////////////////////////////////////////////////////////////////// + +/** Indicates whether an allocation should count against a cache budget. +*/ +enum class SkBudgeted : bool { + kNo = false, + kYes = true +}; + +/** Indicates whether a backing store needs to be an exact match or can be + larger than is strictly necessary +*/ +enum class SkBackingFit { + kApprox, + kExact +}; + +#endif diff --git a/src/deps/skia/include/core/SkUnPreMultiply.h b/src/deps/skia/include/core/SkUnPreMultiply.h new file mode 100644 index 000000000..b492619d0 --- /dev/null +++ b/src/deps/skia/include/core/SkUnPreMultiply.h @@ -0,0 +1,56 @@ + +/* + * Copyright 2008 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + + + + + +#ifndef SkUnPreMultiply_DEFINED +#define SkUnPreMultiply_DEFINED + +#include "include/core/SkColor.h" + +class SK_API SkUnPreMultiply { +public: + typedef uint32_t Scale; + + // index this table with alpha [0..255] + static const Scale* GetScaleTable() { + return gTable; + } + + static Scale GetScale(U8CPU alpha) { + SkASSERT(alpha <= 255); + return gTable[alpha]; + } + + /** Usage: + + const Scale* table = SkUnPreMultiply::GetScaleTable(); + + for (...) { + unsigned a = ... + SkUnPreMultiply::Scale scale = table[a]; + + red = SkUnPreMultiply::ApplyScale(scale, red); + ... + // now red is unpremultiplied + } + */ + static U8CPU ApplyScale(Scale scale, U8CPU component) { + SkASSERT(component <= 255); + return (scale * component + (1 << 23)) >> 24; + } + + static SkColor PMColorToColor(SkPMColor c); + +private: + static const uint32_t gTable[256]; +}; + +#endif diff --git a/src/deps/skia/include/core/SkVertices.h b/src/deps/skia/include/core/SkVertices.h new file mode 100644 index 000000000..fc53e9d01 --- /dev/null +++ b/src/deps/skia/include/core/SkVertices.h @@ -0,0 +1,132 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkVertices_DEFINED +#define SkVertices_DEFINED + +#include "include/core/SkColor.h" +#include "include/core/SkRect.h" +#include "include/core/SkRefCnt.h" + +class SkData; +struct SkPoint; +class SkVerticesPriv; + +/** + * An immutable set of vertex data that can be used with SkCanvas::drawVertices. + */ +class SK_API SkVertices : public SkNVRefCnt<SkVertices> { + struct Desc; + struct Sizes; +public: + enum VertexMode { + kTriangles_VertexMode, + kTriangleStrip_VertexMode, + kTriangleFan_VertexMode, + + kLast_VertexMode = kTriangleFan_VertexMode, + }; + + /** + * Create a vertices by copying the specified arrays. texs, colors may be nullptr, + * and indices is ignored if indexCount == 0. 
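Stepping back to the SkUnPreMultiply helper vendored just above: its own comment sketches the table-driven loop, and the hedged example below fills it out to convert a row of premultiplied pixels back to plain SkColor values, using only this header, SkColor.h, and the channel-shift macros from SkTypes.h. unpremultiplyRow is a made-up helper name.

#include "include/core/SkColor.h"
#include "include/core/SkUnPreMultiply.h"

// Convert a row of premultiplied SkPMColor pixels to unpremultiplied SkColor.
// Fetching the scale table once amortizes the lookup across the whole row.
static void unpremultiplyRow(const SkPMColor src[], SkColor dst[], int count) {
    const SkUnPreMultiply::Scale* table = SkUnPreMultiply::GetScaleTable();
    for (int i = 0; i < count; ++i) {
        U8CPU a = (src[i] >> SK_A32_SHIFT) & 0xFF;
        if (0 == a) {
            dst[i] = SK_ColorTRANSPARENT;
            continue;
        }
        SkUnPreMultiply::Scale scale = table[a];
        U8CPU r = SkUnPreMultiply::ApplyScale(scale, (src[i] >> SK_R32_SHIFT) & 0xFF);
        U8CPU g = SkUnPreMultiply::ApplyScale(scale, (src[i] >> SK_G32_SHIFT) & 0xFF);
        U8CPU b = SkUnPreMultiply::ApplyScale(scale, (src[i] >> SK_B32_SHIFT) & 0xFF);
        dst[i] = SkColorSetARGB(a, r, g, b);
    }
}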
+ */ + static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount, + const SkPoint positions[], + const SkPoint texs[], + const SkColor colors[], + int indexCount, + const uint16_t indices[]); + + static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount, + const SkPoint positions[], + const SkPoint texs[], + const SkColor colors[]) { + return MakeCopy(mode, + vertexCount, + positions, + texs, + colors, + 0, + nullptr); + } + + enum BuilderFlags { + kHasTexCoords_BuilderFlag = 1 << 0, + kHasColors_BuilderFlag = 1 << 1, + }; + class Builder { + public: + Builder(VertexMode mode, int vertexCount, int indexCount, uint32_t flags); + + bool isValid() const { return fVertices != nullptr; } + + SkPoint* positions(); + uint16_t* indices(); // returns null if there are no indices + + // If we have custom attributes, these will always be null + SkPoint* texCoords(); // returns null if there are no texCoords + SkColor* colors(); // returns null if there are no colors + + // Detach the built vertices object. After the first call, this will always return null. + sk_sp<SkVertices> detach(); + + private: + Builder(const Desc&); + + void init(const Desc&); + + // holds a partially complete object. only completed in detach() + sk_sp<SkVertices> fVertices; + // Extra storage for intermediate vertices in the case where the client specifies indexed + // triangle fans. These get converted to indexed triangles when the Builder is finalized. + std::unique_ptr<uint8_t[]> fIntermediateFanIndices; + + friend class SkVertices; + friend class SkVerticesPriv; + }; + + uint32_t uniqueID() const { return fUniqueID; } + const SkRect& bounds() const { return fBounds; } + + // returns approximate byte size of the vertices object + size_t approximateSize() const; + + // Provides access to functions that aren't part of the public API. + SkVerticesPriv priv(); + const SkVerticesPriv priv() const; // NOLINT(readability-const-return-type) + +private: + SkVertices() {} + + friend class SkVerticesPriv; + + // these are needed since we've manually sized our allocation (see Builder::init) + friend class SkNVRefCnt<SkVertices>; + void operator delete(void* p); + + Sizes getSizes() const; + + // we store this first, to pair with the refcnt in our base-class, so we don't have an + // unnecessary pad between it and the (possibly 8-byte aligned) ptrs. + uint32_t fUniqueID; + + // these point inside our allocation, so none of these can be "freed" + SkPoint* fPositions; // [vertexCount] + uint16_t* fIndices; // [indexCount] or null + SkPoint* fTexs; // [vertexCount] or null + SkColor* fColors; // [vertexCount] or null + + SkRect fBounds; // computed to be the union of the fPositions[] + int fVertexCount; + int fIndexCount; + + VertexMode fMode; + // below here is where the actual array data is stored. +}; + +#endif diff --git a/src/deps/skia/include/core/SkYUVAInfo.h b/src/deps/skia/include/core/SkYUVAInfo.h new file mode 100644 index 000000000..a3cf210f3 --- /dev/null +++ b/src/deps/skia/include/core/SkYUVAInfo.h @@ -0,0 +1,304 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkYUVAInfo_DEFINED +#define SkYUVAInfo_DEFINED + +#include "include/codec/SkEncodedOrigin.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkSize.h" + +#include <array> +#include <tuple> + +/** + * Specifies the structure of planes for a YUV image with optional alpha. 
The actual planar data + * is not part of this structure and depending on usage is in external textures or pixmaps. + */ +class SK_API SkYUVAInfo { +public: + enum YUVAChannels { kY, kU, kV, kA, kLast = kA }; + static constexpr int kYUVAChannelCount = static_cast<int>(YUVAChannels::kLast + 1); + + struct YUVALocation; // For internal use. + using YUVALocations = std::array<YUVALocation, kYUVAChannelCount>; + + /** + * Specifies how YUV (and optionally A) are divided among planes. Planes are separated by + * underscores in the enum value names. Within each plane the pixmap/texture channels are + * mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane + * 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering + * within a pixmap/texture given the channels it contains: + * A: 0:A + * Luminance/Gray: 0:Gray + * Luminance/Gray + Alpha: 0:Gray, 1:A + * RG 0:R, 1:G + * RGB 0:R, 1:G, 2:B + * RGBA 0:R, 1:G, 2:B, 3:A + */ + enum class PlaneConfig { + kUnknown, + + kY_U_V, ///< Plane 0: Y, Plane 1: U, Plane 2: V + kY_V_U, ///< Plane 0: Y, Plane 1: V, Plane 2: U + kY_UV, ///< Plane 0: Y, Plane 1: UV + kY_VU, ///< Plane 0: Y, Plane 1: VU + kYUV, ///< Plane 0: YUV + kUYV, ///< Plane 0: UYV + + kY_U_V_A, ///< Plane 0: Y, Plane 1: U, Plane 2: V, Plane 3: A + kY_V_U_A, ///< Plane 0: Y, Plane 1: V, Plane 2: U, Plane 3: A + kY_UV_A, ///< Plane 0: Y, Plane 1: UV, Plane 2: A + kY_VU_A, ///< Plane 0: Y, Plane 1: VU, Plane 2: A + kYUVA, ///< Plane 0: YUVA + kUYVA, ///< Plane 0: UYVA + + kLast = kUYVA + }; + + /** + * UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is + * 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub- + * sampled. Note that Subsampling values other than k444 are only valid with PlaneConfig values + * that have U and V in different planes than Y (and A, if present). + */ + enum class Subsampling { + kUnknown, + + k444, ///< No subsampling. UV values for each Y. + k422, ///< 1 set of UV values for each 2x1 block of Y values. + k420, ///< 1 set of UV values for each 2x2 block of Y values. + k440, ///< 1 set of UV values for each 1x2 block of Y values. + k411, ///< 1 set of UV values for each 4x1 block of Y values. + k410, ///< 1 set of UV values for each 4x2 block of Y values. + + kLast = k410 + }; + + /** + * Describes how subsampled chroma values are sited relative to luma values. + * + * Currently only centered siting is supported but will expand to support additional sitings. + */ + enum class Siting { + /** + * Subsampled chroma value is sited at the center of the block of corresponding luma values. + */ + kCentered, + }; + + static constexpr int kMaxPlanes = 4; + + /** ratio of Y/A values to U/V values in x and y. */ + static std::tuple<int, int> SubsamplingFactors(Subsampling); + + /** + * SubsamplingFactors(Subsampling) if planedIdx refers to a U/V plane and otherwise {1, 1} if + * inputs are valid. Invalid inputs consist of incompatible PlaneConfig/Subsampling/planeIdx + * combinations. {0, 0} is returned for invalid inputs. + */ + static std::tuple<int, int> PlaneSubsamplingFactors(PlaneConfig, Subsampling, int planeIdx); + + /** + * Given image dimensions, a planer configuration, subsampling, and origin, determine the + * expected size of each plane. Returns the number of expected planes. planeDimensions[0] + * through planeDimensions[<ret>] are written. 
The input image dimensions are as displayed + * (after the planes have been transformed to the intended display orientation). The plane + * dimensions are output as the planes are stored in memory (may be rotated from image + * dimensions). + */ + static int PlaneDimensions(SkISize imageDimensions, + PlaneConfig, + Subsampling, + SkEncodedOrigin, + SkISize planeDimensions[kMaxPlanes]); + + /** Number of planes for a given PlaneConfig. */ + static constexpr int NumPlanes(PlaneConfig); + + /** + * Number of Y, U, V, A channels in the ith plane for a given PlaneConfig (or 0 if i is + * invalid). + */ + static constexpr int NumChannelsInPlane(PlaneConfig, int i); + + /** + * Given a PlaneConfig and a set of channel flags for each plane, convert to YUVALocations + * representation. Fails if channel flags aren't valid for the PlaneConfig (i.e. don't have + * enough channels in a plane) by returning an invalid set of locations (plane indices are -1). + */ + static YUVALocations GetYUVALocations(PlaneConfig, const uint32_t* planeChannelFlags); + + /** Does the PlaneConfig have alpha values? */ + static bool HasAlpha(PlaneConfig); + + SkYUVAInfo() = default; + SkYUVAInfo(const SkYUVAInfo&) = default; + + /** + * 'dimensions' should specify the size of the full resolution image (after planes have been + * oriented to how the image is displayed as indicated by 'origin'). + */ + SkYUVAInfo(SkISize dimensions, + PlaneConfig, + Subsampling, + SkYUVColorSpace, + SkEncodedOrigin origin = kTopLeft_SkEncodedOrigin, + Siting sitingX = Siting::kCentered, + Siting sitingY = Siting::kCentered); + + SkYUVAInfo& operator=(const SkYUVAInfo& that) = default; + + PlaneConfig planeConfig() const { return fPlaneConfig; } + Subsampling subsampling() const { return fSubsampling; } + + std::tuple<int, int> planeSubsamplingFactors(int planeIdx) const { + return PlaneSubsamplingFactors(fPlaneConfig, fSubsampling, planeIdx); + } + + /** + * Dimensions of the full resolution image (after planes have been oriented to how the image + * is displayed as indicated by fOrigin). + */ + SkISize dimensions() const { return fDimensions; } + int width() const { return fDimensions.width(); } + int height() const { return fDimensions.height(); } + + SkYUVColorSpace yuvColorSpace() const { return fYUVColorSpace; } + Siting sitingX() const { return fSitingX; } + Siting sitingY() const { return fSitingY; } + + SkEncodedOrigin origin() const { return fOrigin; } + + SkMatrix originMatrix() const { + return SkEncodedOriginToMatrix(fOrigin, this->width(), this->height()); + } + + bool hasAlpha() const { return HasAlpha(fPlaneConfig); } + + /** + * Returns the number of planes and initializes planeDimensions[0]..planeDimensions[<ret>] to + * the expected dimensions for each plane. Dimensions are as stored in memory, before + * transformation to image display space as indicated by origin(). + */ + int planeDimensions(SkISize planeDimensions[kMaxPlanes]) const { + return PlaneDimensions(fDimensions, fPlaneConfig, fSubsampling, fOrigin, planeDimensions); + } + + /** + * Given a per-plane row bytes, determine size to allocate for all planes. Optionally retrieves + * the per-plane byte sizes in planeSizes if not null. If total size overflows will return + * SIZE_MAX and set all planeSizes to SIZE_MAX. 
+ */ + size_t computeTotalBytes(const size_t rowBytes[kMaxPlanes], + size_t planeSizes[kMaxPlanes] = nullptr) const; + + int numPlanes() const { return NumPlanes(fPlaneConfig); } + + int numChannelsInPlane(int i) const { return NumChannelsInPlane(fPlaneConfig, i); } + + /** + * Given a set of channel flags for each plane, converts this->planeConfig() to YUVALocations + * representation. Fails if the channel flags aren't valid for the PlaneConfig (i.e. don't have + * enough channels in a plane) by returning default initialized locations (all plane indices are + * -1). + */ + YUVALocations toYUVALocations(const uint32_t* channelFlags) const; + + /** + * Makes a SkYUVAInfo that is identical to this one but with the passed Subsampling. If the + * passed Subsampling is not k444 and this info's PlaneConfig is not compatible with chroma + * subsampling (because Y is in the same plane as UV) then the result will be an invalid + * SkYUVAInfo. + */ + SkYUVAInfo makeSubsampling(SkYUVAInfo::Subsampling) const; + + /** + * Makes a SkYUVAInfo that is identical to this one but with the passed dimensions. If the + * passed dimensions is empty then the result will be an invalid SkYUVAInfo. + */ + SkYUVAInfo makeDimensions(SkISize) const; + + bool operator==(const SkYUVAInfo& that) const; + bool operator!=(const SkYUVAInfo& that) const { return !(*this == that); } + + bool isValid() const { return fPlaneConfig != PlaneConfig::kUnknown; } + +private: + SkISize fDimensions = {0, 0}; + + PlaneConfig fPlaneConfig = PlaneConfig::kUnknown; + Subsampling fSubsampling = Subsampling::kUnknown; + + SkYUVColorSpace fYUVColorSpace = SkYUVColorSpace::kIdentity_SkYUVColorSpace; + + /** + * YUVA data often comes from formats like JPEG that support EXIF orientation. + * Code that operates on the raw YUV data often needs to know that orientation. + */ + SkEncodedOrigin fOrigin = kTopLeft_SkEncodedOrigin; + + Siting fSitingX = Siting::kCentered; + Siting fSitingY = Siting::kCentered; +}; + +constexpr int SkYUVAInfo::NumPlanes(PlaneConfig planeConfig) { + switch (planeConfig) { + case PlaneConfig::kUnknown: return 0; + case PlaneConfig::kY_U_V: return 3; + case PlaneConfig::kY_V_U: return 3; + case PlaneConfig::kY_UV: return 2; + case PlaneConfig::kY_VU: return 2; + case PlaneConfig::kYUV: return 1; + case PlaneConfig::kUYV: return 1; + case PlaneConfig::kY_U_V_A: return 4; + case PlaneConfig::kY_V_U_A: return 4; + case PlaneConfig::kY_UV_A: return 3; + case PlaneConfig::kY_VU_A: return 3; + case PlaneConfig::kYUVA: return 1; + case PlaneConfig::kUYVA: return 1; + } + SkUNREACHABLE; +} + +constexpr int SkYUVAInfo::NumChannelsInPlane(PlaneConfig config, int i) { + switch (config) { + case PlaneConfig::kUnknown: + return 0; + + case SkYUVAInfo::PlaneConfig::kY_U_V: + case SkYUVAInfo::PlaneConfig::kY_V_U: + return i >= 0 && i < 3 ? 1 : 0; + case SkYUVAInfo::PlaneConfig::kY_UV: + case SkYUVAInfo::PlaneConfig::kY_VU: + switch (i) { + case 0: return 1; + case 1: return 2; + default: return 0; + } + case SkYUVAInfo::PlaneConfig::kYUV: + case SkYUVAInfo::PlaneConfig::kUYV: + return i == 0 ? 3 : 0; + case SkYUVAInfo::PlaneConfig::kY_U_V_A: + case SkYUVAInfo::PlaneConfig::kY_V_U_A: + return i >= 0 && i < 4 ? 1 : 0; + case SkYUVAInfo::PlaneConfig::kY_UV_A: + case SkYUVAInfo::PlaneConfig::kY_VU_A: + switch (i) { + case 0: return 1; + case 1: return 2; + case 2: return 1; + default: return 0; + } + case SkYUVAInfo::PlaneConfig::kYUVA: + case SkYUVAInfo::PlaneConfig::kUYVA: + return i == 0 ? 
4 : 0; + } + return 0; +} + +#endif diff --git a/src/deps/skia/include/core/SkYUVAPixmaps.h b/src/deps/skia/include/core/SkYUVAPixmaps.h new file mode 100644 index 000000000..de04ab14d --- /dev/null +++ b/src/deps/skia/include/core/SkYUVAPixmaps.h @@ -0,0 +1,336 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkYUVAPixmaps_DEFINED +#define SkYUVAPixmaps_DEFINED + +#include "include/core/SkData.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkPixmap.h" +#include "include/core/SkYUVAInfo.h" +#include "include/private/SkTo.h" + +#include <array> +#include <bitset> + +class GrImageContext; + +/** + * SkYUVAInfo combined with per-plane SkColorTypes and row bytes. Fully specifies the SkPixmaps + * for a YUVA image without the actual pixel memory and data. + */ +class SK_API SkYUVAPixmapInfo { +public: + static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes; + + using PlaneConfig = SkYUVAInfo::PlaneConfig; + using Subsampling = SkYUVAInfo::Subsampling; + + /** + * Data type for Y, U, V, and possibly A channels independent of how values are packed into + * planes. + **/ + enum class DataType { + kUnorm8, ///< 8 bit unsigned normalized + kUnorm16, ///< 16 bit unsigned normalized + kFloat16, ///< 16 bit (half) floating point + kUnorm10_Unorm2, ///< 10 bit unorm for Y, U, and V. 2 bit unorm for alpha (if present). + + kLast = kUnorm10_Unorm2 + }; + static constexpr int kDataTypeCnt = static_cast<int>(DataType::kLast) + 1; + + class SK_API SupportedDataTypes { + public: + /** Defaults to nothing supported. */ + constexpr SupportedDataTypes() = default; + + /** Init based on texture formats supported by the context. */ + SupportedDataTypes(const GrImageContext&); + + /** All legal combinations of PlaneConfig and DataType are supported. */ + static constexpr SupportedDataTypes All(); + + /** + * Checks whether there is a supported combination of color types for planes structured + * as indicated by PlaneConfig with channel data types as indicated by DataType. + */ + constexpr bool supported(PlaneConfig, DataType) const; + + /** + * Update to add support for pixmaps with numChannel channels where each channel is + * represented as DataType. + */ + void enableDataType(DataType, int numChannels); + + private: + // The bit for DataType dt with n channels is at index kDataTypeCnt*(n-1) + dt. + std::bitset<kDataTypeCnt*4> fDataTypeSupport = {}; + }; + + /** + * Gets the default SkColorType to use with numChannels channels, each represented as DataType. + * Returns kUnknown_SkColorType if no such color type. + */ + static constexpr SkColorType DefaultColorTypeForDataType(DataType dataType, int numChannels); + + /** + * If the SkColorType is supported for YUVA pixmaps this will return the number of YUVA channels + * that can be stored in a plane of this color type and what the DataType is of those channels. + * If the SkColorType is not supported as a YUVA plane the number of channels is reported as 0 + * and the DataType returned should be ignored. + */ + static std::tuple<int, DataType> NumChannelsAndDataType(SkColorType); + + /** Default SkYUVAPixmapInfo is invalid. */ + SkYUVAPixmapInfo() = default; + + /** + * Initializes the SkYUVAPixmapInfo from a SkYUVAInfo with per-plane color types and row bytes. + * This will be invalid if the colorTypes aren't compatible with the SkYUVAInfo or if a + * rowBytes entry is not valid for the plane dimensions and color type. 
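With SkYUVAInfo now fully declared above, a hedged sketch of describing an NV12-style image (full-resolution Y plane plus an interleaved, 4:2:0-subsampled UV plane) and sizing its planes. The 1920x1080 dimensions and tightly packed row bytes are arbitrary example values, and describeNv12 is a made-up name; with k420 subsampling the UV plane comes out at half resolution in each axis, i.e. 960x540.

#include "include/core/SkYUVAInfo.h"

#include <cstddef>
#include <cstdio>

// Describe a 1920x1080 image stored as Y plane + interleaved UV plane with
// 4:2:0 chroma subsampling, then compute the bytes needed for packed rows.
static void describeNv12() {
    SkYUVAInfo info({1920, 1080},
                    SkYUVAInfo::PlaneConfig::kY_UV,
                    SkYUVAInfo::Subsampling::k420,
                    kRec709_SkYUVColorSpace);
    if (!info.isValid()) {
        return;
    }

    SkISize planeDims[SkYUVAInfo::kMaxPlanes];
    int n = info.planeDimensions(planeDims);  // 2 planes: 1920x1080 and 960x540.

    size_t rowBytes[SkYUVAInfo::kMaxPlanes] = {};
    for (int i = 0; i < n; ++i) {
        // One byte per channel: 1 channel in the Y plane, 2 in the UV plane.
        rowBytes[i] = (size_t)planeDims[i].width() * info.numChannelsInPlane(i);
    }

    size_t planeSizes[SkYUVAInfo::kMaxPlanes];
    size_t total = info.computeTotalBytes(rowBytes, planeSizes);
    std::printf("planes=%d totalBytes=%zu\n", n, total);
}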
Color type and + * row byte values beyond the number of planes in SkYUVAInfo are ignored. All SkColorTypes + * must have the same DataType or this will be invalid. + * + * If rowBytes is nullptr then bpp*width is assumed for each plane. + */ + SkYUVAPixmapInfo(const SkYUVAInfo&, + const SkColorType[kMaxPlanes], + const size_t rowBytes[kMaxPlanes]); + /** + * Like above but uses DefaultColorTypeForDataType to determine each plane's SkColorType. If + * rowBytes is nullptr then bpp*width is assumed for each plane. + */ + SkYUVAPixmapInfo(const SkYUVAInfo&, DataType, const size_t rowBytes[kMaxPlanes]); + + SkYUVAPixmapInfo(const SkYUVAPixmapInfo&) = default; + + SkYUVAPixmapInfo& operator=(const SkYUVAPixmapInfo&) = default; + + bool operator==(const SkYUVAPixmapInfo&) const; + bool operator!=(const SkYUVAPixmapInfo& that) const { return !(*this == that); } + + const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; } + + SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); } + + /** The number of SkPixmap planes, 0 if this SkYUVAPixmapInfo is invalid. */ + int numPlanes() const { return fYUVAInfo.numPlanes(); } + + /** The per-YUV[A] channel data type. */ + DataType dataType() const { return fDataType; } + + /** + * Row bytes for the ith plane. Returns zero if i >= numPlanes() or this SkYUVAPixmapInfo is + * invalid. + */ + size_t rowBytes(int i) const { return fRowBytes[static_cast<size_t>(i)]; } + + /** Image info for the ith plane, or default SkImageInfo if i >= numPlanes() */ + const SkImageInfo& planeInfo(int i) const { return fPlaneInfos[static_cast<size_t>(i)]; } + + /** + * Determine size to allocate for all planes. Optionally retrieves the per-plane sizes in + * planeSizes if not null. If total size overflows will return SIZE_MAX and set all planeSizes + * to SIZE_MAX. Returns 0 and fills planesSizes with 0 if this SkYUVAPixmapInfo is not valid. + */ + size_t computeTotalBytes(size_t planeSizes[kMaxPlanes] = nullptr) const; + + /** + * Takes an allocation that is assumed to be at least computeTotalBytes() in size and configures + * the first numPlanes() entries in pixmaps array to point into that memory. The remaining + * entries of pixmaps are default initialized. Fails if this SkYUVAPixmapInfo not valid. + */ + bool initPixmapsFromSingleAllocation(void* memory, SkPixmap pixmaps[kMaxPlanes]) const; + + /** + * Returns true if this has been configured with a non-empty dimensioned SkYUVAInfo with + * compatible color types and row bytes. + */ + bool isValid() const { return fYUVAInfo.isValid(); } + + /** Is this valid and does it use color types allowed by the passed SupportedDataTypes? */ + bool isSupported(const SupportedDataTypes&) const; + +private: + SkYUVAInfo fYUVAInfo; + std::array<SkImageInfo, kMaxPlanes> fPlaneInfos = {}; + std::array<size_t, kMaxPlanes> fRowBytes = {}; + DataType fDataType = DataType::kUnorm8; + static_assert(kUnknown_SkColorType == 0, "default init isn't kUnknown"); +}; + +/** + * Helper to store SkPixmap planes as described by a SkYUVAPixmapInfo. Can be responsible for + * allocating/freeing memory for pixmaps or use external memory. + */ +class SK_API SkYUVAPixmaps { +public: + using DataType = SkYUVAPixmapInfo::DataType; + static constexpr auto kMaxPlanes = SkYUVAPixmapInfo::kMaxPlanes; + + static SkColorType RecommendedRGBAColorType(DataType); + + /** Allocate space for pixmaps' pixels in the SkYUVAPixmaps. 
*/ + static SkYUVAPixmaps Allocate(const SkYUVAPixmapInfo& yuvaPixmapInfo); + + /** + * Use storage in SkData as backing store for pixmaps' pixels. SkData is retained by the + * SkYUVAPixmaps. + */ + static SkYUVAPixmaps FromData(const SkYUVAPixmapInfo&, sk_sp<SkData>); + + /** + * Makes a deep copy of the src SkYUVAPixmaps. The returned SkYUVAPixmaps owns its planes' + * backing stores. + */ + static SkYUVAPixmaps MakeCopy(const SkYUVAPixmaps& src); + + /** + * Use passed in memory as backing store for pixmaps' pixels. Caller must ensure memory remains + * allocated while pixmaps are in use. There must be at least + * SkYUVAPixmapInfo::computeTotalBytes() allocated starting at memory. + */ + static SkYUVAPixmaps FromExternalMemory(const SkYUVAPixmapInfo&, void* memory); + + /** + * Wraps existing SkPixmaps. The SkYUVAPixmaps will have no ownership of the SkPixmaps' pixel + * memory so the caller must ensure it remains valid. Will return an invalid SkYUVAPixmaps if + * the SkYUVAInfo isn't compatible with the SkPixmap array (number of planes, plane dimensions, + * sufficient color channels in planes, ...). + */ + static SkYUVAPixmaps FromExternalPixmaps(const SkYUVAInfo&, const SkPixmap[kMaxPlanes]); + + /** Default SkYUVAPixmaps is invalid. */ + SkYUVAPixmaps() = default; + ~SkYUVAPixmaps() = default; + + SkYUVAPixmaps(SkYUVAPixmaps&& that) = default; + SkYUVAPixmaps& operator=(SkYUVAPixmaps&& that) = default; + SkYUVAPixmaps(const SkYUVAPixmaps&) = default; + SkYUVAPixmaps& operator=(const SkYUVAPixmaps& that) = default; + + /** Does have initialized pixmaps compatible with its SkYUVAInfo. */ + bool isValid() const { return !fYUVAInfo.dimensions().isEmpty(); } + + const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; } + + DataType dataType() const { return fDataType; } + + SkYUVAPixmapInfo pixmapsInfo() const; + + /** Number of pixmap planes or 0 if this SkYUVAPixmaps is invalid. */ + int numPlanes() const { return this->isValid() ? fYUVAInfo.numPlanes() : 0; } + + /** + * Access the SkPixmap planes. They are default initialized if this is not a valid + * SkYUVAPixmaps. + */ + const std::array<SkPixmap, kMaxPlanes>& planes() const { return fPlanes; } + + /** + * Get the ith SkPixmap plane. SkPixmap will be default initialized if i >= numPlanes or this + * SkYUVAPixmaps is invalid. + */ + const SkPixmap& plane(int i) const { return fPlanes[SkToSizeT(i)]; } + + /** + * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be + * valid if this->isValid(). + */ + SkYUVAInfo::YUVALocations toYUVALocations() const; + + /** Does this SkPixmaps own the backing store of the planes? */ + bool ownsStorage() const { return SkToBool(fData); } + +private: + SkYUVAPixmaps(const SkYUVAPixmapInfo&, sk_sp<SkData>); + SkYUVAPixmaps(const SkYUVAInfo&, DataType, const SkPixmap[kMaxPlanes]); + + std::array<SkPixmap, kMaxPlanes> fPlanes = {}; + sk_sp<SkData> fData; + SkYUVAInfo fYUVAInfo; + DataType fDataType; +}; + +////////////////////////////////////////////////////////////////////////////// + +constexpr SkYUVAPixmapInfo::SupportedDataTypes SkYUVAPixmapInfo::SupportedDataTypes::All() { + using ULL = unsigned long long; // bitset cons. takes this. 
+ ULL bits = 0; + for (ULL c = 1; c <= 4; ++c) { + for (ULL dt = 0; dt <= ULL(kDataTypeCnt); ++dt) { + if (DefaultColorTypeForDataType(static_cast<DataType>(dt), + static_cast<int>(c)) != kUnknown_SkColorType) { + bits |= ULL(1) << (dt + static_cast<ULL>(kDataTypeCnt)*(c - 1)); + } + } + } + SupportedDataTypes combinations; + combinations.fDataTypeSupport = bits; + return combinations; +} + +constexpr bool SkYUVAPixmapInfo::SupportedDataTypes::supported(PlaneConfig config, + DataType type) const { + int n = SkYUVAInfo::NumPlanes(config); + for (int i = 0; i < n; ++i) { + auto c = static_cast<size_t>(SkYUVAInfo::NumChannelsInPlane(config, i)); + SkASSERT(c >= 1 && c <= 4); + if (!fDataTypeSupport[static_cast<size_t>(type) + + (c - 1)*static_cast<size_t>(kDataTypeCnt)]) { + return false; + } + } + return true; +} + +constexpr SkColorType SkYUVAPixmapInfo::DefaultColorTypeForDataType(DataType dataType, + int numChannels) { + switch (numChannels) { + case 1: + switch (dataType) { + case DataType::kUnorm8: return kGray_8_SkColorType; + case DataType::kUnorm16: return kA16_unorm_SkColorType; + case DataType::kFloat16: return kA16_float_SkColorType; + case DataType::kUnorm10_Unorm2: return kUnknown_SkColorType; + } + break; + case 2: + switch (dataType) { + case DataType::kUnorm8: return kR8G8_unorm_SkColorType; + case DataType::kUnorm16: return kR16G16_unorm_SkColorType; + case DataType::kFloat16: return kR16G16_float_SkColorType; + case DataType::kUnorm10_Unorm2: return kUnknown_SkColorType; + } + break; + case 3: + // None of these are tightly packed. The intended use case is for interleaved YUVA + // planes where we're forcing opaqueness by ignoring the alpha values. + // There are "x" rather than "A" variants for Unorm8 and Unorm10_Unorm2 but we don't + // choose them because 1) there is no inherent advantage and 2) there is better support + // in the GPU backend for the "A" versions. 
+ switch (dataType) { + case DataType::kUnorm8: return kRGBA_8888_SkColorType; + case DataType::kUnorm16: return kR16G16B16A16_unorm_SkColorType; + case DataType::kFloat16: return kRGBA_F16_SkColorType; + case DataType::kUnorm10_Unorm2: return kRGBA_1010102_SkColorType; + } + break; + case 4: + switch (dataType) { + case DataType::kUnorm8: return kRGBA_8888_SkColorType; + case DataType::kUnorm16: return kR16G16B16A16_unorm_SkColorType; + case DataType::kFloat16: return kRGBA_F16_SkColorType; + case DataType::kUnorm10_Unorm2: return kRGBA_1010102_SkColorType; + } + break; + } + return kUnknown_SkColorType; +} + +#endif diff --git a/src/deps/skia/include/docs/BUILD.bazel b/src/deps/skia/include/docs/BUILD.bazel new file mode 100644 index 000000000..c59bf7cae --- /dev/null +++ b/src/deps/skia/include/docs/BUILD.bazel @@ -0,0 +1,26 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkPDFDocument_hdr", + hdrs = ["SkPDFDocument.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkColor_hdr", + "//include/core:SkDocument_hdr", + "//include/core:SkMilestone_hdr", + "//include/core:SkScalar_hdr", + "//include/core:SkString_hdr", + "//include/core:SkTime_hdr", + "//include/private:SkNoncopyable_hdr", + ], +) + +generated_cc_atom( + name = "SkXPSDocument_hdr", + hdrs = ["SkXPSDocument.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkDocument_hdr", + "//include/core:SkTypes_hdr", + ], +) diff --git a/src/deps/skia/include/docs/SkPDFDocument.h b/src/deps/skia/include/docs/SkPDFDocument.h new file mode 100644 index 000000000..69e7c6b85 --- /dev/null +++ b/src/deps/skia/include/docs/SkPDFDocument.h @@ -0,0 +1,196 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. +#ifndef SkPDFDocument_DEFINED +#define SkPDFDocument_DEFINED + +#include "include/core/SkDocument.h" + +#include <vector> + +#include "include/core/SkColor.h" +#include "include/core/SkMilestone.h" +#include "include/core/SkScalar.h" +#include "include/core/SkString.h" +#include "include/core/SkTime.h" +#include "include/private/SkNoncopyable.h" + +#define SKPDF_STRING(X) SKPDF_STRING_IMPL(X) +#define SKPDF_STRING_IMPL(X) #X + +class SkExecutor; +class SkPDFArray; +class SkPDFTagTree; + +namespace SkPDF { + +/** Attributes for nodes in the PDF tree. */ +class SK_API AttributeList : SkNoncopyable { +public: + AttributeList(); + ~AttributeList(); + + // Each attribute must have an owner (e.g. "Layout", "List", "Table", etc) + // and an attribute name (e.g. "BBox", "RowSpan", etc.) from PDF32000_2008 14.8.5, + // and then a value of the proper type according to the spec. + void appendInt(const char* owner, const char* name, int value); + void appendFloat(const char* owner, const char* name, float value); + void appendName(const char* owner, const char* attrName, const char* value); + void appendString(const char* owner, const char* attrName, const char* value); + void appendFloatArray(const char* owner, + const char* name, + const std::vector<float>& value); + // Deprecated. + void appendStringArray(const char* owner, + const char* attrName, + const std::vector<SkString>& values); + void appendNodeIdArray(const char* owner, + const char* attrName, + const std::vector<int>& nodeIds); + +private: + friend class ::SkPDFTagTree; + + std::unique_ptr<SkPDFArray> fAttrs; +}; + +/** A node in a PDF structure tree, giving a semantic representation + of the content. 
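Looking back at the SkYUVAPixmaps helpers vendored just above: they either own the plane memory (Allocate, FromData) or wrap caller-owned memory (FromExternalMemory, FromExternalPixmaps). A hedged sketch that continues the NV12 example and lets Allocate own the storage; allocateNv12Planes is a made-up name.

#include "include/core/SkYUVAInfo.h"
#include "include/core/SkYUVAPixmaps.h"

#include <cstdio>

// Allocate self-owned storage for an NV12-style image and inspect the planes.
static void allocateNv12Planes() {
    SkYUVAInfo info({1920, 1080},
                    SkYUVAInfo::PlaneConfig::kY_UV,
                    SkYUVAInfo::Subsampling::k420,
                    kRec709_SkYUVColorSpace);

    // One byte per channel in every plane; rowBytes == nullptr means
    // bpp * width is assumed for each plane, per the constructor above.
    SkYUVAPixmapInfo pixmapInfo(info, SkYUVAPixmapInfo::DataType::kUnorm8,
                                /*rowBytes=*/nullptr);

    SkYUVAPixmaps pixmaps = SkYUVAPixmaps::Allocate(pixmapInfo);
    if (!pixmaps.isValid()) {
        return;
    }

    for (int i = 0; i < pixmaps.numPlanes(); ++i) {
        const SkPixmap& plane = pixmaps.plane(i);
        std::printf("plane %d: %d x %d, rowBytes=%zu\n",
                    i, plane.width(), plane.height(), plane.rowBytes());
        // The backing memory is owned by 'pixmaps'; plane.writable_addr()
        // exposes it for filling in decoded pixel data.
    }
}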
Each node ID is associated with content + by passing the SkCanvas and node ID to SkPDF::SetNodeId() when drawing. + NodeIDs should be unique within each tree. +*/ +struct StructureElementNode { + SkString fTypeString; + std::vector<std::unique_ptr<StructureElementNode>> fChildVector; + int fNodeId = 0; + std::vector<int> fAdditionalNodeIds; + AttributeList fAttributes; + SkString fAlt; + SkString fLang; +}; + +/** Optional metadata to be passed into the PDF factory function. +*/ +struct Metadata { + /** The document's title. + */ + SkString fTitle; + + /** The name of the person who created the document. + */ + SkString fAuthor; + + /** The subject of the document. + */ + SkString fSubject; + + /** Keywords associated with the document. Commas may be used to delineate + keywords within the string. + */ + SkString fKeywords; + + /** If the document was converted to PDF from another format, + the name of the conforming product that created the + original document from which it was converted. + */ + SkString fCreator; + + /** The product that is converting this document to PDF. + */ + SkString fProducer = SkString("Skia/PDF m" SKPDF_STRING(SK_MILESTONE)); + + /** The date and time the document was created. + The zero default value represents an unknown/unset time. + */ + SkTime::DateTime fCreation = {0, 0, 0, 0, 0, 0, 0, 0}; + + /** The date and time the document was most recently modified. + The zero default value represents an unknown/unset time. + */ + SkTime::DateTime fModified = {0, 0, 0, 0, 0, 0, 0, 0}; + + /** The DPI (pixels-per-inch) at which features without native PDF support + will be rasterized (e.g. draw image with perspective, draw text with + perspective, ...) A larger DPI would create a PDF that reflects the + original intent with better fidelity, but it can make for larger PDF + files too, which would use more memory while rendering, and it would be + slower to be processed or sent online or to printer. + */ + SkScalar fRasterDPI = SK_ScalarDefaultRasterDPI; + + /** If true, include XMP metadata, a document UUID, and sRGB output intent + information. This adds length to the document and makes it + non-reproducable, but are necessary features for PDF/A-2b conformance + */ + bool fPDFA = false; + + /** Encoding quality controls the trade-off between size and quality. By + default this is set to 101 percent, which corresponds to lossless + encoding. If this value is set to a value <= 100, and the image is + opaque, it will be encoded (using JPEG) with that quality setting. + */ + int fEncodingQuality = 101; + + /** An optional tree of structured document tags that provide + a semantic representation of the content. The caller + should retain ownership. + */ + StructureElementNode* fStructureElementTreeRoot = nullptr; + + /** Executor to handle threaded work within PDF Backend. If this is nullptr, + then all work will be done serially on the main thread. To have worker + threads assist with various tasks, set this to a valid SkExecutor + instance. Currently used for executing Deflate algorithm in parallel. + + If set, the PDF output will be non-reproducible in the order and + internal numbering of objects, but should render the same. + + Experimental. + */ + SkExecutor* fExecutor = nullptr; + + /** Preferred Subsetter. Only respected if both are compiled in. + + The Sfntly subsetter is deprecated. + + Experimental. 
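The Metadata fields above (title, author, raster DPI, the PDF/A flag, and so on) are consumed by SkPDF::MakeDocument, which is declared a little further down in this header. A minimal sketch, not part of the vendored source, of writing a one-page PDF to disk; WriteOnePagePdf is an illustrative name and the page size in points is arbitrary:

#include "include/core/SkCanvas.h"
#include "include/core/SkPaint.h"
#include "include/core/SkStream.h"
#include "include/docs/SkPDFDocument.h"

static void WriteOnePagePdf(const char* path) {
    SkFILEWStream stream(path);                  // PDF bytes are written here
    SkPDF::Metadata metadata;
    metadata.fTitle = SkString("Example");
    sk_sp<SkDocument> doc = SkPDF::MakeDocument(&stream, metadata);
    SkCanvas* page = doc->beginPage(612, 792);   // US Letter, in points
    SkPaint paint;
    paint.setColor(SK_ColorRED);
    page->drawCircle(306, 396, 100, paint);
    doc->endPage();
    doc->close();                                // flushes the document to the stream
}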
+ */ + enum Subsetter { + kHarfbuzz_Subsetter, + kSfntly_Subsetter, + } fSubsetter = kHarfbuzz_Subsetter; +}; + +/** Associate a node ID with subsequent drawing commands in an + SkCanvas. The same node ID can appear in a StructureElementNode + in order to associate a document's structure element tree with + its content. + + A node ID of zero indicates no node ID. + + @param canvas The canvas used to draw to the PDF. + @param nodeId The node ID for subsequent drawing commands. +*/ +SK_API void SetNodeId(SkCanvas* dst, int nodeID); + +/** Create a PDF-backed document, writing the results into a SkWStream. + + PDF pages are sized in point units. 1 pt == 1/72 inch == 127/360 mm. + + @param stream A PDF document will be written to this stream. The document may write + to the stream at anytime during its lifetime, until either close() is + called or the document is deleted. + @param metadata a PDFmetadata object. Any fields may be left empty. + + @returns NULL if there is an error, otherwise a newly created PDF-backed SkDocument. +*/ +SK_API sk_sp<SkDocument> MakeDocument(SkWStream* stream, const Metadata& metadata); + +static inline sk_sp<SkDocument> MakeDocument(SkWStream* stream) { + return MakeDocument(stream, Metadata()); +} + +} // namespace SkPDF + +#undef SKPDF_STRING +#undef SKPDF_STRING_IMPL +#endif // SkPDFDocument_DEFINED diff --git a/src/deps/skia/include/docs/SkXPSDocument.h b/src/deps/skia/include/docs/SkXPSDocument.h new file mode 100644 index 000000000..5cd0777c9 --- /dev/null +++ b/src/deps/skia/include/docs/SkXPSDocument.h @@ -0,0 +1,27 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkXPSDocument_DEFINED +#define SkXPSDocument_DEFINED + +#include "include/core/SkTypes.h" + +#ifdef SK_BUILD_FOR_WIN + +#include "include/core/SkDocument.h" + +struct IXpsOMObjectFactory; + +namespace SkXPS { + +SK_API sk_sp<SkDocument> MakeDocument(SkWStream* stream, + IXpsOMObjectFactory* xpsFactory, + SkScalar dpi = SK_ScalarDefaultRasterDPI); + +} // namespace SkXPS +#endif // SK_BUILD_FOR_WIN +#endif // SkXPSDocument_DEFINED diff --git a/src/deps/skia/include/effects/BUILD.bazel b/src/deps/skia/include/effects/BUILD.bazel new file mode 100644 index 000000000..ecb349097 --- /dev/null +++ b/src/deps/skia/include/effects/BUILD.bazel @@ -0,0 +1,219 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "Sk1DPathEffect_hdr", + hdrs = ["Sk1DPathEffect.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkPathEffect_hdr"], +) + +generated_cc_atom( + name = "Sk2DPathEffect_hdr", + hdrs = ["Sk2DPathEffect.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkPathEffect_hdr"], +) + +generated_cc_atom( + name = "SkBlenders_hdr", + hdrs = ["SkBlenders.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkBlender_hdr"], +) + +generated_cc_atom( + name = "SkBlurDrawLooper_hdr", + hdrs = ["SkBlurDrawLooper.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkDrawLooper_hdr"], +) + +generated_cc_atom( + name = "SkBlurMaskFilter_hdr", + hdrs = ["SkBlurMaskFilter.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkBlurTypes_hdr", + "//include/core:SkMaskFilter_hdr", + "//include/core:SkRect_hdr", + "//include/core:SkScalar_hdr", + ], +) + +generated_cc_atom( + name = "SkColorMatrixFilter_hdr", + hdrs = ["SkColorMatrixFilter.h"], + visibility = ["//:__subpackages__"], + deps 
= [ + ":SkColorMatrix_hdr", + "//include/core:SkColorFilter_hdr", + ], +) + +generated_cc_atom( + name = "SkColorMatrix_hdr", + hdrs = ["SkColorMatrix.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkImageInfo_hdr"], +) + +generated_cc_atom( + name = "SkCornerPathEffect_hdr", + hdrs = ["SkCornerPathEffect.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkPathEffect_hdr"], +) + +generated_cc_atom( + name = "SkDashPathEffect_hdr", + hdrs = ["SkDashPathEffect.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkPathEffect_hdr"], +) + +generated_cc_atom( + name = "SkDiscretePathEffect_hdr", + hdrs = ["SkDiscretePathEffect.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkPathEffect_hdr"], +) + +generated_cc_atom( + name = "SkGradientShader_hdr", + hdrs = ["SkGradientShader.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkShader_hdr"], +) + +generated_cc_atom( + name = "SkHighContrastFilter_hdr", + hdrs = ["SkHighContrastFilter.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkColorFilter_hdr"], +) + +generated_cc_atom( + name = "SkImageFilters_hdr", + hdrs = ["SkImageFilters.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRuntimeEffect_hdr", + "//include/core:SkBlendMode_hdr", + "//include/core:SkColor_hdr", + "//include/core:SkImageFilter_hdr", + "//include/core:SkImage_hdr", + "//include/core:SkPicture_hdr", + "//include/core:SkRect_hdr", + "//include/core:SkTileMode_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkLayerDrawLooper_hdr", + hdrs = ["SkLayerDrawLooper.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkBlendMode_hdr", + "//include/core:SkDrawLooper_hdr", + "//include/core:SkPaint_hdr", + "//include/core:SkPoint_hdr", + ], +) + +generated_cc_atom( + name = "SkLumaColorFilter_hdr", + hdrs = ["SkLumaColorFilter.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkColorFilter_hdr"], +) + +generated_cc_atom( + name = "SkOpPathEffect_hdr", + hdrs = ["SkOpPathEffect.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkMatrix_hdr", + "//include/core:SkPaint_hdr", + "//include/core:SkPathEffect_hdr", + "//include/pathops:SkPathOps_hdr", + ], +) + +generated_cc_atom( + name = "SkOverdrawColorFilter_hdr", + hdrs = ["SkOverdrawColorFilter.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkColorFilter_hdr", + "//include/core:SkFlattenable_hdr", + ], +) + +generated_cc_atom( + name = "SkPerlinNoiseShader_hdr", + hdrs = ["SkPerlinNoiseShader.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkShader_hdr"], +) + +generated_cc_atom( + name = "SkRuntimeEffect_hdr", + hdrs = ["SkRuntimeEffect.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkBlender_hdr", + "//include/core:SkColorFilter_hdr", + "//include/core:SkData_hdr", + "//include/core:SkImageInfo_hdr", + "//include/core:SkMatrix_hdr", + "//include/core:SkShader_hdr", + "//include/core:SkSpan_hdr", + "//include/core:SkString_hdr", + "//include/private:SkOnce_hdr", + "//include/private:SkSLSampleUsage_hdr", + "//include/private:SkTOptional_hdr", + ], +) + +generated_cc_atom( + name = "SkShaderMaskFilter_hdr", + hdrs = ["SkShaderMaskFilter.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkMaskFilter_hdr"], +) + +generated_cc_atom( + name = "SkStrokeAndFillPathEffect_hdr", + hdrs = ["SkStrokeAndFillPathEffect.h"], + 
visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkPaint_hdr", + "//include/core:SkPathEffect_hdr", + "//include/pathops:SkPathOps_hdr", + ], +) + +generated_cc_atom( + name = "SkTableColorFilter_hdr", + hdrs = ["SkTableColorFilter.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkColorFilter_hdr"], +) + +generated_cc_atom( + name = "SkTableMaskFilter_hdr", + hdrs = ["SkTableMaskFilter.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkMaskFilter_hdr", + "//include/core:SkScalar_hdr", + ], +) + +generated_cc_atom( + name = "SkTrimPathEffect_hdr", + hdrs = ["SkTrimPathEffect.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkPathEffect_hdr"], +) diff --git a/src/deps/skia/include/effects/Sk1DPathEffect.h b/src/deps/skia/include/effects/Sk1DPathEffect.h new file mode 100644 index 000000000..070fc33b5 --- /dev/null +++ b/src/deps/skia/include/effects/Sk1DPathEffect.h @@ -0,0 +1,35 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef Sk1DPathEffect_DEFINED +#define Sk1DPathEffect_DEFINED + +#include "include/core/SkPathEffect.h" + +class SK_API SkPath1DPathEffect { +public: + enum Style { + kTranslate_Style, // translate the shape to each position + kRotate_Style, // rotate the shape about its center + kMorph_Style, // transform each point, and turn lines into curves + + kLastEnum_Style = kMorph_Style, + }; + + /** Dash by replicating the specified path. + @param path The path to replicate (dash) + @param advance The space between instances of path + @param phase distance (mod advance) along path for its initial position + @param style how to transform path at each point (based on the current + position and tangent) + */ + static sk_sp<SkPathEffect> Make(const SkPath& path, SkScalar advance, SkScalar phase, Style); + + static void RegisterFlattenables(); +}; + +#endif diff --git a/src/deps/skia/include/effects/Sk2DPathEffect.h b/src/deps/skia/include/effects/Sk2DPathEffect.h new file mode 100644 index 000000000..96481ca11 --- /dev/null +++ b/src/deps/skia/include/effects/Sk2DPathEffect.h @@ -0,0 +1,30 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef Sk2DPathEffect_DEFINED +#define Sk2DPathEffect_DEFINED + +#include "include/core/SkPathEffect.h" + +class SkMatrix; +class SkPath; + +class SK_API SkLine2DPathEffect { +public: + static sk_sp<SkPathEffect> Make(SkScalar width, const SkMatrix& matrix); + + static void RegisterFlattenables(); +}; + +class SK_API SkPath2DPathEffect { +public: + static sk_sp<SkPathEffect> Make(const SkMatrix& matrix, const SkPath& path); + + static void RegisterFlattenables(); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkBlenders.h b/src/deps/skia/include/effects/SkBlenders.h new file mode 100644 index 000000000..7507071b0 --- /dev/null +++ b/src/deps/skia/include/effects/SkBlenders.h @@ -0,0 +1,27 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
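SkPath1DPathEffect::Make, documented above, stamps a small path repeatedly along a stroked contour. A hedged sketch, not part of the vendored header; the dot shape and spacing are arbitrary and DrawDottedLine is an illustrative name:

#include "include/core/SkCanvas.h"
#include "include/core/SkPaint.h"
#include "include/core/SkPath.h"
#include "include/effects/Sk1DPathEffect.h"

static void DrawDottedLine(SkCanvas* canvas) {
    SkPath dot;
    dot.addCircle(0, 0, 3);                      // the shape to replicate along the stroke
    SkPaint paint;
    paint.setAntiAlias(true);
    paint.setStyle(SkPaint::kStroke_Style);
    paint.setPathEffect(SkPath1DPathEffect::Make(
            dot, /*advance=*/12, /*phase=*/0, SkPath1DPathEffect::kRotate_Style));
    canvas->drawLine(16, 16, 240, 16, paint);
}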
+ */ + +#ifndef SkBlenders_DEFINED +#define SkBlenders_DEFINED + +#include "include/core/SkBlender.h" + +class SK_API SkBlenders { +public: + /** + * Create a blender that implements the following: + * k1 * src * dst + k2 * src + k3 * dst + k4 + * @param k1, k2, k3, k4 The four coefficients. + * @param enforcePMColor If true, the RGB channels will be clamped to the calculated alpha. + */ + static sk_sp<SkBlender> Arithmetic(float k1, float k2, float k3, float k4, bool enforcePremul); + +private: + SkBlenders() = delete; +}; + +#endif diff --git a/src/deps/skia/include/effects/SkBlurDrawLooper.h b/src/deps/skia/include/effects/SkBlurDrawLooper.h new file mode 100644 index 000000000..fc766f807 --- /dev/null +++ b/src/deps/skia/include/effects/SkBlurDrawLooper.h @@ -0,0 +1,26 @@ +/* + * Copyright 2008 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkBlurDrawLooper_DEFINED +#define SkBlurDrawLooper_DEFINED + +#include "include/core/SkDrawLooper.h" + +#ifndef SK_SUPPORT_LEGACY_DRAWLOOPER +#error "SkDrawLooper is unsupported" +#endif + +/** + * DEPRECATED: No longer supported in Skia. + */ +namespace SkBlurDrawLooper { + sk_sp<SkDrawLooper> SK_API Make(SkColor4f color, SkColorSpace* cs, + SkScalar sigma, SkScalar dx, SkScalar dy); + sk_sp<SkDrawLooper> SK_API Make(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy); +} // namespace SkBlurDrawLooper + +#endif diff --git a/src/deps/skia/include/effects/SkBlurMaskFilter.h b/src/deps/skia/include/effects/SkBlurMaskFilter.h new file mode 100644 index 000000000..1b9319869 --- /dev/null +++ b/src/deps/skia/include/effects/SkBlurMaskFilter.h @@ -0,0 +1,35 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkBlurMaskFilter_DEFINED +#define SkBlurMaskFilter_DEFINED + +// we include this since our callers will need to at least be able to ref/unref +#include "include/core/SkBlurTypes.h" +#include "include/core/SkMaskFilter.h" +#include "include/core/SkRect.h" +#include "include/core/SkScalar.h" + +class SkRRect; + +class SK_API SkBlurMaskFilter { +public: +#ifdef SK_SUPPORT_LEGACY_EMBOSSMASKFILTER + /** Create an emboss maskfilter + @param blurSigma standard deviation of the Gaussian blur to apply + before applying lighting (e.g. 3) + @param direction array of 3 scalars [x, y, z] specifying the direction of the light source + @param ambient 0...1 amount of ambient light + @param specular coefficient for specular highlights (e.g. 8) + @return the emboss maskfilter + */ + static sk_sp<SkMaskFilter> MakeEmboss(SkScalar blurSigma, const SkScalar direction[3], + SkScalar ambient, SkScalar specular); +#endif +}; + +#endif diff --git a/src/deps/skia/include/effects/SkColorMatrix.h b/src/deps/skia/include/effects/SkColorMatrix.h new file mode 100644 index 000000000..166028214 --- /dev/null +++ b/src/deps/skia/include/effects/SkColorMatrix.h @@ -0,0 +1,55 @@ +/* + * Copyright 2007 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
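SkBlenders::Arithmetic above builds a blender from the four coefficients in k1 * src * dst + k2 * src + k3 * dst + k4. A sketch, assuming SkPaint::setBlender is available in this Skia revision (SkBlender ships alongside these headers); the coefficient choice is illustrative:

#include "include/core/SkPaint.h"
#include "include/effects/SkBlenders.h"

// k1 = 0, k2 = 1, k3 = 1, k4 = 0 reduces to src + dst, i.e. an additive blend.
static void UseAdditiveBlend(SkPaint* paint) {
    paint->setBlender(SkBlenders::Arithmetic(0.0f, 1.0f, 1.0f, 0.0f,
                                             /*enforcePremul=*/true));
}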
+ */ + +#ifndef SkColorMatrix_DEFINED +#define SkColorMatrix_DEFINED + +#include "include/core/SkImageInfo.h" + +#include <algorithm> +#include <array> + +class SK_API SkColorMatrix { +public: + constexpr SkColorMatrix() : SkColorMatrix(1, 0, 0, 0, 0, + 0, 1, 0, 0, 0, + 0, 0, 1, 0, 0, + 0, 0, 0, 1, 0) {} + + constexpr SkColorMatrix(float m00, float m01, float m02, float m03, float m04, + float m10, float m11, float m12, float m13, float m14, + float m20, float m21, float m22, float m23, float m24, + float m30, float m31, float m32, float m33, float m34) + : fMat { m00, m01, m02, m03, m04, + m10, m11, m12, m13, m14, + m20, m21, m22, m23, m24, + m30, m31, m32, m33, m34 } {} + + static SkColorMatrix RGBtoYUV(SkYUVColorSpace); + static SkColorMatrix YUVtoRGB(SkYUVColorSpace); + + void setIdentity(); + void setScale(float rScale, float gScale, float bScale, float aScale = 1.0f); + + void postTranslate(float dr, float dg, float db, float da); + + void setConcat(const SkColorMatrix& a, const SkColorMatrix& b); + void preConcat(const SkColorMatrix& mat) { this->setConcat(*this, mat); } + void postConcat(const SkColorMatrix& mat) { this->setConcat(mat, *this); } + + void setSaturation(float sat); + + void setRowMajor(const float src[20]) { std::copy_n(src, 20, fMat.begin()); } + void getRowMajor(float dst[20]) const { std::copy_n(fMat.begin(), 20, dst); } + +private: + std::array<float, 20> fMat; + + friend class SkColorFilters; +}; + +#endif diff --git a/src/deps/skia/include/effects/SkColorMatrixFilter.h b/src/deps/skia/include/effects/SkColorMatrixFilter.h new file mode 100644 index 000000000..144c9685f --- /dev/null +++ b/src/deps/skia/include/effects/SkColorMatrixFilter.h @@ -0,0 +1,25 @@ +/* + * Copyright 2007 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkColorMatrixFilter_DEFINED +#define SkColorMatrixFilter_DEFINED + +#include "include/core/SkColorFilter.h" +#include "include/effects/SkColorMatrix.h" + +class SK_API SkColorMatrixFilter : public SkColorFilter { +public: + /** + * Create a colorfilter that multiplies the RGB channels by one color, and + * then adds a second color, pinning the result for each component to + * [0..255]. The alpha components of the mul and add arguments + * are ignored. + */ + static sk_sp<SkColorFilter> MakeLightingFilter(SkColor mul, SkColor add); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkCornerPathEffect.h b/src/deps/skia/include/effects/SkCornerPathEffect.h new file mode 100644 index 000000000..6017df46a --- /dev/null +++ b/src/deps/skia/include/effects/SkCornerPathEffect.h @@ -0,0 +1,28 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCornerPathEffect_DEFINED +#define SkCornerPathEffect_DEFINED + +#include "include/core/SkPathEffect.h" + +/** \class SkCornerPathEffect + + SkCornerPathEffect is a subclass of SkPathEffect that can turn sharp corners + into various treatments (e.g. rounded corners) +*/ +class SK_API SkCornerPathEffect { +public: + /** radius must be > 0 to have an effect. It specifies the distance from each corner + that should be "rounded". 
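SkColorMatrix above is typically fed to SkColorFilters::Matrix (declared in include/core/SkColorFilter.h) rather than used directly. A minimal sketch that desaturates whatever is drawn with the paint; MakeGrayscalePaint is an illustrative name:

#include "include/core/SkColorFilter.h"
#include "include/core/SkPaint.h"
#include "include/effects/SkColorMatrix.h"

static void MakeGrayscalePaint(SkPaint* paint) {
    SkColorMatrix m;
    m.setSaturation(0.0f);                       // 0 = fully desaturated
    paint->setColorFilter(SkColorFilters::Matrix(m));
}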
+ */ + static sk_sp<SkPathEffect> Make(SkScalar radius); + + static void RegisterFlattenables(); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkDashPathEffect.h b/src/deps/skia/include/effects/SkDashPathEffect.h new file mode 100644 index 000000000..d6ca9122e --- /dev/null +++ b/src/deps/skia/include/effects/SkDashPathEffect.h @@ -0,0 +1,39 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkDashPathEffect_DEFINED +#define SkDashPathEffect_DEFINED + +#include "include/core/SkPathEffect.h" + +class SK_API SkDashPathEffect { +public: + /** intervals: array containing an even number of entries (>=2), with + the even indices specifying the length of "on" intervals, and the odd + indices specifying the length of "off" intervals. This array will be + copied in Make, and can be disposed of freely after. + count: number of elements in the intervals array + phase: offset into the intervals array (mod the sum of all of the + intervals). + + For example: if intervals[] = {10, 20}, count = 2, and phase = 25, + this will set up a dashed path like so: + 5 pixels off + 10 pixels on + 20 pixels off + 10 pixels on + 20 pixels off + ... + A phase of -5, 25, 55, 85, etc. would all result in the same path, + because the sum of all the intervals is 30. + + Note: only affects stroked paths. + */ + static sk_sp<SkPathEffect> Make(const SkScalar intervals[], int count, SkScalar phase); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkDiscretePathEffect.h b/src/deps/skia/include/effects/SkDiscretePathEffect.h new file mode 100644 index 000000000..6054cbdc9 --- /dev/null +++ b/src/deps/skia/include/effects/SkDiscretePathEffect.h @@ -0,0 +1,37 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkDiscretePathEffect_DEFINED +#define SkDiscretePathEffect_DEFINED + +#include "include/core/SkPathEffect.h" + +/** \class SkDiscretePathEffect + + This path effect chops a path into discrete segments, and randomly displaces them. +*/ +class SK_API SkDiscretePathEffect { +public: + /** Break the path into segments of segLength length, and randomly move the endpoints + away from the original path by a maximum of deviation. + Note: works on filled or framed paths + + @param seedAssist This is a caller-supplied seedAssist that modifies + the seed value that is used to randomize the path + segments' endpoints. If not supplied it defaults to 0, + in which case filtering a path multiple times will + result in the same set of segments (this is useful for + testing). If a caller does not want this behaviour + they can pass in a different seedAssist to get a + different set of path segments. + */ + static sk_sp<SkPathEffect> Make(SkScalar segLength, SkScalar dev, uint32_t seedAssist = 0); + + static void RegisterFlattenables(); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkGradientShader.h b/src/deps/skia/include/effects/SkGradientShader.h new file mode 100644 index 000000000..f8745c3b7 --- /dev/null +++ b/src/deps/skia/include/effects/SkGradientShader.h @@ -0,0 +1,264 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
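The dash example in the SkDashPathEffect comment above translates directly into code. A sketch, not part of the vendored header; the intervals and phase mirror the {10, 20} / 25 example and 'canvas' is an assumed SkCanvas*:

#include "include/core/SkCanvas.h"
#include "include/core/SkPaint.h"
#include "include/effects/SkDashPathEffect.h"

static void DrawDashedLine(SkCanvas* canvas) {
    const SkScalar intervals[] = {10.0f, 20.0f};   // 10 on, 20 off
    SkPaint paint;
    paint.setStyle(SkPaint::kStroke_Style);        // dashing only affects stroked geometry
    paint.setStrokeWidth(2.0f);
    paint.setPathEffect(SkDashPathEffect::Make(intervals, /*count=*/2, /*phase=*/25.0f));
    canvas->drawLine(0, 32, 256, 32, paint);
}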
+ */ + +#ifndef SkGradientShader_DEFINED +#define SkGradientShader_DEFINED + +#include "include/core/SkShader.h" + +/** \class SkGradientShader + + SkGradientShader hosts factories for creating subclasses of SkShader that + render linear and radial gradients. In general, degenerate cases should not + produce surprising results, but there are several types of degeneracies: + + * A linear gradient made from the same two points. + * A radial gradient with a radius of zero. + * A sweep gradient where the start and end angle are the same. + * A two point conical gradient where the two centers and the two radii are + the same. + + For any degenerate gradient with a decal tile mode, it will draw empty since the interpolating + region is zero area and the outer region is discarded by the decal mode. + + For any degenerate gradient with a repeat or mirror tile mode, it will draw a solid color that + is the average gradient color, since infinitely many repetitions of the gradients will fill the + shape. + + For a clamped gradient, every type is well-defined at the limit except for linear gradients. The + radial gradient with zero radius becomes the last color. The sweep gradient draws the sector + from 0 to the provided angle with the first color, with a hardstop switching to the last color. + When the provided angle is 0, this is just the solid last color again. Similarly, the two point + conical gradient becomes a circle filled with the first color, sized to the provided radius, + with a hardstop switching to the last color. When the two radii are both zero, this is just the + solid last color. + + As a linear gradient approaches the degenerate case, its shader will approach the appearance of + two half planes, each filled by the first and last colors of the gradient. The planes will be + oriented perpendicular to the vector between the two defining points of the gradient. However, + once they become the same point, Skia cannot reconstruct what that expected orientation is. To + provide a stable and predictable color in this case, Skia just uses the last color as a solid + fill to be similar to many of the other degenerate gradients' behaviors in clamp mode. +*/ +class SK_API SkGradientShader { +public: + enum Flags { + /** By default gradients will interpolate their colors in unpremul space + * and then premultiply each of the results. By setting this flag, the + * gradients will premultiply their colors first, and then interpolate + * between them. + * example: https://fiddle.skia.org/c/@GradientShader_MakeLinear + */ + kInterpolateColorsInPremul_Flag = 1 << 0, + }; + + /** Returns a shader that generates a linear gradient between the two specified points. + <p /> + @param pts The start and end points for the gradient. + @param colors The array[count] of colors, to be distributed between the two points + @param pos May be NULL. array[count] of SkScalars, or NULL, of the relative position of + each corresponding color in the colors array. If this is NULL, + the the colors are distributed evenly between the start and end point. + If this is not null, the values must begin with 0, end with 1.0, and + intermediate values must be strictly increasing. + @param count Must be >=2. The number of colors (and pos if not NULL) entries. 
+ @param mode The tiling mode + + example: https://fiddle.skia.org/c/@GradientShader_MakeLinear + */ + static sk_sp<SkShader> MakeLinear(const SkPoint pts[2], + const SkColor colors[], const SkScalar pos[], int count, + SkTileMode mode, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeLinear(const SkPoint pts[2], + const SkColor colors[], const SkScalar pos[], int count, + SkTileMode mode) { + return MakeLinear(pts, colors, pos, count, mode, 0, nullptr); + } + + /** Returns a shader that generates a linear gradient between the two specified points. + <p /> + @param pts The start and end points for the gradient. + @param colors The array[count] of colors, to be distributed between the two points + @param pos May be NULL. array[count] of SkScalars, or NULL, of the relative position of + each corresponding color in the colors array. If this is NULL, + the the colors are distributed evenly between the start and end point. + If this is not null, the values must begin with 0, end with 1.0, and + intermediate values must be strictly increasing. + @param count Must be >=2. The number of colors (and pos if not NULL) entries. + @param mode The tiling mode + + example: https://fiddle.skia.org/c/@GradientShader_MakeLinear + */ + static sk_sp<SkShader> MakeLinear(const SkPoint pts[2], + const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace, + const SkScalar pos[], int count, SkTileMode mode, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeLinear(const SkPoint pts[2], + const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace, + const SkScalar pos[], int count, SkTileMode mode) { + return MakeLinear(pts, colors, std::move(colorSpace), pos, count, mode, 0, nullptr); + } + + /** Returns a shader that generates a radial gradient given the center and radius. + <p /> + @param center The center of the circle for this gradient + @param radius Must be positive. The radius of the circle for this gradient + @param colors The array[count] of colors, to be distributed between the center and edge of the circle + @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of + each corresponding color in the colors array. If this is NULL, + the the colors are distributed evenly between the center and edge of the circle. + If this is not null, the values must begin with 0, end with 1.0, and + intermediate values must be strictly increasing. + @param count Must be >= 2. The number of colors (and pos if not NULL) entries + @param mode The tiling mode + */ + static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius, + const SkColor colors[], const SkScalar pos[], int count, + SkTileMode mode, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius, + const SkColor colors[], const SkScalar pos[], int count, + SkTileMode mode) { + return MakeRadial(center, radius, colors, pos, count, mode, 0, nullptr); + } + + /** Returns a shader that generates a radial gradient given the center and radius. + <p /> + @param center The center of the circle for this gradient + @param radius Must be positive. The radius of the circle for this gradient + @param colors The array[count] of colors, to be distributed between the center and edge of the circle + @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of + each corresponding color in the colors array. 
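The convenience overload of MakeLinear declared above (legacy SkColor, no flags or local matrix) is the common entry point. A sketch; passing nullptr for pos distributes the two stops evenly, the geometry is arbitrary, and 'canvas' is an assumed SkCanvas*:

#include "include/core/SkCanvas.h"
#include "include/core/SkPaint.h"
#include "include/core/SkTileMode.h"
#include "include/effects/SkGradientShader.h"

static void DrawLinearGradient(SkCanvas* canvas) {
    const SkPoint pts[2] = {{0, 0}, {256, 0}};
    const SkColor colors[2] = {SK_ColorBLUE, SK_ColorYELLOW};
    SkPaint paint;
    paint.setShader(SkGradientShader::MakeLinear(pts, colors, /*pos=*/nullptr, 2,
                                                 SkTileMode::kClamp));
    canvas->drawPaint(paint);                    // fills the clip with the gradient
}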
If this is NULL, + the the colors are distributed evenly between the center and edge of the circle. + If this is not null, the values must begin with 0, end with 1.0, and + intermediate values must be strictly increasing. + @param count Must be >= 2. The number of colors (and pos if not NULL) entries + @param mode The tiling mode + */ + static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius, + const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace, + const SkScalar pos[], int count, SkTileMode mode, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius, + const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace, + const SkScalar pos[], int count, SkTileMode mode) { + return MakeRadial(center, radius, colors, std::move(colorSpace), pos, count, mode, + 0, nullptr); + } + + /** + * Returns a shader that generates a conical gradient given two circles, or + * returns NULL if the inputs are invalid. The gradient interprets the + * two circles according to the following HTML spec. + * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient + */ + static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius, + const SkPoint& end, SkScalar endRadius, + const SkColor colors[], const SkScalar pos[], + int count, SkTileMode mode, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius, + const SkPoint& end, SkScalar endRadius, + const SkColor colors[], const SkScalar pos[], + int count, SkTileMode mode) { + return MakeTwoPointConical(start, startRadius, end, endRadius, colors, pos, count, mode, + 0, nullptr); + } + + /** + * Returns a shader that generates a conical gradient given two circles, or + * returns NULL if the inputs are invalid. The gradient interprets the + * two circles according to the following HTML spec. + * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient + */ + static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius, + const SkPoint& end, SkScalar endRadius, + const SkColor4f colors[], + sk_sp<SkColorSpace> colorSpace, const SkScalar pos[], + int count, SkTileMode mode, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius, + const SkPoint& end, SkScalar endRadius, + const SkColor4f colors[], + sk_sp<SkColorSpace> colorSpace, const SkScalar pos[], + int count, SkTileMode mode) { + return MakeTwoPointConical(start, startRadius, end, endRadius, colors, + std::move(colorSpace), pos, count, mode, 0, nullptr); + } + + /** Returns a shader that generates a sweep gradient given a center. + <p /> + @param cx The X coordinate of the center of the sweep + @param cx The Y coordinate of the center of the sweep + @param colors The array[count] of colors, to be distributed around the center, within + the gradient angle range. + @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative + position of each corresponding color in the colors array. If this is + NULL, then the colors are distributed evenly within the angular range. + If this is not null, the values must begin with 0, end with 1.0, and + intermediate values must be strictly increasing. + @param count Must be >= 2. The number of colors (and pos if not NULL) entries + @param mode Tiling mode: controls drawing outside of the gradient angular range. 
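MakeRadial and MakeTwoPointConical follow the same pattern as the linear factories. A short sketch of the radial form, again with evenly spaced stops (an illustrative choice, not a requirement); MakeVignetteShader is a hypothetical helper name:

#include "include/core/SkPaint.h"
#include "include/core/SkPoint.h"
#include "include/core/SkTileMode.h"
#include "include/effects/SkGradientShader.h"

static sk_sp<SkShader> MakeVignetteShader() {
    const SkColor colors[2] = {SK_ColorWHITE, SK_ColorBLACK};
    return SkGradientShader::MakeRadial(SkPoint::Make(128, 128), /*radius=*/128,
                                        colors, /*pos=*/nullptr, 2, SkTileMode::kClamp);
}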
+ @param startAngle Start of the angular range, corresponding to pos == 0. + @param endAngle End of the angular range, corresponding to pos == 1. + */ + static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy, + const SkColor colors[], const SkScalar pos[], int count, + SkTileMode mode, + SkScalar startAngle, SkScalar endAngle, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy, + const SkColor colors[], const SkScalar pos[], int count, + uint32_t flags, const SkMatrix* localMatrix) { + return MakeSweep(cx, cy, colors, pos, count, SkTileMode::kClamp, 0, 360, flags, + localMatrix); + } + static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy, + const SkColor colors[], const SkScalar pos[], int count) { + return MakeSweep(cx, cy, colors, pos, count, 0, nullptr); + } + + /** Returns a shader that generates a sweep gradient given a center. + <p /> + @param cx The X coordinate of the center of the sweep + @param cx The Y coordinate of the center of the sweep + @param colors The array[count] of colors, to be distributed around the center, within + the gradient angle range. + @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative + position of each corresponding color in the colors array. If this is + NULL, then the colors are distributed evenly within the angular range. + If this is not null, the values must begin with 0, end with 1.0, and + intermediate values must be strictly increasing. + @param count Must be >= 2. The number of colors (and pos if not NULL) entries + @param mode Tiling mode: controls drawing outside of the gradient angular range. + @param startAngle Start of the angular range, corresponding to pos == 0. + @param endAngle End of the angular range, corresponding to pos == 1. + */ + static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy, + const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace, + const SkScalar pos[], int count, + SkTileMode mode, + SkScalar startAngle, SkScalar endAngle, + uint32_t flags, const SkMatrix* localMatrix); + static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy, + const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace, + const SkScalar pos[], int count, + uint32_t flags, const SkMatrix* localMatrix) { + return MakeSweep(cx, cy, colors, std::move(colorSpace), pos, count, + SkTileMode::kClamp, 0, 360, flags, localMatrix); + } + static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy, + const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace, + const SkScalar pos[], int count) { + return MakeSweep(cx, cy, colors, std::move(colorSpace), pos, count, 0, nullptr); + } + + static void RegisterFlattenables(); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkHighContrastFilter.h b/src/deps/skia/include/effects/SkHighContrastFilter.h new file mode 100644 index 000000000..90acec1cd --- /dev/null +++ b/src/deps/skia/include/effects/SkHighContrastFilter.h @@ -0,0 +1,80 @@ +/* +* Copyright 2017 Google Inc. +* +* Use of this source code is governed by a BSD-style license that can be +* found in the LICENSE file. +*/ + +#ifndef SkHighContrastFilter_DEFINED +#define SkHighContrastFilter_DEFINED + +#include "include/core/SkColorFilter.h" + +/** + * Configuration struct for SkHighContrastFilter. + * + * Provides transformations to improve contrast for users with low vision. 
+ */ +struct SkHighContrastConfig { + enum class InvertStyle { + kNoInvert, + kInvertBrightness, + kInvertLightness, + + kLast = kInvertLightness + }; + + SkHighContrastConfig() { + fGrayscale = false; + fInvertStyle = InvertStyle::kNoInvert; + fContrast = 0.0f; + } + + SkHighContrastConfig(bool grayscale, + InvertStyle invertStyle, + SkScalar contrast) + : fGrayscale(grayscale), + fInvertStyle(invertStyle), + fContrast(contrast) {} + + // Returns true if all of the fields are set within the valid range. + bool isValid() const { + return fInvertStyle >= InvertStyle::kNoInvert && + fInvertStyle <= InvertStyle::kInvertLightness && + fContrast >= -1.0 && + fContrast <= 1.0; + } + + // If true, the color will be converted to grayscale. + bool fGrayscale; + + // Whether to invert brightness, lightness, or neither. + InvertStyle fInvertStyle; + + // After grayscale and inverting, the contrast can be adjusted linearly. + // The valid range is -1.0 through 1.0, where 0.0 is no adjustment. + SkScalar fContrast; +}; + +/** + * Color filter that provides transformations to improve contrast + * for users with low vision. + * + * Applies the following transformations in this order. Each of these + * can be configured using SkHighContrastConfig. + * + * - Conversion to grayscale + * - Color inversion (either in RGB or HSL space) + * - Increasing the resulting contrast. + * + * Calling SkHighContrastFilter::Make will return nullptr if the config is + * not valid, e.g. if you try to call it with a contrast outside the range of + * -1.0 to 1.0. + */ + +struct SK_API SkHighContrastFilter { + // Returns the filter, or nullptr if the config is invalid. + static sk_sp<SkColorFilter> Make(const SkHighContrastConfig& config); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkImageFilters.h b/src/deps/skia/include/effects/SkImageFilters.h new file mode 100644 index 000000000..144bfb8a6 --- /dev/null +++ b/src/deps/skia/include/effects/SkImageFilters.h @@ -0,0 +1,551 @@ +/* + * Copyright 2019 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageFilters_DEFINED +#define SkImageFilters_DEFINED + +#include "include/core/SkBlendMode.h" +#include "include/core/SkColor.h" +#include "include/core/SkImage.h" +#include "include/core/SkImageFilter.h" +#include "include/core/SkPicture.h" +#include "include/core/SkRect.h" +#include "include/core/SkTileMode.h" +#include "include/core/SkTypes.h" +#include "include/effects/SkRuntimeEffect.h" + +#include <cstddef> + +class SkBlender; +class SkColorFilter; +class SkPaint; +class SkRegion; + +namespace skif { + static constexpr SkRect kNoCropRect = {SK_ScalarNegativeInfinity, SK_ScalarNegativeInfinity, + SK_ScalarInfinity, SK_ScalarInfinity}; +} + +// A set of factory functions providing useful SkImageFilter effects. For image filters that take an +// input filter, providing nullptr means it will automatically use the dynamic source image. This +// source depends on how the filter is applied, but is either the contents of a saved layer when +// drawing with SkCanvas, or an explicit SkImage if using SkImage::makeWithFilter. +class SK_API SkImageFilters { +public: + // This is just a convenience type to allow passing SkIRects, SkRects, and optional pointers + // to those types as a crop rect for the image filter factories. It's not intended to be used + // directly. 
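SkHighContrastConfig and SkHighContrastFilter::Make above compose grayscale conversion, inversion, and a contrast boost into a single color filter. A sketch using the three-argument constructor declared in the header; the contrast value is illustrative:

#include "include/core/SkPaint.h"
#include "include/effects/SkHighContrastFilter.h"

static void ApplyHighContrast(SkPaint* paint) {
    SkHighContrastConfig config(/*grayscale=*/true,
                                SkHighContrastConfig::InvertStyle::kInvertBrightness,
                                /*contrast=*/0.2f);
    // Make() returns nullptr if the config is out of range (e.g. |contrast| > 1).
    paint->setColorFilter(SkHighContrastFilter::Make(config));
}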
+ struct CropRect { + CropRect() : fCropRect(skif::kNoCropRect) {} + // Intentionally not explicit so callers don't have to use this type but can use SkIRect or + // SkRect as desired. + CropRect(std::nullptr_t) : fCropRect(skif::kNoCropRect) {} + CropRect(const SkIRect& crop) : fCropRect(SkRect::Make(crop)) {} + CropRect(const SkRect& crop) : fCropRect(crop) {} + CropRect(const SkIRect* optionalCrop) : fCropRect(optionalCrop ? SkRect::Make(*optionalCrop) + : skif::kNoCropRect) {} + CropRect(const SkRect* optionalCrop) : fCropRect(optionalCrop ? *optionalCrop + : skif::kNoCropRect) {} + + operator const SkRect*() const { return fCropRect == skif::kNoCropRect ? nullptr : &fCropRect; } + + SkRect fCropRect; + }; + + /** + * Create a filter that updates the alpha of the image based on 'region'. Pixels inside the + * region are made more opaque and pixels outside are made more transparent. + * + * Specifically, if a pixel is inside the region, its alpha will be set to + * max(innerMin, pixel's alpha). If a pixel is outside the region, its alpha will be updated to + * min(outerMax, pixel's alpha). + * @param region The geometric region controlling the inner and outer alpha thresholds. + * @param innerMin The minimum alpha value for pixels inside 'region'. + * @param outerMax The maximum alpha value for pixels outside of 'region'. + * @param input The input filter, or uses the source bitmap if this is null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> AlphaThreshold(const SkRegion& region, SkScalar innerMin, + SkScalar outerMax, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that implements a custom blend mode. Each output pixel is the result of + * combining the corresponding background and foreground pixels using the 4 coefficients: + * k1 * foreground * background + k2 * foreground + k3 * background + k4 + * @param k1, k2, k3, k4 The four coefficients used to combine the foreground and background. + * @param enforcePMColor If true, the RGB channels will be clamped to the calculated alpha. + * @param background The background content, using the source bitmap when this is null. + * @param foreground The foreground content, using the source bitmap when this is null. + * @param cropRect Optional rectangle that crops the inputs and output. + */ + static sk_sp<SkImageFilter> Arithmetic(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4, + bool enforcePMColor, sk_sp<SkImageFilter> background, + sk_sp<SkImageFilter> foreground, + const CropRect& cropRect = {}); + + /** + * This filter takes an SkBlendMode and uses it to composite the two filters together. + * @param mode The blend mode that defines the compositing operation + * @param background The Dst pixels used in blending, if null the source bitmap is used. + * @param foreground The Src pixels used in blending, if null the source bitmap is used. + * @cropRect Optional rectangle to crop input and output. + */ + static sk_sp<SkImageFilter> Blend(SkBlendMode mode, sk_sp<SkImageFilter> background, + sk_sp<SkImageFilter> foreground = nullptr, + const CropRect& cropRect = {}); + + /** + * This filter takes an SkBlendMode and uses it to composite the two filters together. + * @param blender The blender that defines the compositing operation + * @param background The Dst pixels used in blending, if null the source bitmap is used. + * @param foreground The Src pixels used in blending, if null the source bitmap is used. 
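Blend() above, like the other factories in this header, treats a null input as the dynamic source image. A sketch, not part of the vendored header, that multiplies the source content with itself (darkening midtones); DarkenWithSelfMultiply is an illustrative name:

#include "include/core/SkBlendMode.h"
#include "include/core/SkPaint.h"
#include "include/effects/SkImageFilters.h"

static void DarkenWithSelfMultiply(SkPaint* paint) {
    // nullptr background and foreground both resolve to the source image.
    paint->setImageFilter(SkImageFilters::Blend(SkBlendMode::kMultiply,
                                                /*background=*/nullptr,
                                                /*foreground=*/nullptr));
}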
+ * @cropRect Optional rectangle to crop input and output. + */ + static sk_sp<SkImageFilter> Blend(sk_sp<SkBlender> blender, sk_sp<SkImageFilter> background, + sk_sp<SkImageFilter> foreground = nullptr, + const CropRect& cropRect = {}); + + /** + * Create a filter that blurs its input by the separate X and Y sigmas. The provided tile mode + * is used when the blur kernel goes outside the input image. + * @param sigmaX The Gaussian sigma value for blurring along the X axis. + * @param sigmaY The Gaussian sigma value for blurring along the Y axis. + * @param tileMode The tile mode applied at edges . + * TODO (michaelludwig) - kMirror is not supported yet + * @param input The input filter that is blurred, uses source bitmap if this is null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> Blur(SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode, + sk_sp<SkImageFilter> input, const CropRect& cropRect = {}); + // As above, but defaults to the decal tile mode. + static sk_sp<SkImageFilter> Blur(SkScalar sigmaX, SkScalar sigmaY, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}) { + return Blur(sigmaX, sigmaY, SkTileMode::kDecal, std::move(input), cropRect); + } + + /** + * Create a filter that applies the color filter to the input filter results. + * @param cf The color filter that transforms the input image. + * @param input The input filter, or uses the source bitmap if this is null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> ColorFilter(sk_sp<SkColorFilter> cf, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that composes 'inner' with 'outer', such that the results of 'inner' are + * treated as the source bitmap passed to 'outer', i.e. result = outer(inner(source)). + * @param outer The outer filter that evaluates the results of inner. + * @param inner The inner filter that produces the input to outer. + */ + static sk_sp<SkImageFilter> Compose(sk_sp<SkImageFilter> outer, sk_sp<SkImageFilter> inner); + + /** + * Create a filter that moves each pixel in its color input based on an (x,y) vector encoded + * in its displacement input filter. Two color components of the displacement image are + * mapped into a vector as scale * (color[xChannel], color[yChannel]), where the channel + * selectors are one of R, G, B, or A. + * @param xChannelSelector RGBA channel that encodes the x displacement per pixel. + * @param yChannelSelector RGBA channel that encodes the y displacement per pixel. + * @param scale Scale applied to displacement extracted from image. + * @param displacement The filter defining the displacement image, or null to use source. + * @param color The filter providing the color pixels to be displaced. + * @param cropRect Optional rectangle that crops the color input and output. + */ + static sk_sp<SkImageFilter> DisplacementMap(SkColorChannel xChannelSelector, + SkColorChannel yChannelSelector, + SkScalar scale, sk_sp<SkImageFilter> displacement, + sk_sp<SkImageFilter> color, + const CropRect& cropRect = {}); + + /** + * Create a filter that draws a drop shadow under the input content. This filter produces an + * image that includes the inputs' content. + * @param dx The X offset of the shadow. + * @param dy The Y offset of the shadow. + * @param sigmaX The blur radius for the shadow, along the X axis. + * @param sigmaY The blur radius for the shadow, along the Y axis. 
+ * @param color The color of the drop shadow. + * @param input The input filter, or will use the source bitmap if this is null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> DropShadow(SkScalar dx, SkScalar dy, + SkScalar sigmaX, SkScalar sigmaY, + SkColor color, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + /** + * Create a filter that renders a drop shadow, in exactly the same manner as ::DropShadow, + * except that the resulting image does not include the input content. This allows the shadow + * and input to be composed by a filter DAG in a more flexible manner. + * @param dx The X offset of the shadow. + * @param dy The Y offset of the shadow. + * @param sigmaX The blur radius for the shadow, along the X axis. + * @param sigmaY The blur radius for the shadow, along the Y axis. + * @param color The color of the drop shadow. + * @param input The input filter, or will use the source bitmap if this is null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> DropShadowOnly(SkScalar dx, SkScalar dy, + SkScalar sigmaX, SkScalar sigmaY, + SkColor color, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that draws the 'srcRect' portion of image into 'dstRect' using the given + * filter quality. Similar to SkCanvas::drawImageRect. Returns null if 'image' is null. + * @param image The image that is output by the filter, subset by 'srcRect'. + * @param srcRect The source pixels sampled into 'dstRect' + * @param dstRect The local rectangle to draw the image into. + * @param sampling The sampling to use when drawing the image. + */ + static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image, const SkRect& srcRect, + const SkRect& dstRect, const SkSamplingOptions& sampling); + + /** + * Create a filter that draws the image using the given sampling. + * Similar to SkCanvas::drawImage. Returns null if 'image' is null. + * @param image The image that is output by the filter. + * @param sampling The sampling to use when drawing the image. + */ + static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image, const SkSamplingOptions& sampling) { + if (image) { + SkRect r = SkRect::Make(image->bounds()); + return Image(std::move(image), r, r, sampling); + } else { + return nullptr; + } + } + + /** + * Create a filter that draws the image using Mitchel cubic resampling. + * @param image The image that is output by the filter. + */ + static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image) { + return Image(std::move(image), SkSamplingOptions({1/3.0f, 1/3.0f})); + } + + /** + * Create a filter that mimics a zoom/magnifying lens effect. + * @param srcRect + * @param inset + * @param input The input filter that is magnified, if null the source bitmap is used. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> Magnifier(const SkRect& srcRect, SkScalar inset, + sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that applies an NxM image processing kernel to the input image. This can be + * used to produce effects such as sharpening, blurring, edge detection, etc. + * @param kernelSize The kernel size in pixels, in each dimension (N by M). + * @param kernel The image processing kernel. Must contain N * M elements, in row order. + * @param gain A scale factor applied to each pixel after convolution. 
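DropShadow and Blur above are the filters most commonly attached directly to a paint. A sketch drawing a rectangle with a soft shadow; the offsets, sigmas, and colors are arbitrary and 'canvas' is an assumed SkCanvas*:

#include "include/core/SkCanvas.h"
#include "include/core/SkPaint.h"
#include "include/core/SkRect.h"
#include "include/effects/SkImageFilters.h"

static void DrawShadowedRect(SkCanvas* canvas) {
    SkPaint paint;
    paint.setColor(SK_ColorCYAN);
    // Shadow offset (4, 4), blurred by sigma 2 on each axis; nullptr input = source content.
    paint.setImageFilter(SkImageFilters::DropShadow(4, 4, 2, 2, SK_ColorBLACK,
                                                    /*input=*/nullptr));
    canvas->drawRect(SkRect::MakeXYWH(32, 32, 128, 128), paint);
}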
This can be + * used to normalize the kernel, if it does not already sum to 1. + * @param bias A bias factor added to each pixel after convolution. + * @param kernelOffset An offset applied to each pixel coordinate before convolution. + * This can be used to center the kernel over the image + * (e.g., a 3x3 kernel should have an offset of {1, 1}). + * @param tileMode How accesses outside the image are treated. + * TODO (michaelludwig) - kMirror is not supported yet + * @param convolveAlpha If true, all channels are convolved. If false, only the RGB channels + * are convolved, and alpha is copied from the source image. + * @param input The input image filter, if null the source bitmap is used instead. + * @param cropRect Optional rectangle to which the output processing will be limited. + */ + static sk_sp<SkImageFilter> MatrixConvolution(const SkISize& kernelSize, + const SkScalar kernel[], SkScalar gain, + SkScalar bias, const SkIPoint& kernelOffset, + SkTileMode tileMode, bool convolveAlpha, + sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that transforms the input image by 'matrix'. This matrix transforms the + * local space, which means it effectively happens prior to any transformation coming from the + * SkCanvas initiating the filtering. + * @param matrix The matrix to apply to the original content. + * @param sampling How the image will be sampled when it is transformed + * @param input The image filter to transform, or null to use the source image. + */ + static sk_sp<SkImageFilter> MatrixTransform(const SkMatrix& matrix, + const SkSamplingOptions& sampling, + sk_sp<SkImageFilter> input); + + /** + * Create a filter that merges the 'count' filters together by drawing their results in order + * with src-over blending. + * @param filters The input filter array to merge, which must have 'count' elements. Any null + * filter pointers will use the source bitmap instead. + * @param count The number of input filters to be merged. + * @param cropRect Optional rectangle that crops all input filters and the output. + */ + static sk_sp<SkImageFilter> Merge(sk_sp<SkImageFilter>* const filters, int count, + const CropRect& cropRect = {}); + /** + * Create a filter that merges the results of the two filters together with src-over blending. + * @param first The first input filter, or the source bitmap if this is null. + * @param second The second input filter, or the source bitmap if this null. + * @param cropRect Optional rectangle that crops the inputs and output. + */ + static sk_sp<SkImageFilter> Merge(sk_sp<SkImageFilter> first, sk_sp<SkImageFilter> second, + const CropRect& cropRect = {}) { + sk_sp<SkImageFilter> array[] = { std::move(first), std::move(second) }; + return Merge(array, 2, cropRect); + } + + /** + * Create a filter that offsets the input filter by the given vector. + * @param dx The x offset in local space that the image is shifted. + * @param dy The y offset in local space that the image is shifted. + * @param input The input that will be moved, if null the source bitmap is used instead. + * @param cropRect Optional rectangle to crop the input and output. + */ + static sk_sp<SkImageFilter> Offset(SkScalar dx, SkScalar dy, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that fills the output with the given paint. + * @param paint The paint to fill + * @param cropRect Optional rectangle that will be filled. 
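Offset and Merge above compose naturally: shift a copy of the source and merge it back over the original. A sketch, with an arbitrary 8-pixel offset; ApplyEchoFilter is an illustrative name:

#include <utility>

#include "include/core/SkPaint.h"
#include "include/effects/SkImageFilters.h"

static void ApplyEchoFilter(SkPaint* paint) {
    sk_sp<SkImageFilter> shifted = SkImageFilters::Offset(8, 8, /*input=*/nullptr);
    // The first merge input is the unshifted source (nullptr); the second is drawn on top.
    paint->setImageFilter(SkImageFilters::Merge(/*first=*/nullptr, std::move(shifted)));
}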
If null, the source bitmap's bounds + * are filled even though the source bitmap itself is not used. + * + * DEPRECATED: Use Shader() instead, since many features of SkPaint are ignored when filling + * the target output, and paint color/alpha can be emulated with SkShaders::Color(). + */ + static sk_sp<SkImageFilter> Paint(const SkPaint& paint, const CropRect& cropRect = {}); + + /** + * Create a filter that produces the SkPicture as its output, drawn into targetRect. Note that + * the targetRect is not the same as the SkIRect cropRect that many filters accept. Returns + * null if 'pic' is null. + * @param pic The picture that is drawn for the filter output. + * @param targetRect The drawing region for the picture. + */ + static sk_sp<SkImageFilter> Picture(sk_sp<SkPicture> pic, const SkRect& targetRect); + // As above, but uses SkPicture::cullRect for the drawing region. + static sk_sp<SkImageFilter> Picture(sk_sp<SkPicture> pic) { + SkRect target = pic ? pic->cullRect() : SkRect::MakeEmpty(); + return Picture(std::move(pic), target); + } + +#ifdef SK_ENABLE_SKSL + /** + * Create a filter that fills the output with the per-pixel evaluation of the SkShader produced + * by the SkRuntimeShaderBuilder. The shader is defined in the image filter's local coordinate + * system, so it will automatically be affected by SkCanvas' transform. + * + * @param builder The builder used to produce the runtime shader, that will in turn + * fill the result image + * @param childShaderName The name of the child shader defined in the builder that will be + * bound to the input param (or the source image if the input param + * is null). If null the builder can have exactly one child shader, + * which automatically binds the input param. + * @param input The image filter that will be provided as input to the runtime + * shader. If null the implicit source image is used instead + */ + static sk_sp<SkImageFilter> RuntimeShader(const SkRuntimeShaderBuilder& builder, + const char* childShaderName, + sk_sp<SkImageFilter> input); + + /** + * Create a filter that fills the output with the per-pixel evaluation of the SkShader produced + * by the SkRuntimeShaderBuilder. The shader is defined in the image filter's local coordinate + * system, so it will automatically be affected by SkCanvas' transform. + * + * @param builder The builder used to produce the runtime shader, that will in turn + * fill the result image + * @param childShaderNames The names of the child shaders defined in the builder that will be + * bound to the input params (or the source image if the input param + * is null). If any name is null, or appears more than once, factory + * fails and returns nullptr. + * @param inputs The image filters that will be provided as input to the runtime + * shader. If any are null, the implicit source image is used instead. + * @param inputCount How many entries are present in 'childShaderNames' and 'inputs'. + */ + static sk_sp<SkImageFilter> RuntimeShader(const SkRuntimeShaderBuilder& builder, + const char* childShaderNames[], + const sk_sp<SkImageFilter> inputs[], + int inputCount); +#endif // SK_ENABLE_SKSL + + enum class Dither : bool { + kNo = false, + kYes = true + }; + + /** + * Create a filter that fills the output with the per-pixel evaluation of the SkShader. The + * shader is defined in the image filter's local coordinate system, so will automatically + * be affected by SkCanvas' transform. 
+ * + * Like Image() and Picture(), this is a leaf filter that can be used to introduce inputs to + * a complex filter graph, but should generally be combined with a filter that as at least + * one null input to use the implicit source image. + * @param shader The shader that fills the result image + */ + static sk_sp<SkImageFilter> Shader(sk_sp<SkShader> shader, const CropRect& cropRect = {}) { + return Shader(std::move(shader), Dither::kNo, cropRect); + } + static sk_sp<SkImageFilter> Shader(sk_sp<SkShader> shader, Dither dither, + const CropRect& cropRect = {}); + + /** + * Create a tile image filter. + * @param src Defines the pixels to tile + * @param dst Defines the pixel region that the tiles will be drawn to + * @param input The input that will be tiled, if null the source bitmap is used instead. + */ + static sk_sp<SkImageFilter> Tile(const SkRect& src, const SkRect& dst, + sk_sp<SkImageFilter> input); + + // Morphology filter effects + + /** + * Create a filter that dilates each input pixel's channel values to the max value within the + * given radii along the x and y axes. + * @param radiusX The distance to dilate along the x axis to either side of each pixel. + * @param radiusY The distance to dilate along the y axis to either side of each pixel. + * @param input The image filter that is dilated, using source bitmap if this is null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> Dilate(SkScalar radiusX, SkScalar radiusY, + sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that erodes each input pixel's channel values to the minimum channel value + * within the given radii along the x and y axes. + * @param radiusX The distance to erode along the x axis to either side of each pixel. + * @param radiusY The distance to erode along the y axis to either side of each pixel. + * @param input The image filter that is eroded, using source bitmap if this is null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> Erode(SkScalar radiusX, SkScalar radiusY, + sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + // Lighting filter effects + + /** + * Create a filter that calculates the diffuse illumination from a distant light source, + * interpreting the alpha channel of the input as the height profile of the surface (to + * approximate normal vectors). + * @param direction The direction to the distance light. + * @param lightColor The color of the diffuse light source. + * @param surfaceScale Scale factor to transform from alpha values to physical height. + * @param kd Diffuse reflectance coefficient. + * @param input The input filter that defines surface normals (as alpha), or uses the + * source bitmap when null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> DistantLitDiffuse(const SkPoint3& direction, SkColor lightColor, + SkScalar surfaceScale, SkScalar kd, + sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + /** + * Create a filter that calculates the diffuse illumination from a point light source, using + * alpha channel of the input as the height profile of the surface (to approximate normal + * vectors). + * @param location The location of the point light. + * @param lightColor The color of the diffuse light source. + * @param surfaceScale Scale factor to transform from alpha values to physical height. 
+ * @param kd Diffuse reflectance coefficient. + * @param input The input filter that defines surface normals (as alpha), or uses the + * source bitmap when null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> PointLitDiffuse(const SkPoint3& location, SkColor lightColor, + SkScalar surfaceScale, SkScalar kd, + sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + /** + * Create a filter that calculates the diffuse illumination from a spot light source, using + * alpha channel of the input as the height profile of the surface (to approximate normal + * vectors). The spot light is restricted to be within 'cutoffAngle' of the vector between + * the location and target. + * @param location The location of the spot light. + * @param target The location that the spot light is point towards + * @param falloffExponent Exponential falloff parameter for illumination outside of cutoffAngle + * @param cutoffAngle Maximum angle from lighting direction that receives full light + * @param lightColor The color of the diffuse light source. + * @param surfaceScale Scale factor to transform from alpha values to physical height. + * @param kd Diffuse reflectance coefficient. + * @param input The input filter that defines surface normals (as alpha), or uses the + * source bitmap when null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> SpotLitDiffuse(const SkPoint3& location, const SkPoint3& target, + SkScalar falloffExponent, SkScalar cutoffAngle, + SkColor lightColor, SkScalar surfaceScale, + SkScalar kd, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + + /** + * Create a filter that calculates the specular illumination from a distant light source, + * interpreting the alpha channel of the input as the height profile of the surface (to + * approximate normal vectors). + * @param direction The direction to the distance light. + * @param lightColor The color of the specular light source. + * @param surfaceScale Scale factor to transform from alpha values to physical height. + * @param ks Specular reflectance coefficient. + * @param shininess The specular exponent determining how shiny the surface is. + * @param input The input filter that defines surface normals (as alpha), or uses the + * source bitmap when null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> DistantLitSpecular(const SkPoint3& direction, SkColor lightColor, + SkScalar surfaceScale, SkScalar ks, + SkScalar shininess, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + /** + * Create a filter that calculates the specular illumination from a point light source, using + * alpha channel of the input as the height profile of the surface (to approximate normal + * vectors). + * @param location The location of the point light. + * @param lightColor The color of the specular light source. + * @param surfaceScale Scale factor to transform from alpha values to physical height. + * @param ks Specular reflectance coefficient. + * @param shininess The specular exponent determining how shiny the surface is. + * @param input The input filter that defines surface normals (as alpha), or uses the + * source bitmap when null. + * @param cropRect Optional rectangle that crops the input and output. 
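+     *
+     *  Illustrative usage sketch: a specular highlight from a point light above the upper-left
+     *  corner, using the source image's alpha as the height map ('paint' is an assumed SkPaint):
+     *
+     *      paint.setImageFilter(SkImageFilters::PointLitSpecular(
+     *              SkPoint3::Make(0, 0, 100),  // location
+     *              SK_ColorWHITE,              // lightColor
+     *              1.0f, 0.5f, 8.0f,           // surfaceScale, ks, shininess
+     *              nullptr));                  // input: null -> source bitmap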
+ */ + static sk_sp<SkImageFilter> PointLitSpecular(const SkPoint3& location, SkColor lightColor, + SkScalar surfaceScale, SkScalar ks, + SkScalar shininess, sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + /** + * Create a filter that calculates the specular illumination from a spot light source, using + * alpha channel of the input as the height profile of the surface (to approximate normal + * vectors). The spot light is restricted to be within 'cutoffAngle' of the vector between + * the location and target. + * @param location The location of the spot light. + * @param target The location that the spot light is point towards + * @param falloffExponent Exponential falloff parameter for illumination outside of cutoffAngle + * @param cutoffAngle Maximum angle from lighting direction that receives full light + * @param lightColor The color of the specular light source. + * @param surfaceScale Scale factor to transform from alpha values to physical height. + * @param ks Specular reflectance coefficient. + * @param shininess The specular exponent determining how shiny the surface is. + * @param input The input filter that defines surface normals (as alpha), or uses the + * source bitmap when null. + * @param cropRect Optional rectangle that crops the input and output. + */ + static sk_sp<SkImageFilter> SpotLitSpecular(const SkPoint3& location, const SkPoint3& target, + SkScalar falloffExponent, SkScalar cutoffAngle, + SkColor lightColor, SkScalar surfaceScale, + SkScalar ks, SkScalar shininess, + sk_sp<SkImageFilter> input, + const CropRect& cropRect = {}); + +private: + SkImageFilters() = delete; +}; + +#endif // SkImageFilters_DEFINED diff --git a/src/deps/skia/include/effects/SkLayerDrawLooper.h b/src/deps/skia/include/effects/SkLayerDrawLooper.h new file mode 100644 index 000000000..1e875b58c --- /dev/null +++ b/src/deps/skia/include/effects/SkLayerDrawLooper.h @@ -0,0 +1,161 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkLayerDrawLooper_DEFINED +#define SkLayerDrawLooper_DEFINED + +#include "include/core/SkBlendMode.h" +#include "include/core/SkDrawLooper.h" +#include "include/core/SkPaint.h" +#include "include/core/SkPoint.h" + +#ifndef SK_SUPPORT_LEGACY_DRAWLOOPER +#error "SkDrawLooper is unsupported" +#endif + +/** + * DEPRECATED: No longer supported by Skia. + */ +class SK_API SkLayerDrawLooper : public SkDrawLooper { +public: + ~SkLayerDrawLooper() override; + + /** + * Bits specifies which aspects of the layer's paint should replace the + * corresponding aspects on the draw's paint. + * kEntirePaint_Bits means use the layer's paint completely. + * 0 means ignore the layer's paint... except for fColorMode, which is + * always applied. + */ + enum Bits { + kStyle_Bit = 1 << 0, //!< use this layer's Style/stroke settings + kPathEffect_Bit = 1 << 2, //!< use this layer's patheffect + kMaskFilter_Bit = 1 << 3, //!< use this layer's maskfilter + kShader_Bit = 1 << 4, //!< use this layer's shader + kColorFilter_Bit = 1 << 5, //!< use this layer's colorfilter + kXfermode_Bit = 1 << 6, //!< use this layer's xfermode + + // unsupported kTextSkewX_Bit = 1 << 1, + + /** + * Use the layer's paint entirely, with these exceptions: + * - We never override the draw's paint's text_encoding, since that is + * used to interpret the text/len parameters in draw[Pos]Text. + * - Color is always computed using the LayerInfo's fColorMode. 
+ */ + kEntirePaint_Bits = -1 + + }; + typedef int32_t BitFlags; + + /** + * Info for how to apply the layer's paint and offset. + * + * fColorMode controls how we compute the final color for the layer: + * The layer's paint's color is treated as the SRC + * The draw's paint's color is treated as the DST + * final-color = Mode(layers-color, draws-color); + * Any SkBlendMode will work. Two common choices are: + * kSrc: to use the layer's color, ignoring the draw's + * kDst: to just keep the draw's color, ignoring the layer's + */ + struct SK_API LayerInfo { + BitFlags fPaintBits; + SkBlendMode fColorMode; + SkVector fOffset; + bool fPostTranslate; //!< applies to fOffset + + /** + * Initial the LayerInfo. Defaults to settings that will draw the + * layer with no changes: e.g. + * fPaintBits == 0 + * fColorMode == kDst_Mode + * fOffset == (0, 0) + */ + LayerInfo(); + }; + + SkDrawLooper::Context* makeContext(SkArenaAlloc*) const override; + + bool asABlurShadow(BlurShadowRec* rec) const override; + +protected: + SkLayerDrawLooper(); + + void flatten(SkWriteBuffer&) const override; + +private: + SK_FLATTENABLE_HOOKS(SkLayerDrawLooper) + + struct Rec { + Rec* fNext; + SkPaint fPaint; + LayerInfo fInfo; + }; + Rec* fRecs; + int fCount; + + // state-machine during the init/next cycle + class LayerDrawLooperContext : public SkDrawLooper::Context { + public: + explicit LayerDrawLooperContext(const SkLayerDrawLooper* looper); + + protected: + bool next(Info*, SkPaint* paint) override; + + private: + Rec* fCurrRec; + + static void ApplyInfo(SkPaint* dst, const SkPaint& src, const LayerInfo&); + }; + + using INHERITED = SkDrawLooper; + +public: + class SK_API Builder { + public: + Builder(); + + ~Builder(); + + /** + * Call for each layer you want to add (from top to bottom). + * This returns a paint you can modify, but that ptr is only valid until + * the next call made to addLayer(). + */ + SkPaint* addLayer(const LayerInfo&); + + /** + * This layer will draw with the original paint, at the specified offset + */ + void addLayer(SkScalar dx, SkScalar dy); + + /** + * This layer will with the original paint and no offset. + */ + void addLayer() { this->addLayer(0, 0); } + + /// Similar to addLayer, but adds a layer to the top. + SkPaint* addLayerOnTop(const LayerInfo&); + + /** + * Pass list of layers on to newly built looper and return it. This will + * also reset the builder, so it can be used to build another looper. + */ + sk_sp<SkDrawLooper> detach(); + + private: + Builder(const Builder&) = delete; + Builder& operator=(const Builder&) = delete; + + Rec* fRecs; + Rec* fTopRec; + int fCount; + }; +}; + +#endif diff --git a/src/deps/skia/include/effects/SkLumaColorFilter.h b/src/deps/skia/include/effects/SkLumaColorFilter.h new file mode 100644 index 000000000..c230f524e --- /dev/null +++ b/src/deps/skia/include/effects/SkLumaColorFilter.h @@ -0,0 +1,34 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkLumaColorFilter_DEFINED +#define SkLumaColorFilter_DEFINED + +#include "include/core/SkColorFilter.h" + +/** + * SkLumaColorFilter multiplies the luma of its input into the alpha channel, + * and sets the red, green, and blue channels to zero. + * + * SkLumaColorFilter(r,g,b,a) = {0,0,0, a * luma(r,g,b)} + * + * This is similar to a luminanceToAlpha feColorMatrix, + * but note how this filter folds in the previous alpha, + * something an feColorMatrix cannot do. 
+ * + * feColorMatrix(luminanceToAlpha; r,g,b,a) = {0,0,0, luma(r,g,b)} + * + * (Despite its name, an feColorMatrix using luminanceToAlpha does + * actually compute luma, a dot-product of gamma-encoded color channels, + * not luminance, a dot-product of linear color channels. So at least + * SkLumaColorFilter and feColorMatrix+luminanceToAlpha agree there.) + */ +struct SK_API SkLumaColorFilter { + static sk_sp<SkColorFilter> Make(); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkOpPathEffect.h b/src/deps/skia/include/effects/SkOpPathEffect.h new file mode 100644 index 000000000..268789ed4 --- /dev/null +++ b/src/deps/skia/include/effects/SkOpPathEffect.h @@ -0,0 +1,39 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkOpPathEffect_DEFINED +#define SkOpPathEffect_DEFINED + +#include "include/core/SkMatrix.h" +#include "include/core/SkPaint.h" +#include "include/core/SkPathEffect.h" +#include "include/pathops/SkPathOps.h" + +class SK_API SkMergePathEffect { +public: + /* Defers to two other patheffects, and then combines their outputs using the specified op. + * e.g. + * result = output_one op output_two + * + * If either one or two is nullptr, then the original path is passed through to the op. + */ + static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two, SkPathOp op); +}; + +class SK_API SkMatrixPathEffect { +public: + static sk_sp<SkPathEffect> MakeTranslate(SkScalar dx, SkScalar dy); + static sk_sp<SkPathEffect> Make(const SkMatrix&); +}; + +class SK_API SkStrokePathEffect { +public: + static sk_sp<SkPathEffect> Make(SkScalar width, SkPaint::Join, SkPaint::Cap, + SkScalar miter = 4); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkOverdrawColorFilter.h b/src/deps/skia/include/effects/SkOverdrawColorFilter.h new file mode 100644 index 000000000..0d6fc9282 --- /dev/null +++ b/src/deps/skia/include/effects/SkOverdrawColorFilter.h @@ -0,0 +1,29 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "include/core/SkColorFilter.h" +#include "include/core/SkFlattenable.h" + +#ifndef SkOverdrawColorFilter_DEFINED +#define SkOverdrawColorFilter_DEFINED + +/** + * Uses the value in the src alpha channel to set the dst pixel. + * 0 -> colors[0] + * 1 -> colors[1] + * ... + * 5 (or larger) -> colors[5] + * + */ +class SK_API SkOverdrawColorFilter { +public: + static constexpr int kNumColors = 6; + + static sk_sp<SkColorFilter> MakeWithSkColors(const SkColor[kNumColors]); +}; + +#endif // SkOverdrawColorFilter_DEFINED diff --git a/src/deps/skia/include/effects/SkPerlinNoiseShader.h b/src/deps/skia/include/effects/SkPerlinNoiseShader.h new file mode 100644 index 000000000..f94b3420f --- /dev/null +++ b/src/deps/skia/include/effects/SkPerlinNoiseShader.h @@ -0,0 +1,54 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPerlinNoiseShader_DEFINED +#define SkPerlinNoiseShader_DEFINED + +#include "include/core/SkShader.h" + +/** \class SkPerlinNoiseShader + + SkPerlinNoiseShader creates an image using the Perlin turbulence function. + + It can produce tileable noise if asked to stitch tiles and provided a tile size. 
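+
+    For example, a minimal sketch of a non-tiling fractal noise fill ('paint' is an assumed
+    SkPaint):
+
+        paint.setShader(SkPerlinNoiseShader::MakeFractalNoise(0.05f, 0.05f, 4, 0.0f));
+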
+ In order to fill a large area with repeating noise, set the stitchTiles flag to + true, and render exactly a single tile of noise. Without this flag, the result + will contain visible seams between tiles. + + The algorithm used is described here : + http://www.w3.org/TR/SVG/filters.html#feTurbulenceElement +*/ +class SK_API SkPerlinNoiseShader { +public: + /** + * This will construct Perlin noise of the given type (Fractal Noise or Turbulence). + * + * Both base frequencies (X and Y) have a usual range of (0..1) and must be non-negative. + * + * The number of octaves provided should be fairly small, with a limit of 255 enforced. + * Each octave doubles the frequency, so 10 octaves would produce noise from + * baseFrequency * 1, * 2, * 4, ..., * 512, which quickly yields insignificantly small + * periods and resembles regular unstructured noise rather than Perlin noise. + * + * If tileSize isn't NULL or an empty size, the tileSize parameter will be used to modify + * the frequencies so that the noise will be tileable for the given tile size. If tileSize + * is NULL or an empty size, the frequencies will be used as is without modification. + */ + static sk_sp<SkShader> MakeFractalNoise(SkScalar baseFrequencyX, SkScalar baseFrequencyY, + int numOctaves, SkScalar seed, + const SkISize* tileSize = nullptr); + static sk_sp<SkShader> MakeTurbulence(SkScalar baseFrequencyX, SkScalar baseFrequencyY, + int numOctaves, SkScalar seed, + const SkISize* tileSize = nullptr); + + static void RegisterFlattenables(); + +private: + SkPerlinNoiseShader() = delete; +}; + +#endif diff --git a/src/deps/skia/include/effects/SkRuntimeEffect.h b/src/deps/skia/include/effects/SkRuntimeEffect.h new file mode 100644 index 000000000..d5002592a --- /dev/null +++ b/src/deps/skia/include/effects/SkRuntimeEffect.h @@ -0,0 +1,518 @@ +/* + * Copyright 2019 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRuntimeEffect_DEFINED +#define SkRuntimeEffect_DEFINED + +#include "include/core/SkBlender.h" +#include "include/core/SkColorFilter.h" +#include "include/core/SkData.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkMatrix.h" +#include "include/core/SkShader.h" +#include "include/core/SkSpan.h" +#include "include/core/SkString.h" +#include "include/private/SkOnce.h" +#include "include/private/SkSLSampleUsage.h" +#include "include/private/SkTOptional.h" + +#include <string> +#include <vector> + +#ifdef SK_ENABLE_SKSL + +class GrRecordingContext; +class SkFilterColorProgram; +class SkImage; +class SkRuntimeImageFilter; + +namespace SkSL { +class DebugTrace; +class ErrorReporter; +class FunctionDefinition; +struct Program; +enum class ProgramKind : int8_t; +struct ProgramSettings; +} // namespace SkSL + +namespace skvm { +class Program; +} // namespace skvm + +/* + * SkRuntimeEffect supports creating custom SkShader and SkColorFilter objects using Skia's SkSL + * shading language. + * + * NOTE: This API is experimental and subject to change. + */ +class SK_API SkRuntimeEffect : public SkRefCnt { +public: + // Reflected description of a uniform variable in the effect's SkSL + struct Uniform { + enum class Type { + kFloat, + kFloat2, + kFloat3, + kFloat4, + kFloat2x2, + kFloat3x3, + kFloat4x4, + kInt, + kInt2, + kInt3, + kInt4, + }; + + enum Flags { + // Uniform is an declared as an array. 'count' contains array length. + kArray_Flag = 0x1, + + // Uniform is declared with layout(color). 
Colors should be supplied as unpremultiplied, + // extended-range (unclamped) sRGB (ie SkColor4f). The uniform will be automatically + // transformed to unpremultiplied extended-range working-space colors. + kColor_Flag = 0x2, + }; + + SkString name; + size_t offset; + Type type; + int count; + uint32_t flags; + + bool isArray() const { return SkToBool(this->flags & kArray_Flag); } + bool isColor() const { return SkToBool(this->flags & kColor_Flag); } + size_t sizeInBytes() const; + }; + + // Reflected description of a uniform child (shader or colorFilter) in the effect's SkSL + enum class ChildType { + kShader, + kColorFilter, + kBlender, + }; + + struct Child { + SkString name; + ChildType type; + int index; + }; + + class Options { + public: + // For testing purposes, completely disable the inliner. (Normally, Runtime Effects don't + // run the inliner directly, but they still get an inlining pass once they are painted.) + bool forceNoInline = false; + + private: + friend class SkRuntimeEffect; + friend class SkRuntimeEffectPriv; + + // This flag lifts the ES2 restrictions on Runtime Effects that are gated by the + // `strictES2Mode` check. Be aware that the software renderer and pipeline-stage effect are + // still largely ES3-unaware and can still fail or crash if post-ES2 features are used. + // This is only intended for use by tests and certain internally created effects. + bool enforceES2Restrictions = true; + + // Similarly: Public SkSL does not allow access to sk_FragCoord. The semantics of that + // variable are confusing, and expose clients to implementation details of saveLayer and + // image filters. + bool allowFragCoord = false; + }; + + // If the effect is compiled successfully, `effect` will be non-null. + // Otherwise, `errorText` will contain the reason for failure. + struct Result { + sk_sp<SkRuntimeEffect> effect; + SkString errorText; + }; + + // MakeForColorFilter and MakeForShader verify that the SkSL code is valid for those stages of + // the Skia pipeline. In all of the signatures described below, color parameters and return + // values are flexible. They are listed as being 'vec4', but they can also be 'half4' or + // 'float4'. ('vec4' is an alias for 'float4'). + + // We can't use a default argument for `options` due to a bug in Clang. + // https://bugs.llvm.org/show_bug.cgi?id=36684 + + // Color filter SkSL requires an entry point that looks like: + // vec4 main(vec4 inColor) { ... } + static Result MakeForColorFilter(SkString sksl, const Options&); + static Result MakeForColorFilter(SkString sksl) { + return MakeForColorFilter(std::move(sksl), Options{}); + } + + // Shader SkSL requires an entry point that looks like: + // vec4 main(vec2 inCoords) { ... } + // -or- + // vec4 main(vec2 inCoords, vec4 inColor) { ... } + // + // Most shaders don't use the input color, so that parameter is optional. + static Result MakeForShader(SkString sksl, const Options&); + static Result MakeForShader(SkString sksl) { + return MakeForShader(std::move(sksl), Options{}); + } + + // Blend SkSL requires an entry point that looks like: + // vec4 main(vec4 srcColor, vec4 dstColor) { ... 
} + static Result MakeForBlender(SkString sksl, const Options&); + static Result MakeForBlender(SkString sksl) { + return MakeForBlender(std::move(sksl), Options{}); + } + + // DSL entry points + static Result MakeForColorFilter(std::unique_ptr<SkSL::Program> program, const Options&); + static Result MakeForColorFilter(std::unique_ptr<SkSL::Program> program); + + static Result MakeForShader(std::unique_ptr<SkSL::Program> program, const Options&); + static Result MakeForShader(std::unique_ptr<SkSL::Program> program); + static sk_sp<SkRuntimeEffect> MakeForShader(std::unique_ptr<SkSL::Program> program, + const Options&, SkSL::ErrorReporter* errors); + + + static Result MakeForBlender(std::unique_ptr<SkSL::Program> program, const Options&); + static Result MakeForBlender(std::unique_ptr<SkSL::Program> program); + + // Object that allows passing a SkShader, SkColorFilter or SkBlender as a child + class ChildPtr { + public: + ChildPtr() = default; + ChildPtr(sk_sp<SkShader> s) : fChild(std::move(s)) {} + ChildPtr(sk_sp<SkColorFilter> cf) : fChild(std::move(cf)) {} + ChildPtr(sk_sp<SkBlender> b) : fChild(std::move(b)) {} + + skstd::optional<ChildType> type() const; + + SkShader* shader() const; + SkColorFilter* colorFilter() const; + SkBlender* blender() const; + SkFlattenable* flattenable() const { return fChild.get(); } + + private: + sk_sp<SkFlattenable> fChild; + }; + + sk_sp<SkShader> makeShader(sk_sp<SkData> uniforms, + sk_sp<SkShader> children[], + size_t childCount, + const SkMatrix* localMatrix, + bool isOpaque) const; + sk_sp<SkShader> makeShader(sk_sp<SkData> uniforms, + SkSpan<ChildPtr> children, + const SkMatrix* localMatrix, + bool isOpaque) const; + + sk_sp<SkImage> makeImage(GrRecordingContext*, + sk_sp<SkData> uniforms, + SkSpan<ChildPtr> children, + const SkMatrix* localMatrix, + SkImageInfo resultInfo, + bool mipmapped) const; + + sk_sp<SkColorFilter> makeColorFilter(sk_sp<SkData> uniforms) const; + sk_sp<SkColorFilter> makeColorFilter(sk_sp<SkData> uniforms, + sk_sp<SkColorFilter> children[], + size_t childCount) const; + sk_sp<SkColorFilter> makeColorFilter(sk_sp<SkData> uniforms, + SkSpan<ChildPtr> children) const; + + sk_sp<SkBlender> makeBlender(sk_sp<SkData> uniforms, SkSpan<ChildPtr> children = {}) const; + + /** + * Creates a new Runtime Effect patterned after an already-existing one. The new shader behaves + * like the original, but also creates a debug trace of its execution at the requested + * coordinate. After painting with this shader, the associated DebugTrace object will contain a + * shader execution trace. Call `writeTrace` on the debug trace object to generate a full trace + * suitable for a debugger, or call `dump` to emit a human-readable trace. + * + * Debug traces are only supported on a raster (non-GPU) canvas. + + * Debug traces are currently only supported on shaders. Color filter and blender tracing is a + * work-in-progress. + */ + struct TracedShader { + sk_sp<SkShader> shader; + sk_sp<SkSL::DebugTrace> debugTrace; + }; + static TracedShader MakeTraced(sk_sp<SkShader> shader, const SkIPoint& traceCoord); + + // Returns the SkSL source of the runtime effect shader. + const std::string& source() const; + + // Combined size of all 'uniform' variables. When calling makeColorFilter or makeShader, + // provide an SkData of this size, containing values for all of those variables. 
+ size_t uniformSize() const; + + SkSpan<const Uniform> uniforms() const { return SkMakeSpan(fUniforms); } + SkSpan<const Child> children() const { return SkMakeSpan(fChildren); } + + // Returns pointer to the named uniform variable's description, or nullptr if not found + const Uniform* findUniform(const char* name) const; + + // Returns pointer to the named child's description, or nullptr if not found + const Child* findChild(const char* name) const; + + static void RegisterFlattenables(); + ~SkRuntimeEffect() override; + +private: + enum Flags { + kUsesSampleCoords_Flag = 0x01, + kAllowColorFilter_Flag = 0x02, + kAllowShader_Flag = 0x04, + kAllowBlender_Flag = 0x08, + kSamplesOutsideMain_Flag = 0x10, + kUsesColorTransform_Flag = 0x20, + }; + + SkRuntimeEffect(std::unique_ptr<SkSL::Program> baseProgram, + const Options& options, + const SkSL::FunctionDefinition& main, + std::vector<Uniform>&& uniforms, + std::vector<Child>&& children, + std::vector<SkSL::SampleUsage>&& sampleUsages, + uint32_t flags); + + sk_sp<SkRuntimeEffect> makeUnoptimizedClone(); + + static Result MakeFromSource(SkString sksl, const Options& options, SkSL::ProgramKind kind); + + static Result MakeFromDSL(std::unique_ptr<SkSL::Program> program, + const Options& options, + SkSL::ProgramKind kind); + + static sk_sp<SkRuntimeEffect> MakeFromDSL(std::unique_ptr<SkSL::Program> program, + const Options& options, + SkSL::ProgramKind kind, + SkSL::ErrorReporter* errors); + + static Result MakeInternal(std::unique_ptr<SkSL::Program> program, + const Options& options, + SkSL::ProgramKind kind); + + static SkSL::ProgramSettings MakeSettings(const Options& options, bool optimize); + + uint32_t hash() const { return fHash; } + bool usesSampleCoords() const { return (fFlags & kUsesSampleCoords_Flag); } + bool allowShader() const { return (fFlags & kAllowShader_Flag); } + bool allowColorFilter() const { return (fFlags & kAllowColorFilter_Flag); } + bool allowBlender() const { return (fFlags & kAllowBlender_Flag); } + bool samplesOutsideMain() const { return (fFlags & kSamplesOutsideMain_Flag); } + bool usesColorTransform() const { return (fFlags & kUsesColorTransform_Flag); } + + const SkFilterColorProgram* getFilterColorProgram(); + +#if SK_SUPPORT_GPU + friend class GrSkSLFP; // fBaseProgram, fSampleUsages + friend class GrGLSLSkSLFP; // +#endif + + friend class SkRTShader; // fBaseProgram, fMain + friend class SkRuntimeBlender; // + friend class SkRuntimeColorFilter; // + + friend class SkFilterColorProgram; + friend class SkRuntimeEffectPriv; + + uint32_t fHash; + + std::unique_ptr<SkSL::Program> fBaseProgram; + const SkSL::FunctionDefinition& fMain; + std::vector<Uniform> fUniforms; + std::vector<Child> fChildren; + std::vector<SkSL::SampleUsage> fSampleUsages; + + std::unique_ptr<SkFilterColorProgram> fFilterColorProgram; + + uint32_t fFlags; // Flags +}; + +/** Base class for SkRuntimeShaderBuilder, defined below. */ +class SkRuntimeEffectBuilder { +public: + struct BuilderUniform { + // Copy 'val' to this variable. No type conversion is performed - 'val' must be same + // size as expected by the effect. Information about the variable can be queried by + // looking at fVar. If the size is incorrect, no copy will be performed, and debug + // builds will abort. If this is the result of querying a missing variable, fVar will + // be nullptr, and assigning will also do nothing (and abort in debug builds). 
+ template <typename T> + std::enable_if_t<std::is_trivially_copyable<T>::value, BuilderUniform&> operator=( + const T& val) { + if (!fVar) { + SkDEBUGFAIL("Assigning to missing variable"); + } else if (sizeof(val) != fVar->sizeInBytes()) { + SkDEBUGFAIL("Incorrect value size"); + } else { + memcpy(SkTAddOffset<void>(fOwner->writableUniformData(), fVar->offset), + &val, sizeof(val)); + } + return *this; + } + + BuilderUniform& operator=(const SkMatrix& val) { + if (!fVar) { + SkDEBUGFAIL("Assigning to missing variable"); + } else if (fVar->sizeInBytes() != 9 * sizeof(float)) { + SkDEBUGFAIL("Incorrect value size"); + } else { + float* data = SkTAddOffset<float>(fOwner->writableUniformData(), + (ptrdiff_t)fVar->offset); + data[0] = val.get(0); data[1] = val.get(3); data[2] = val.get(6); + data[3] = val.get(1); data[4] = val.get(4); data[5] = val.get(7); + data[6] = val.get(2); data[7] = val.get(5); data[8] = val.get(8); + } + return *this; + } + + template <typename T> + bool set(const T val[], const int count) { + static_assert(std::is_trivially_copyable<T>::value, "Value must be trivial copyable"); + if (!fVar) { + SkDEBUGFAIL("Assigning to missing variable"); + return false; + } else if (sizeof(T) * count != fVar->sizeInBytes()) { + SkDEBUGFAIL("Incorrect value size"); + return false; + } else { + memcpy(SkTAddOffset<void>(fOwner->writableUniformData(), fVar->offset), + val, sizeof(T) * count); + } + return true; + } + + SkRuntimeEffectBuilder* fOwner; + const SkRuntimeEffect::Uniform* fVar; // nullptr if the variable was not found + }; + + struct BuilderChild { + template <typename T> BuilderChild& operator=(sk_sp<T> val) { + if (!fChild) { + SkDEBUGFAIL("Assigning to missing child"); + } else { + fOwner->fChildren[(size_t)fChild->index] = std::move(val); + } + return *this; + } + + BuilderChild& operator=(std::nullptr_t) { + if (!fChild) { + SkDEBUGFAIL("Assigning to missing child"); + } else { + fOwner->fChildren[(size_t)fChild->index] = SkRuntimeEffect::ChildPtr{}; + } + return *this; + } + + SkRuntimeEffectBuilder* fOwner; + const SkRuntimeEffect::Child* fChild; // nullptr if the child was not found + }; + + const SkRuntimeEffect* effect() const { return fEffect.get(); } + + BuilderUniform uniform(const char* name) { return { this, fEffect->findUniform(name) }; } + BuilderChild child(const char* name) { + const SkRuntimeEffect::Child* child = fEffect->findChild(name); + return { this, child }; + } + +protected: + SkRuntimeEffectBuilder() = delete; + explicit SkRuntimeEffectBuilder(sk_sp<SkRuntimeEffect> effect) + : fEffect(std::move(effect)) + , fUniforms(SkData::MakeZeroInitialized(fEffect->uniformSize())) + , fChildren(fEffect->children().size()) {} + explicit SkRuntimeEffectBuilder(sk_sp<SkRuntimeEffect> effect, sk_sp<SkData> uniforms) + : fEffect(std::move(effect)) + , fUniforms(std::move(uniforms)) + , fChildren(fEffect->children().size()) {} + + SkRuntimeEffectBuilder(SkRuntimeEffectBuilder&&) = default; + SkRuntimeEffectBuilder(const SkRuntimeEffectBuilder&) = default; + + SkRuntimeEffectBuilder& operator=(SkRuntimeEffectBuilder&&) = delete; + SkRuntimeEffectBuilder& operator=(const SkRuntimeEffectBuilder&) = delete; + + sk_sp<SkData> uniforms() { return fUniforms; } + SkRuntimeEffect::ChildPtr* children() { return fChildren.data(); } + size_t numChildren() { return fChildren.size(); } + +private: + void* writableUniformData() { + if (!fUniforms->unique()) { + fUniforms = SkData::MakeWithCopy(fUniforms->data(), fUniforms->size()); + } + return fUniforms->writable_data(); + } 
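+    // Note: writableUniformData() is copy-on-write. If the uniform SkData is shared (e.g. with a
+    // shader or color filter created earlier from this builder), it is copied before being
+    // written, so the previously created object keeps its original uniform values.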
+ + sk_sp<SkRuntimeEffect> fEffect; + sk_sp<SkData> fUniforms; + std::vector<SkRuntimeEffect::ChildPtr> fChildren; +}; + +/** + * SkRuntimeShaderBuilder is a utility to simplify creating SkShader objects from SkRuntimeEffects. + * + * NOTE: Like SkRuntimeEffect, this API is experimental and subject to change! + * + * Given an SkRuntimeEffect, the SkRuntimeShaderBuilder manages creating an input data block and + * provides named access to the 'uniform' variables in that block, as well as named access + * to a list of child shader slots. Usage: + * + * sk_sp<SkRuntimeEffect> effect = ...; + * SkRuntimeShaderBuilder builder(effect); + * builder.uniform("some_uniform_float") = 3.14f; + * builder.uniform("some_uniform_matrix") = SkM44::Rotate(...); + * builder.child("some_child_effect") = mySkImage->makeShader(...); + * ... + * sk_sp<SkShader> shader = builder.makeShader(nullptr, false); + * + * Note that SkRuntimeShaderBuilder is built entirely on the public API of SkRuntimeEffect, + * so can be used as-is or serve as inspiration for other interfaces or binding techniques. + */ +class SK_API SkRuntimeShaderBuilder : public SkRuntimeEffectBuilder { +public: + explicit SkRuntimeShaderBuilder(sk_sp<SkRuntimeEffect>); + // This is currently required by Android Framework but may go away if that dependency + // can be removed. + SkRuntimeShaderBuilder(const SkRuntimeShaderBuilder&) = default; + ~SkRuntimeShaderBuilder(); + + sk_sp<SkShader> makeShader(const SkMatrix* localMatrix, bool isOpaque); + sk_sp<SkImage> makeImage(GrRecordingContext*, + const SkMatrix* localMatrix, + SkImageInfo resultInfo, + bool mipmapped); + +private: + using INHERITED = SkRuntimeEffectBuilder; + + explicit SkRuntimeShaderBuilder(sk_sp<SkRuntimeEffect> effect, sk_sp<SkData> uniforms) + : INHERITED(std::move(effect), std::move(uniforms)) {} + + friend class SkRuntimeImageFilter; +}; + +/** + * SkRuntimeBlendBuilder is a utility to simplify creation and uniform setup of runtime blenders. + */ +class SK_API SkRuntimeBlendBuilder : public SkRuntimeEffectBuilder { +public: + explicit SkRuntimeBlendBuilder(sk_sp<SkRuntimeEffect>); + ~SkRuntimeBlendBuilder(); + + SkRuntimeBlendBuilder(const SkRuntimeBlendBuilder&) = delete; + SkRuntimeBlendBuilder& operator=(const SkRuntimeBlendBuilder&) = delete; + + sk_sp<SkBlender> makeBlender(); + +private: + using INHERITED = SkRuntimeEffectBuilder; +}; + +#endif // SK_ENABLE_SKSL + +#endif // SkRuntimeEffect_DEFINED diff --git a/src/deps/skia/include/effects/SkShaderMaskFilter.h b/src/deps/skia/include/effects/SkShaderMaskFilter.h new file mode 100644 index 000000000..2b25d367a --- /dev/null +++ b/src/deps/skia/include/effects/SkShaderMaskFilter.h @@ -0,0 +1,24 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkShaderMaskFilter_DEFINED +#define SkShaderMaskFilter_DEFINED + +#include "include/core/SkMaskFilter.h" + +class SkShader; + +class SK_API SkShaderMaskFilter { +public: + static sk_sp<SkMaskFilter> Make(sk_sp<SkShader> shader); + +private: + static void RegisterFlattenables(); + friend class SkFlattenable; +}; + +#endif diff --git a/src/deps/skia/include/effects/SkStrokeAndFillPathEffect.h b/src/deps/skia/include/effects/SkStrokeAndFillPathEffect.h new file mode 100644 index 000000000..fbde64933 --- /dev/null +++ b/src/deps/skia/include/effects/SkStrokeAndFillPathEffect.h @@ -0,0 +1,28 @@ +/* + * Copyright 2020 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkStrokeAndFillPathEffect_DEFINED +#define SkStrokeAndFillPathEffect_DEFINED + +#include "include/core/SkPaint.h" +#include "include/core/SkPathEffect.h" +#include "include/pathops/SkPathOps.h" + +class SK_API SkStrokeAndFillPathEffect { +public: + /* If the paint is set to stroke, this will add the stroke and fill geometries + * together (hoping that the winding-direction works out). + * + * If the paint is set to fill, this effect is ignored. + * + * Note that if the paint is set to stroke and the stroke-width is 0, then + * this will turn the geometry into just a fill. + */ + static sk_sp<SkPathEffect> Make(); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkTableColorFilter.h b/src/deps/skia/include/effects/SkTableColorFilter.h new file mode 100644 index 000000000..ab964aa20 --- /dev/null +++ b/src/deps/skia/include/effects/SkTableColorFilter.h @@ -0,0 +1,42 @@ +/* +* Copyright 2015 Google Inc. +* +* Use of this source code is governed by a BSD-style license that can be +* found in the LICENSE file. +*/ + +#ifndef SkTableColorFilter_DEFINED +#define SkTableColorFilter_DEFINED + +#include "include/core/SkColorFilter.h" + +class SK_API SkTableColorFilter { +public: + /** + * Create a table colorfilter, copying the table into the filter, and + * applying it to all 4 components. + * a' = table[a]; + * r' = table[r]; + * g' = table[g]; + * b' = table[b]; + * Compoents are operated on in unpremultiplied space. If the incomming + * colors are premultiplied, they are temporarily unpremultiplied, then + * the table is applied, and then the result is remultiplied. + */ + static sk_sp<SkColorFilter> Make(const uint8_t table[256]); + + /** + * Create a table colorfilter, with a different table for each + * component [A, R, G, B]. If a given table is NULL, then it is + * treated as identity, with the component left unchanged. If a table + * is not null, then its contents are copied into the filter. + */ + static sk_sp<SkColorFilter> MakeARGB(const uint8_t tableA[256], + const uint8_t tableR[256], + const uint8_t tableG[256], + const uint8_t tableB[256]); + + static void RegisterFlattenables(); +}; + +#endif diff --git a/src/deps/skia/include/effects/SkTableMaskFilter.h b/src/deps/skia/include/effects/SkTableMaskFilter.h new file mode 100644 index 000000000..03535a6f9 --- /dev/null +++ b/src/deps/skia/include/effects/SkTableMaskFilter.h @@ -0,0 +1,37 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTableMaskFilter_DEFINED +#define SkTableMaskFilter_DEFINED + +#include "include/core/SkMaskFilter.h" +#include "include/core/SkScalar.h" + +/** \class SkTableMaskFilter + + Applies a table lookup on each of the alpha values in the mask. + Helper methods create some common tables (e.g. 
gamma, clipping) + */ +class SK_API SkTableMaskFilter { +public: + /** Utility that sets the gamma table + */ + static void MakeGammaTable(uint8_t table[256], SkScalar gamma); + + /** Utility that creates a clipping table: clamps values below min to 0 + and above max to 255, and rescales the remaining into 0..255 + */ + static void MakeClipTable(uint8_t table[256], uint8_t min, uint8_t max); + + static SkMaskFilter* Create(const uint8_t table[256]); + static SkMaskFilter* CreateGamma(SkScalar gamma); + static SkMaskFilter* CreateClip(uint8_t min, uint8_t max); + + SkTableMaskFilter() = delete; +}; + +#endif diff --git a/src/deps/skia/include/effects/SkTrimPathEffect.h b/src/deps/skia/include/effects/SkTrimPathEffect.h new file mode 100644 index 000000000..66de9f44b --- /dev/null +++ b/src/deps/skia/include/effects/SkTrimPathEffect.h @@ -0,0 +1,41 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTrimPathEffect_DEFINED +#define SkTrimPathEffect_DEFINED + +#include "include/core/SkPathEffect.h" + +class SK_API SkTrimPathEffect { +public: + enum class Mode { + kNormal, // return the subset path [start,stop] + kInverted, // return the complement/subset paths [0,start] + [stop,1] + }; + + /** + * Take start and stop "t" values (values between 0...1), and return a path that is that + * subset of the original path. + * + * e.g. + * Make(0.5, 1.0) --> return the 2nd half of the path + * Make(0.33333, 0.66667) --> return the middle third of the path + * + * The trim values apply to the entire path, so if it contains several contours, all of them + * are including in the calculation. + * + * startT and stopT must be 0..1 inclusive. If they are outside of that interval, they will + * be pinned to the nearest legal value. If either is NaN, null will be returned. + * + * Note: for Mode::kNormal, this will return one (logical) segment (even if it is spread + * across multiple contours). For Mode::kInverted, this will return 2 logical + * segments: stopT..1 and 0...startT, in this order. + */ + static sk_sp<SkPathEffect> Make(SkScalar startT, SkScalar stopT, Mode = Mode::kNormal); +}; + +#endif diff --git a/src/deps/skia/include/encode/BUILD.bazel b/src/deps/skia/include/encode/BUILD.bazel new file mode 100644 index 000000000..fc728d0d0 --- /dev/null +++ b/src/deps/skia/include/encode/BUILD.bazel @@ -0,0 +1,36 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkEncoder_hdr", + hdrs = ["SkEncoder.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkPixmap_hdr", + "//include/private:SkNoncopyable_hdr", + "//include/private:SkTemplates_hdr", + ], +) + +generated_cc_atom( + name = "SkJpegEncoder_hdr", + hdrs = ["SkJpegEncoder.h"], + visibility = ["//:__subpackages__"], + deps = [":SkEncoder_hdr"], +) + +generated_cc_atom( + name = "SkPngEncoder_hdr", + hdrs = ["SkPngEncoder.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkEncoder_hdr", + "//include/core:SkDataTable_hdr", + ], +) + +generated_cc_atom( + name = "SkWebpEncoder_hdr", + hdrs = ["SkWebpEncoder.h"], + visibility = ["//:__subpackages__"], + deps = [":SkEncoder_hdr"], +) diff --git a/src/deps/skia/include/encode/SkEncoder.h b/src/deps/skia/include/encode/SkEncoder.h new file mode 100644 index 000000000..1a9c37e7f --- /dev/null +++ b/src/deps/skia/include/encode/SkEncoder.h @@ -0,0 +1,42 @@ +/* + * Copyright 2017 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkEncoder_DEFINED +#define SkEncoder_DEFINED + +#include "include/core/SkPixmap.h" +#include "include/private/SkNoncopyable.h" +#include "include/private/SkTemplates.h" + +class SK_API SkEncoder : SkNoncopyable { +public: + + /** + * Encode |numRows| rows of input. If the caller requests more rows than are remaining + * in the src, this will encode all of the remaining rows. |numRows| must be greater + * than zero. + */ + bool encodeRows(int numRows); + + virtual ~SkEncoder() {} + +protected: + + virtual bool onEncodeRows(int numRows) = 0; + + SkEncoder(const SkPixmap& src, size_t storageBytes) + : fSrc(src) + , fCurrRow(0) + , fStorage(storageBytes) + {} + + const SkPixmap& fSrc; + int fCurrRow; + SkAutoTMalloc<uint8_t> fStorage; +}; + +#endif diff --git a/src/deps/skia/include/encode/SkJpegEncoder.h b/src/deps/skia/include/encode/SkJpegEncoder.h new file mode 100644 index 000000000..f2107f126 --- /dev/null +++ b/src/deps/skia/include/encode/SkJpegEncoder.h @@ -0,0 +1,97 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkJpegEncoder_DEFINED +#define SkJpegEncoder_DEFINED + +#include "include/encode/SkEncoder.h" + +class SkJpegEncoderMgr; +class SkWStream; + +class SK_API SkJpegEncoder : public SkEncoder { +public: + + enum class AlphaOption { + kIgnore, + kBlendOnBlack, + }; + + enum class Downsample { + /** + * Reduction by a factor of two in both the horizontal and vertical directions. + */ + k420, + + /** + * Reduction by a factor of two in the horizontal direction. + */ + k422, + + /** + * No downsampling. + */ + k444, + }; + + struct Options { + /** + * |fQuality| must be in [0, 100] where 0 corresponds to the lowest quality. + */ + int fQuality = 100; + + /** + * Choose the downsampling factor for the U and V components. This is only + * meaningful if the |src| is not kGray, since kGray will not be encoded as YUV. + * + * Our default value matches the libjpeg-turbo default. + */ + Downsample fDownsample = Downsample::k420; + + /** + * Jpegs must be opaque. This instructs the encoder on how to handle input + * images with alpha. + * + * The default is to ignore the alpha channel and treat the image as opaque. + * Another option is to blend the pixels onto a black background before encoding. + * In the second case, the encoder supports linear or legacy blending. + */ + AlphaOption fAlphaOption = AlphaOption::kIgnore; + }; + + /** + * Encode the |src| pixels to the |dst| stream. + * |options| may be used to control the encoding behavior. + * + * Returns true on success. Returns false on an invalid or unsupported |src|. + */ + static bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options); + + /** + * Create a jpeg encoder that will encode the |src| pixels to the |dst| stream. + * |options| may be used to control the encoding behavior. + * + * |dst| is unowned but must remain valid for the lifetime of the object. + * + * This returns nullptr on an invalid or unsupported |src|. 
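+     *
+     * Illustrative usage sketch (assumes a valid SkPixmap 'src' and SkWStream* 'dst'): encode
+     * incrementally, a block of rows at a time:
+     *
+     *     auto encoder = SkJpegEncoder::Make(dst, src, SkJpegEncoder::Options());
+     *     for (int y = 0; encoder && y < src.height(); y += 16) {
+     *         if (!encoder->encodeRows(16)) {  // encodes at most the remaining rows
+     *             break;
+     *         }
+     *     }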
+ */ + static std::unique_ptr<SkEncoder> Make(SkWStream* dst, const SkPixmap& src, + const Options& options); + + ~SkJpegEncoder() override; + +protected: + bool onEncodeRows(int numRows) override; + +private: + SkJpegEncoder(std::unique_ptr<SkJpegEncoderMgr>, const SkPixmap& src); + + std::unique_ptr<SkJpegEncoderMgr> fEncoderMgr; + using INHERITED = SkEncoder; +}; + +#endif diff --git a/src/deps/skia/include/encode/SkPngEncoder.h b/src/deps/skia/include/encode/SkPngEncoder.h new file mode 100644 index 000000000..ccfa292f7 --- /dev/null +++ b/src/deps/skia/include/encode/SkPngEncoder.h @@ -0,0 +1,99 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPngEncoder_DEFINED +#define SkPngEncoder_DEFINED + +#include "include/core/SkDataTable.h" +#include "include/encode/SkEncoder.h" + +class SkPngEncoderMgr; +class SkWStream; + +class SK_API SkPngEncoder : public SkEncoder { +public: + + enum class FilterFlag : int { + kZero = 0x00, + kNone = 0x08, + kSub = 0x10, + kUp = 0x20, + kAvg = 0x40, + kPaeth = 0x80, + kAll = kNone | kSub | kUp | kAvg | kPaeth, + }; + + struct Options { + /** + * Selects which filtering strategies to use. + * + * If a single filter is chosen, libpng will use that filter for every row. + * + * If multiple filters are chosen, libpng will use a heuristic to guess which filter + * will encode smallest, then apply that filter. This happens on a per row basis, + * different rows can use different filters. + * + * Using a single filter (or less filters) is typically faster. Trying all of the + * filters may help minimize the output file size. + * + * Our default value matches libpng's default. + */ + FilterFlag fFilterFlags = FilterFlag::kAll; + + /** + * Must be in [0, 9] where 9 corresponds to maximal compression. This value is passed + * directly to zlib. 0 is a special case to skip zlib entirely, creating dramatically + * larger pngs. + * + * Our default value matches libpng's default. + */ + int fZLibLevel = 6; + + /** + * Represents comments in the tEXt ancillary chunk of the png. + * The 2i-th entry is the keyword for the i-th comment, + * and the (2i + 1)-th entry is the text for the i-th comment. + */ + sk_sp<SkDataTable> fComments; + }; + + /** + * Encode the |src| pixels to the |dst| stream. + * |options| may be used to control the encoding behavior. + * + * Returns true on success. Returns false on an invalid or unsupported |src|. + */ + static bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options); + + /** + * Create a png encoder that will encode the |src| pixels to the |dst| stream. + * |options| may be used to control the encoding behavior. + * + * |dst| is unowned but must remain valid for the lifetime of the object. + * + * This returns nullptr on an invalid or unsupported |src|. 
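+     *
+     * Illustrative usage sketch (assumes a valid SkPixmap 'src' and SkWStream* 'dst'): trade
+     * output size for speed by limiting the filter search and lowering the zlib level:
+     *
+     *     SkPngEncoder::Options options;
+     *     options.fFilterFlags = SkPngEncoder::FilterFlag::kNone;
+     *     options.fZLibLevel = 1;
+     *     auto encoder = SkPngEncoder::Make(dst, src, options);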
+ */ + static std::unique_ptr<SkEncoder> Make(SkWStream* dst, const SkPixmap& src, + const Options& options); + + ~SkPngEncoder() override; + +protected: + bool onEncodeRows(int numRows) override; + + SkPngEncoder(std::unique_ptr<SkPngEncoderMgr>, const SkPixmap& src); + + std::unique_ptr<SkPngEncoderMgr> fEncoderMgr; + using INHERITED = SkEncoder; +}; + +static inline SkPngEncoder::FilterFlag operator|(SkPngEncoder::FilterFlag x, + SkPngEncoder::FilterFlag y) { + return (SkPngEncoder::FilterFlag)((int)x | (int)y); +} + +#endif diff --git a/src/deps/skia/include/encode/SkWebpEncoder.h b/src/deps/skia/include/encode/SkWebpEncoder.h new file mode 100644 index 000000000..6d1c85689 --- /dev/null +++ b/src/deps/skia/include/encode/SkWebpEncoder.h @@ -0,0 +1,48 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkWebpEncoder_DEFINED +#define SkWebpEncoder_DEFINED + +#include "include/encode/SkEncoder.h" + +class SkWStream; + +namespace SkWebpEncoder { + + enum class Compression { + kLossy, + kLossless, + }; + + struct SK_API Options { + /** + * |fCompression| determines whether we will use webp lossy or lossless compression. + * + * |fQuality| must be in [0.0f, 100.0f]. + * If |fCompression| is kLossy, |fQuality| corresponds to the visual quality of the + * encoding. Decreasing the quality will result in a smaller encoded image. + * If |fCompression| is kLossless, |fQuality| corresponds to the amount of effort + * put into the encoding. Lower values will compress faster into larger files, + * while larger values will compress slower into smaller files. + * + * This scheme is designed to match the libwebp API. + */ + Compression fCompression = Compression::kLossy; + float fQuality = 100.0f; + }; + + /** + * Encode the |src| pixels to the |dst| stream. + * |options| may be used to control the encoding behavior. + * + * Returns true on success. Returns false on an invalid or unsupported |src|. 
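+     *
+     * Illustrative usage sketch (assumes a valid SkPixmap 'src' and SkWStream* 'dst'): lossless
+     * encoding, where fQuality controls encoding effort rather than visual quality:
+     *
+     *     SkWebpEncoder::Options options;
+     *     options.fCompression = SkWebpEncoder::Compression::kLossless;
+     *     options.fQuality = 75.0f;
+     *     bool ok = SkWebpEncoder::Encode(dst, src, options);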
+ */ + SK_API bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options); +} // namespace SkWebpEncoder + +#endif diff --git a/src/deps/skia/include/gpu/BUILD.bazel b/src/deps/skia/include/gpu/BUILD.bazel new file mode 100644 index 000000000..9c81d2948 --- /dev/null +++ b/src/deps/skia/include/gpu/BUILD.bazel @@ -0,0 +1,168 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrBackendDrawableInfo_hdr", + hdrs = ["GrBackendDrawableInfo.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrTypes_hdr", + "//include/gpu/vk:GrVkTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrBackendSemaphore_hdr", + hdrs = ["GrBackendSemaphore.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrTypes_hdr", + "//include/gpu/gl:GrGLTypes_hdr", + "//include/gpu/mtl:GrMtlTypes_hdr", + "//include/gpu/vk:GrVkTypes_hdr", + "//include/private:GrD3DTypesMinimal_hdr", + ], +) + +generated_cc_atom( + name = "GrBackendSurfaceMutableState_hdr", + hdrs = ["GrBackendSurfaceMutableState.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrTypes_hdr", + "//include/private:GrVkTypesPriv_hdr", + ], +) + +generated_cc_atom( + name = "GrBackendSurface_hdr", + hdrs = ["GrBackendSurface.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrBackendSurfaceMutableState_hdr", + ":GrSurfaceInfo_hdr", + ":GrTypes_hdr", + "//include/gpu/dawn:GrDawnTypes_hdr", + "//include/gpu/gl:GrGLTypes_hdr", + "//include/gpu/mock:GrMockTypes_hdr", + "//include/gpu/mtl:GrMtlTypes_hdr", + "//include/gpu/vk:GrVkTypes_hdr", + "//include/private:GrD3DTypesMinimal_hdr", + "//include/private:GrGLTypesPriv_hdr", + "//include/private:GrVkTypesPriv_hdr", + ], +) + +generated_cc_atom( + name = "GrConfig_hdr", + hdrs = ["GrConfig.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "GrContextOptions_hdr", + hdrs = ["GrContextOptions.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrDriverBugWorkarounds_hdr", + ":GrTypes_hdr", + ":ShaderErrorHandler_hdr", + "//include/core:SkData_hdr", + "//include/core:SkString_hdr", + "//include/core:SkTypes_hdr", + "//include/private:GrTypesPriv_hdr", + ], +) + +generated_cc_atom( + name = "GrContextThreadSafeProxy_hdr", + hdrs = ["GrContextThreadSafeProxy.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrContextOptions_hdr", + ":GrTypes_hdr", + "//include/core:SkImageInfo_hdr", + "//include/core:SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "GrDirectContext_hdr", + hdrs = ["GrDirectContext.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrBackendSurface_hdr", + ":GrRecordingContext_hdr", + "//include/core:SkUnPreMultiply_hdr", + ], +) + +generated_cc_atom( + name = "GrDriverBugWorkaroundsAutogen_hdr", + hdrs = ["GrDriverBugWorkaroundsAutogen.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "GrDriverBugWorkarounds_hdr", + hdrs = ["GrDriverBugWorkarounds.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrDriverBugWorkaroundsAutogen_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrRecordingContext_hdr", + hdrs = ["GrRecordingContext.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/private:GrImageContext_hdr", + "//include/private:SkTArray_hdr", + ], +) + +generated_cc_atom( + name = "GrSurfaceInfo_hdr", + hdrs = ["GrSurfaceInfo.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrTypes_hdr", + 
"//include/private:GrD3DTypesMinimal_hdr", + "//include/private:GrDawnTypesPriv_hdr", + "//include/private:GrGLTypesPriv_hdr", + "//include/private:GrMockTypesPriv_hdr", + "//include/private:GrMtlTypesPriv_hdr", + "//include/private:GrVkTypesPriv_hdr", + ], +) + +generated_cc_atom( + name = "GrTypes_hdr", + hdrs = ["GrTypes.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrConfig_hdr", + "//include/core:SkMath_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrYUVABackendTextures_hdr", + hdrs = ["GrYUVABackendTextures.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrBackendSurface_hdr", + "//include/core:SkYUVAInfo_hdr", + ], +) + +generated_cc_atom( + name = "ShaderErrorHandler_hdr", + hdrs = ["ShaderErrorHandler.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) diff --git a/src/deps/skia/include/gpu/GrBackendDrawableInfo.h b/src/deps/skia/include/gpu/GrBackendDrawableInfo.h new file mode 100644 index 000000000..bda1e769f --- /dev/null +++ b/src/deps/skia/include/gpu/GrBackendDrawableInfo.h @@ -0,0 +1,44 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrBackendDrawableInfo_DEFINED +#define GrBackendDrawableInfo_DEFINED + +#include "include/gpu/GrTypes.h" + +#include "include/gpu/vk/GrVkTypes.h" + +class SK_API GrBackendDrawableInfo { +public: + // Creates an invalid backend drawable info. + GrBackendDrawableInfo() : fIsValid(false) {} + + GrBackendDrawableInfo(const GrVkDrawableInfo& info) + : fIsValid(true) + , fBackend(GrBackendApi::kVulkan) + , fVkInfo(info) {} + + // Returns true if the backend texture has been initialized. + bool isValid() const { return fIsValid; } + + GrBackendApi backend() const { return fBackend; } + + bool getVkDrawableInfo(GrVkDrawableInfo* outInfo) const { + if (this->isValid() && GrBackendApi::kVulkan == fBackend) { + *outInfo = fVkInfo; + return true; + } + return false; + } + +private: + bool fIsValid; + GrBackendApi fBackend; + GrVkDrawableInfo fVkInfo; +}; + +#endif diff --git a/src/deps/skia/include/gpu/GrBackendSemaphore.h b/src/deps/skia/include/gpu/GrBackendSemaphore.h new file mode 100644 index 000000000..83d70adf0 --- /dev/null +++ b/src/deps/skia/include/gpu/GrBackendSemaphore.h @@ -0,0 +1,140 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrBackendSemaphore_DEFINED +#define GrBackendSemaphore_DEFINED + +#include "include/gpu/GrTypes.h" + +#include "include/gpu/gl/GrGLTypes.h" + +#ifdef SK_METAL +#include "include/gpu/mtl/GrMtlTypes.h" +#endif + +#ifdef SK_VULKAN +#include "include/gpu/vk/GrVkTypes.h" +#endif + +#ifdef SK_DIRECT3D +#include "include/private/GrD3DTypesMinimal.h" +#endif + +/** + * Wrapper class for passing into and receiving data from Ganesh about a backend semaphore object. + */ +class GrBackendSemaphore { +public: + // For convenience we just set the backend here to OpenGL. The GrBackendSemaphore cannot be used + // until either init* is called, which will set the appropriate GrBackend. + GrBackendSemaphore() + : fBackend(GrBackendApi::kOpenGL), fGLSync(nullptr), fIsInitialized(false) {} + +#ifdef SK_DIRECT3D + // We only need to specify these if Direct3D is enabled, because it requires special copy + // characteristics. 
+ ~GrBackendSemaphore(); + GrBackendSemaphore(const GrBackendSemaphore&); + GrBackendSemaphore& operator=(const GrBackendSemaphore&); +#endif + + void initGL(GrGLsync sync) { + fBackend = GrBackendApi::kOpenGL; + fGLSync = sync; + fIsInitialized = true; + } + +#ifdef SK_VULKAN + void initVulkan(VkSemaphore semaphore) { + fBackend = GrBackendApi::kVulkan; + fVkSemaphore = semaphore; + + fIsInitialized = true; + } + + VkSemaphore vkSemaphore() const { + if (!fIsInitialized || GrBackendApi::kVulkan != fBackend) { + return VK_NULL_HANDLE; + } + return fVkSemaphore; + } +#endif + +#ifdef SK_METAL + // It is the creator's responsibility to ref the MTLEvent passed in here, via __bridge_retained. + // The other end will wrap this BackendSemaphore and take the ref, via __bridge_transfer. + void initMetal(GrMTLHandle event, uint64_t value) { + fBackend = GrBackendApi::kMetal; + fMtlEvent = event; + fMtlValue = value; + + fIsInitialized = true; + } + + GrMTLHandle mtlSemaphore() const { + if (!fIsInitialized || GrBackendApi::kMetal != fBackend) { + return nullptr; + } + return fMtlEvent; + } + + uint64_t mtlValue() const { + if (!fIsInitialized || GrBackendApi::kMetal != fBackend) { + return 0; + } + return fMtlValue; + } + +#endif + +#ifdef SK_DIRECT3D + void initDirect3D(const GrD3DFenceInfo& info) { + fBackend = GrBackendApi::kDirect3D; + this->assignD3DFenceInfo(info); + fIsInitialized = true; + } +#endif + + bool isInitialized() const { return fIsInitialized; } + + GrGLsync glSync() const { + if (!fIsInitialized || GrBackendApi::kOpenGL != fBackend) { + return nullptr; + } + return fGLSync; + } + + +#ifdef SK_DIRECT3D + bool getD3DFenceInfo(GrD3DFenceInfo* outInfo) const; +#endif + +private: +#ifdef SK_DIRECT3D + void assignD3DFenceInfo(const GrD3DFenceInfo& info); +#endif + + GrBackendApi fBackend; + union { + GrGLsync fGLSync; +#ifdef SK_VULKAN + VkSemaphore fVkSemaphore; +#endif +#ifdef SK_METAL + GrMTLHandle fMtlEvent; // Expected to be an id<MTLEvent> +#endif +#ifdef SK_DIRECT3D + GrD3DFenceInfo* fD3DFenceInfo; +#endif + }; +#ifdef SK_METAL + uint64_t fMtlValue; +#endif + bool fIsInitialized; +}; + +#endif diff --git a/src/deps/skia/include/gpu/GrBackendSurface.h b/src/deps/skia/include/gpu/GrBackendSurface.h new file mode 100644 index 000000000..66616ef9b --- /dev/null +++ b/src/deps/skia/include/gpu/GrBackendSurface.h @@ -0,0 +1,643 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef GrBackendSurface_DEFINED +#define GrBackendSurface_DEFINED + +#include "include/gpu/GrBackendSurfaceMutableState.h" +#include "include/gpu/GrSurfaceInfo.h" +#include "include/gpu/GrTypes.h" +#ifdef SK_GL +#include "include/gpu/gl/GrGLTypes.h" +#include "include/private/GrGLTypesPriv.h" +#endif +#include "include/gpu/mock/GrMockTypes.h" +#ifdef SK_VULKAN +#include "include/gpu/vk/GrVkTypes.h" +#include "include/private/GrVkTypesPriv.h" +#endif + +#ifdef SK_DAWN +#include "include/gpu/dawn/GrDawnTypes.h" +#endif + +class GrBackendSurfaceMutableStateImpl; +class GrVkImageLayout; +class GrGLTextureParameters; +class GrColorFormatDesc; + +#ifdef SK_DAWN +#include "dawn/webgpu_cpp.h" +#endif + +#ifdef SK_METAL +#include "include/gpu/mtl/GrMtlTypes.h" +#endif + +#ifdef SK_DIRECT3D +#include "include/private/GrD3DTypesMinimal.h" +class GrD3DResourceState; +#endif + +#if defined(SK_DEBUG) || GR_TEST_UTILS +class SkString; +#endif + +#if !SK_SUPPORT_GPU + +// SkSurfaceCharacterization always needs a minimal version of this +class SK_API GrBackendFormat { +public: + bool isValid() const { return false; } +}; + +// SkSurface and SkImage rely on a minimal version of these always being available +class SK_API GrBackendTexture { +public: + GrBackendTexture() {} + + bool isValid() const { return false; } +}; + +class SK_API GrBackendRenderTarget { +public: + GrBackendRenderTarget() {} + + bool isValid() const { return false; } + bool isFramebufferOnly() const { return false; } +}; +#else + +enum class GrGLFormat; + +class SK_API GrBackendFormat { +public: + // Creates an invalid backend format. + GrBackendFormat() {} + GrBackendFormat(const GrBackendFormat&); + GrBackendFormat& operator=(const GrBackendFormat&); + +#ifdef SK_GL + static GrBackendFormat MakeGL(GrGLenum format, GrGLenum target) { + return GrBackendFormat(format, target); + } +#endif + +#ifdef SK_VULKAN + static GrBackendFormat MakeVk(VkFormat format, bool willUseDRMFormatModifiers = false) { + return GrBackendFormat(format, GrVkYcbcrConversionInfo(), willUseDRMFormatModifiers); + } + + static GrBackendFormat MakeVk(const GrVkYcbcrConversionInfo& ycbcrInfo, + bool willUseDRMFormatModifiers = false); +#endif + +#ifdef SK_DAWN + static GrBackendFormat MakeDawn(wgpu::TextureFormat format) { + return GrBackendFormat(format); + } +#endif + +#ifdef SK_METAL + static GrBackendFormat MakeMtl(GrMTLPixelFormat format) { + return GrBackendFormat(format); + } +#endif + +#ifdef SK_DIRECT3D + static GrBackendFormat MakeDxgi(DXGI_FORMAT format) { + return GrBackendFormat(format); + } +#endif + + static GrBackendFormat MakeMock(GrColorType colorType, SkImage::CompressionType compression, + bool isStencilFormat = false); + + bool operator==(const GrBackendFormat& that) const; + bool operator!=(const GrBackendFormat& that) const { return !(*this == that); } + + GrBackendApi backend() const { return fBackend; } + GrTextureType textureType() const { return fTextureType; } + + /** + * Gets the channels present in the format as a bitfield of SkColorChannelFlag values. + * Luminance channels are reported as kGray_SkColorChannelFlag. + */ + uint32_t channelMask() const; + + GrColorFormatDesc desc() const; + +#ifdef SK_GL + /** + * If the backend API is GL this gets the format as a GrGLFormat. Otherwise, returns + * GrGLFormat::kUnknown. + */ + GrGLFormat asGLFormat() const; +#endif + +#ifdef SK_VULKAN + /** + * If the backend API is Vulkan this gets the format as a VkFormat and returns true. Otherwise, + * returns false. 
+ */ + bool asVkFormat(VkFormat*) const; + + const GrVkYcbcrConversionInfo* getVkYcbcrConversionInfo() const; +#endif + +#ifdef SK_DAWN + /** + * If the backend API is Dawn this gets the format as a wgpu::TextureFormat and returns true. + * Otherwise, returns false. + */ + bool asDawnFormat(wgpu::TextureFormat*) const; +#endif + +#ifdef SK_METAL + /** + * If the backend API is Metal this gets the format as a GrMtlPixelFormat. Otherwise, + * Otherwise, returns MTLPixelFormatInvalid. + */ + GrMTLPixelFormat asMtlFormat() const; +#endif + +#ifdef SK_DIRECT3D + /** + * If the backend API is Direct3D this gets the format as a DXGI_FORMAT and returns true. + * Otherwise, returns false. + */ + bool asDxgiFormat(DXGI_FORMAT*) const; +#endif + + /** + * If the backend API is not Mock these three calls will return kUnknown, kNone or false, + * respectively. Otherwise, only one of the following can be true. The GrColorType is not + * kUnknown, the compression type is not kNone, or this is a mock stencil format. + */ + GrColorType asMockColorType() const; + SkImage::CompressionType asMockCompressionType() const; + bool isMockStencilFormat() const; + + // If possible, copies the GrBackendFormat and forces the texture type to be Texture2D. If the + // GrBackendFormat was for Vulkan and it originally had a GrVkYcbcrConversionInfo, we will + // remove the conversion and set the format to be VK_FORMAT_R8G8B8A8_UNORM. + GrBackendFormat makeTexture2D() const; + + // Returns true if the backend format has been initialized. + bool isValid() const { return fValid; } + +#if defined(SK_DEBUG) || GR_TEST_UTILS + SkString toStr() const; +#endif + +private: +#ifdef SK_GL + GrBackendFormat(GrGLenum format, GrGLenum target); +#endif + +#ifdef SK_VULKAN + GrBackendFormat(const VkFormat vkFormat, const GrVkYcbcrConversionInfo&, + bool willUseDRMFormatModifiers); +#endif + +#ifdef SK_DAWN + GrBackendFormat(wgpu::TextureFormat format); +#endif + +#ifdef SK_METAL + GrBackendFormat(const GrMTLPixelFormat mtlFormat); +#endif + +#ifdef SK_DIRECT3D + GrBackendFormat(DXGI_FORMAT dxgiFormat); +#endif + + GrBackendFormat(GrColorType, SkImage::CompressionType, bool isStencilFormat); + +#ifdef SK_DEBUG + bool validateMock() const; +#endif + + GrBackendApi fBackend = GrBackendApi::kMock; + bool fValid = false; + + union { +#ifdef SK_GL + GrGLenum fGLFormat; // the sized, internal format of the GL resource +#endif +#ifdef SK_VULKAN + struct { + VkFormat fFormat; + GrVkYcbcrConversionInfo fYcbcrConversionInfo; + } fVk; +#endif +#ifdef SK_DAWN + wgpu::TextureFormat fDawnFormat; +#endif + +#ifdef SK_METAL + GrMTLPixelFormat fMtlFormat; +#endif + +#ifdef SK_DIRECT3D + DXGI_FORMAT fDxgiFormat; +#endif + struct { + GrColorType fColorType; + SkImage::CompressionType fCompressionType; + bool fIsStencilFormat; + } fMock; + }; + GrTextureType fTextureType = GrTextureType::kNone; +}; + +class SK_API GrBackendTexture { +public: + // Creates an invalid backend texture. + GrBackendTexture(); + +#ifdef SK_GL + // The GrGLTextureInfo must have a valid fFormat. 
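+    // Illustrative only (hypothetical values): a client-created GL texture could be
+    // wrapped roughly like this, assuming a 2D target and a sized internal format:
+    //
+    //     GrGLTextureInfo glInfo;
+    //     glInfo.fTarget = 0x0DE1;        // GL_TEXTURE_2D
+    //     glInfo.fID     = textureID;     // existing GL texture object id
+    //     glInfo.fFormat = 0x8058;        // GL_RGBA8, the sized internal format
+    //     GrBackendTexture backendTex(256, 256, GrMipmapped::kNo, glInfo);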
+ GrBackendTexture(int width, + int height, + GrMipmapped, + const GrGLTextureInfo& glInfo); +#endif + +#ifdef SK_VULKAN + GrBackendTexture(int width, + int height, + const GrVkImageInfo& vkInfo); +#endif + +#ifdef SK_METAL + GrBackendTexture(int width, + int height, + GrMipmapped, + const GrMtlTextureInfo& mtlInfo); +#endif + +#ifdef SK_DIRECT3D + GrBackendTexture(int width, + int height, + const GrD3DTextureResourceInfo& d3dInfo); +#endif + +#ifdef SK_DAWN + GrBackendTexture(int width, + int height, + const GrDawnTextureInfo& dawnInfo); +#endif + + GrBackendTexture(int width, + int height, + GrMipmapped, + const GrMockTextureInfo& mockInfo); + + GrBackendTexture(const GrBackendTexture& that); + + ~GrBackendTexture(); + + GrBackendTexture& operator=(const GrBackendTexture& that); + + SkISize dimensions() const { return {fWidth, fHeight}; } + int width() const { return fWidth; } + int height() const { return fHeight; } + GrMipmapped mipmapped() const { return fMipmapped; } + bool hasMipmaps() const { return fMipmapped == GrMipmapped::kYes; } + /** deprecated alias of hasMipmaps(). */ + bool hasMipMaps() const { return this->hasMipmaps(); } + GrBackendApi backend() const {return fBackend; } + GrTextureType textureType() const { return fTextureType; } + +#ifdef SK_GL + // If the backend API is GL, copies a snapshot of the GrGLTextureInfo struct into the passed in + // pointer and returns true. Otherwise returns false if the backend API is not GL. + bool getGLTextureInfo(GrGLTextureInfo*) const; + + // Call this to indicate that the texture parameters have been modified in the GL context + // externally to GrContext. + void glTextureParametersModified(); +#endif + +#ifdef SK_DAWN + // If the backend API is Dawn, copies a snapshot of the GrDawnTextureInfo struct into the passed + // in pointer and returns true. Otherwise returns false if the backend API is not Dawn. + bool getDawnTextureInfo(GrDawnTextureInfo*) const; +#endif + +#ifdef SK_VULKAN + // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed + // in pointer and returns true. This snapshot will set the fImageLayout to the current layout + // state. Otherwise returns false if the backend API is not Vulkan. + bool getVkImageInfo(GrVkImageInfo*) const; + + // Anytime the client changes the VkImageLayout of the VkImage captured by this + // GrBackendTexture, they must call this function to notify Skia of the changed layout. + void setVkImageLayout(VkImageLayout); +#endif + +#ifdef SK_METAL + // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed + // in pointer and returns true. Otherwise returns false if the backend API is not Metal. + bool getMtlTextureInfo(GrMtlTextureInfo*) const; +#endif + +#ifdef SK_DIRECT3D + // If the backend API is Direct3D, copies a snapshot of the GrD3DTextureResourceInfo struct into + // the passed in pointer and returns true. This snapshot will set the fResourceState to the + // current resource state. Otherwise returns false if the backend API is not D3D. + bool getD3DTextureResourceInfo(GrD3DTextureResourceInfo*) const; + + // Anytime the client changes the D3D12_RESOURCE_STATES of the D3D12_RESOURCE captured by this + // GrBackendTexture, they must call this function to notify Skia of the changed layout. + void setD3DResourceState(GrD3DResourceStateEnum); +#endif + + // Get the GrBackendFormat for this texture (or an invalid format if this is not valid). 
+ GrBackendFormat getBackendFormat() const; + + // If the backend API is Mock, copies a snapshot of the GrMockTextureInfo struct into the passed + // in pointer and returns true. Otherwise returns false if the backend API is not Mock. + bool getMockTextureInfo(GrMockTextureInfo*) const; + + // If the client changes any of the mutable backend of the GrBackendTexture they should call + // this function to inform Skia that those values have changed. The backend API specific state + // that can be set from this function are: + // + // Vulkan: VkImageLayout and QueueFamilyIndex + void setMutableState(const GrBackendSurfaceMutableState&); + + // Returns true if we are working with protected content. + bool isProtected() const; + + // Returns true if the backend texture has been initialized. + bool isValid() const { return fIsValid; } + + // Returns true if both textures are valid and refer to the same API texture. + bool isSameTexture(const GrBackendTexture&); + +#if GR_TEST_UTILS + static bool TestingOnly_Equals(const GrBackendTexture& , const GrBackendTexture&); +#endif + +private: + friend class GrVkGpu; // for getMutableState + sk_sp<GrBackendSurfaceMutableStateImpl> getMutableState() const; + +#ifdef SK_GL + friend class GrGLTexture; + friend class GrGLGpu; // for getGLTextureParams + GrBackendTexture(int width, + int height, + GrMipmapped, + const GrGLTextureInfo, + sk_sp<GrGLTextureParameters>); + sk_sp<GrGLTextureParameters> getGLTextureParams() const; +#endif + +#ifdef SK_VULKAN + friend class GrVkTexture; + GrBackendTexture(int width, + int height, + const GrVkImageInfo& vkInfo, + sk_sp<GrBackendSurfaceMutableStateImpl> mutableState); +#endif + +#ifdef SK_DIRECT3D + friend class GrD3DTexture; + friend class GrD3DGpu; // for getGrD3DResourceState + GrBackendTexture(int width, + int height, + const GrD3DTextureResourceInfo& vkInfo, + sk_sp<GrD3DResourceState> state); + sk_sp<GrD3DResourceState> getGrD3DResourceState() const; +#endif + + // Free and release and resources being held by the GrBackendTexture. + void cleanup(); + + bool fIsValid; + int fWidth; //<! width in pixels + int fHeight; //<! height in pixels + GrMipmapped fMipmapped; + GrBackendApi fBackend; + GrTextureType fTextureType; + + union { +#ifdef SK_GL + GrGLBackendTextureInfo fGLInfo; +#endif +#ifdef SK_VULKAN + GrVkBackendSurfaceInfo fVkInfo; +#endif + GrMockTextureInfo fMockInfo; +#ifdef SK_DIRECT3D + GrD3DBackendSurfaceInfo fD3DInfo; +#endif + }; +#ifdef SK_METAL + GrMtlTextureInfo fMtlInfo; +#endif +#ifdef SK_DAWN + GrDawnTextureInfo fDawnInfo; +#endif + + sk_sp<GrBackendSurfaceMutableStateImpl> fMutableState; +}; + +class SK_API GrBackendRenderTarget { +public: + // Creates an invalid backend texture. + GrBackendRenderTarget(); + +#ifdef SK_GL + // The GrGLTextureInfo must have a valid fFormat. If wrapping in an SkSurface we require the + // stencil bits to be either 0, 8 or 16. + GrBackendRenderTarget(int width, + int height, + int sampleCnt, + int stencilBits, + const GrGLFramebufferInfo& glInfo); +#endif + +#ifdef SK_DAWN + // If wrapping in an SkSurface we require the stencil bits to be either 0, 8 or 16. + GrBackendRenderTarget(int width, + int height, + int sampleCnt, + int stencilBits, + const GrDawnRenderTargetInfo& dawnInfo); +#endif + +#ifdef SK_VULKAN + /** Deprecated. Sample count is now part of GrVkImageInfo. 
*/ + GrBackendRenderTarget(int width, int height, int sampleCnt, const GrVkImageInfo& vkInfo); + + GrBackendRenderTarget(int width, int height, const GrVkImageInfo& vkInfo); +#endif + +#ifdef SK_METAL + GrBackendRenderTarget(int width, + int height, + const GrMtlTextureInfo& mtlInfo); + /** Deprecated. Sample count is ignored and is instead retrieved from the MtlTexture. */ + GrBackendRenderTarget(int width, + int height, + int sampleCnt, + const GrMtlTextureInfo& mtlInfo); +#endif + +#ifdef SK_DIRECT3D + GrBackendRenderTarget(int width, + int height, + const GrD3DTextureResourceInfo& d3dInfo); +#endif + + GrBackendRenderTarget(int width, + int height, + int sampleCnt, + int stencilBits, + const GrMockRenderTargetInfo& mockInfo); + + ~GrBackendRenderTarget(); + + GrBackendRenderTarget(const GrBackendRenderTarget& that); + GrBackendRenderTarget& operator=(const GrBackendRenderTarget&); + + SkISize dimensions() const { return {fWidth, fHeight}; } + int width() const { return fWidth; } + int height() const { return fHeight; } + int sampleCnt() const { return fSampleCnt; } + int stencilBits() const { return fStencilBits; } + GrBackendApi backend() const {return fBackend; } + bool isFramebufferOnly() const { return fFramebufferOnly; } + +#ifdef SK_GL + // If the backend API is GL, copies a snapshot of the GrGLFramebufferInfo struct into the passed + // in pointer and returns true. Otherwise returns false if the backend API is not GL. + bool getGLFramebufferInfo(GrGLFramebufferInfo*) const; +#endif + +#ifdef SK_DAWN + // If the backend API is Dawn, copies a snapshot of the GrDawnRenderTargetInfo struct into the + // passed-in pointer and returns true. Otherwise returns false if the backend API is not Dawn. + bool getDawnRenderTargetInfo(GrDawnRenderTargetInfo*) const; +#endif + +#ifdef SK_VULKAN + // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed + // in pointer and returns true. This snapshot will set the fImageLayout to the current layout + // state. Otherwise returns false if the backend API is not Vulkan. + bool getVkImageInfo(GrVkImageInfo*) const; + + // Anytime the client changes the VkImageLayout of the VkImage captured by this + // GrBackendRenderTarget, they must call this function to notify Skia of the changed layout. + void setVkImageLayout(VkImageLayout); +#endif + +#ifdef SK_METAL + // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed + // in pointer and returns true. Otherwise returns false if the backend API is not Metal. + bool getMtlTextureInfo(GrMtlTextureInfo*) const; +#endif + +#ifdef SK_DIRECT3D + // If the backend API is Direct3D, copies a snapshot of the GrMtlTextureInfo struct into the + // passed in pointer and returns true. Otherwise returns false if the backend API is not D3D. + bool getD3DTextureResourceInfo(GrD3DTextureResourceInfo*) const; + + // Anytime the client changes the D3D12_RESOURCE_STATES of the D3D12_RESOURCE captured by this + // GrBackendTexture, they must call this function to notify Skia of the changed layout. + void setD3DResourceState(GrD3DResourceStateEnum); +#endif + + // Get the GrBackendFormat for this render target (or an invalid format if this is not valid). + GrBackendFormat getBackendFormat() const; + + // If the backend API is Mock, copies a snapshot of the GrMockTextureInfo struct into the passed + // in pointer and returns true. Otherwise returns false if the backend API is not Mock. 
+ bool getMockRenderTargetInfo(GrMockRenderTargetInfo*) const; + + // If the client changes any of the mutable backend of the GrBackendTexture they should call + // this function to inform Skia that those values have changed. The backend API specific state + // that can be set from this function are: + // + // Vulkan: VkImageLayout and QueueFamilyIndex + void setMutableState(const GrBackendSurfaceMutableState&); + + // Returns true if we are working with protected content. + bool isProtected() const; + + // Returns true if the backend texture has been initialized. + bool isValid() const { return fIsValid; } + + +#if GR_TEST_UTILS + static bool TestingOnly_Equals(const GrBackendRenderTarget&, const GrBackendRenderTarget&); +#endif + +private: + friend class GrVkGpu; // for getMutableState + sk_sp<GrBackendSurfaceMutableStateImpl> getMutableState() const; + +#ifdef SK_VULKAN + friend class GrVkRenderTarget; + GrBackendRenderTarget(int width, + int height, + const GrVkImageInfo& vkInfo, + sk_sp<GrBackendSurfaceMutableStateImpl> mutableState); +#endif + +#ifdef SK_DIRECT3D + friend class GrD3DGpu; + friend class GrD3DRenderTarget; + GrBackendRenderTarget(int width, + int height, + const GrD3DTextureResourceInfo& d3dInfo, + sk_sp<GrD3DResourceState> state); + sk_sp<GrD3DResourceState> getGrD3DResourceState() const; +#endif + + // Free and release and resources being held by the GrBackendTexture. + void cleanup(); + + bool fIsValid; + bool fFramebufferOnly = false; + int fWidth; //<! width in pixels + int fHeight; //<! height in pixels + + int fSampleCnt; + int fStencilBits; + + GrBackendApi fBackend; + + union { +#ifdef SK_GL + GrGLFramebufferInfo fGLInfo; +#endif +#ifdef SK_VULKAN + GrVkBackendSurfaceInfo fVkInfo; +#endif + GrMockRenderTargetInfo fMockInfo; +#ifdef SK_DIRECT3D + GrD3DBackendSurfaceInfo fD3DInfo; +#endif + }; +#ifdef SK_METAL + GrMtlTextureInfo fMtlInfo; +#endif +#ifdef SK_DAWN + GrDawnRenderTargetInfo fDawnInfo; +#endif + sk_sp<GrBackendSurfaceMutableStateImpl> fMutableState; +}; + +#endif + +#endif + diff --git a/src/deps/skia/include/gpu/GrBackendSurfaceMutableState.h b/src/deps/skia/include/gpu/GrBackendSurfaceMutableState.h new file mode 100644 index 000000000..3a5f1d7fa --- /dev/null +++ b/src/deps/skia/include/gpu/GrBackendSurfaceMutableState.h @@ -0,0 +1,91 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrBackendSurfaceMutableState_DEFINED +#define GrBackendSurfaceMutableState_DEFINED + +#include "include/gpu/GrTypes.h" + +#ifdef SK_VULKAN +#include "include/private/GrVkTypesPriv.h" +#endif + +/** + * Since Skia and clients can both modify gpu textures and their connected state, Skia needs a way + * for clients to inform us if they have modifiend any of this state. In order to not need setters + * for every single API and state, we use this class to be a generic wrapper around all the mutable + * state. This class is used for calls that inform Skia of these texture/image state changes by the + * client as well as for requesting state changes to be done by Skia. 
The backend specific state + * that is wrapped by this class are: + * + * Vulkan: VkImageLayout and QueueFamilyIndex + */ +class SK_API GrBackendSurfaceMutableState { +public: + GrBackendSurfaceMutableState() {} + +#ifdef SK_VULKAN + GrBackendSurfaceMutableState(VkImageLayout layout, uint32_t queueFamilyIndex) + : fVkState(layout, queueFamilyIndex) + , fBackend(GrBackend::kVulkan) + , fIsValid(true) {} +#endif + + GrBackendSurfaceMutableState(const GrBackendSurfaceMutableState& that); + GrBackendSurfaceMutableState& operator=(const GrBackendSurfaceMutableState& that); + +#ifdef SK_VULKAN + // If this class is not Vulkan backed it will return value of VK_IMAGE_LAYOUT_UNDEFINED. + // Otherwise it will return the VkImageLayout. + VkImageLayout getVkImageLayout() const { + if (this->isValid() && fBackend != GrBackendApi::kVulkan) { + return VK_IMAGE_LAYOUT_UNDEFINED; + } + return fVkState.getImageLayout(); + } + + // If this class is not Vulkan backed it will return value of VK_QUEUE_FAMILY_IGNORED. + // Otherwise it will return the VkImageLayout. + uint32_t getQueueFamilyIndex() const { + if (this->isValid() && fBackend != GrBackendApi::kVulkan) { + return VK_QUEUE_FAMILY_IGNORED; + } + return fVkState.getQueueFamilyIndex(); + } +#endif + + // Returns true if the backend mutable state has been initialized. + bool isValid() const { return fIsValid; } + + GrBackendApi backend() const { return fBackend; } + +private: + friend class GrBackendSurfaceMutableStateImpl; + friend class GrVkGpu; + +#ifdef SK_VULKAN + void setVulkanState(VkImageLayout layout, uint32_t queueFamilyIndex) { + SkASSERT(!this->isValid() || fBackend == GrBackendApi::kVulkan); + fVkState.setImageLayout(layout); + fVkState.setQueueFamilyIndex(queueFamilyIndex); + fBackend = GrBackendApi::kVulkan; + fIsValid = true; + } +#endif + + union { + char fPlaceholder; +#ifdef SK_VULKAN + GrVkSharedImageInfo fVkState; +#endif + }; + + GrBackend fBackend = GrBackendApi::kMock; + bool fIsValid = false; +}; + +#endif diff --git a/src/deps/skia/include/gpu/GrConfig.h b/src/deps/skia/include/gpu/GrConfig.h new file mode 100644 index 000000000..9fe6629dd --- /dev/null +++ b/src/deps/skia/include/gpu/GrConfig.h @@ -0,0 +1,53 @@ + +/* + * Copyright 2010 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrConfig_DEFINED +#define GrConfig_DEFINED + +#include "include/core/SkTypes.h" + +/** + * Gr defines are set to 0 or 1, rather than being undefined or defined + */ + +#if !defined(GR_CACHE_STATS) + #if defined(SK_DEBUG) || defined(SK_DUMP_STATS) + #define GR_CACHE_STATS 1 + #else + #define GR_CACHE_STATS 0 + #endif +#endif + +#if !defined(GR_GPU_STATS) + #if defined(SK_DEBUG) || defined(SK_DUMP_STATS) || GR_TEST_UTILS + #define GR_GPU_STATS 1 + #else + #define GR_GPU_STATS 0 + #endif +#endif + +#endif + +/** + * GR_STRING makes a string of X where X is expanded before conversion to a string + * if X itself contains macros. + */ +#define GR_STRING(X) GR_STRING_IMPL(X) +#define GR_STRING_IMPL(X) #X + +/** + * GR_CONCAT concatenates X and Y where each is expanded before + * contanenation if either contains macros. 
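+ * For example (illustrative), GR_CONCAT(sk_var_, __LINE__) expands __LINE__ before
+ * pasting, yielding an identifier such as sk_var_42 rather than sk_var___LINE__.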
+ */ +#define GR_CONCAT(X,Y) GR_CONCAT_IMPL(X,Y) +#define GR_CONCAT_IMPL(X,Y) X##Y + +/** + * Creates a string of the form "<filename>(<linenumber>) : " + */ +#define GR_FILE_AND_LINE_STR __FILE__ "(" GR_STRING(__LINE__) ") : " diff --git a/src/deps/skia/include/gpu/GrContextOptions.h b/src/deps/skia/include/gpu/GrContextOptions.h new file mode 100644 index 000000000..b769d6ca2 --- /dev/null +++ b/src/deps/skia/include/gpu/GrContextOptions.h @@ -0,0 +1,364 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrContextOptions_DEFINED +#define GrContextOptions_DEFINED + +#include "include/core/SkData.h" +#include "include/core/SkString.h" +#include "include/core/SkTypes.h" +#include "include/gpu/GrDriverBugWorkarounds.h" +#include "include/gpu/GrTypes.h" +#include "include/gpu/ShaderErrorHandler.h" +#include "include/private/GrTypesPriv.h" + +#include <vector> + +class SkExecutor; + +#if SK_SUPPORT_GPU +struct SK_API GrContextOptions { + enum class Enable { + /** Forces an option to be disabled. */ + kNo, + /** Forces an option to be enabled. */ + kYes, + /** + * Uses Skia's default behavior, which may use runtime properties (e.g. driver version). + */ + kDefault + }; + + enum class ShaderCacheStrategy { + kSkSL, + kBackendSource, + kBackendBinary, + }; + + /** + * Abstract class which stores Skia data in a cache that persists between sessions. Currently, + * Skia stores compiled shader binaries (only when glProgramBinary / glGetProgramBinary are + * supported) when provided a persistent cache, but this may extend to other data in the future. + */ + class SK_API PersistentCache { + public: + virtual ~PersistentCache() = default; + + /** + * Returns the data for the key if it exists in the cache, otherwise returns null. + */ + virtual sk_sp<SkData> load(const SkData& key) = 0; + + // Placeholder until all clients override the 3-parameter store(), then remove this, and + // make that version pure virtual. + virtual void store(const SkData& /*key*/, const SkData& /*data*/) { SkASSERT(false); } + + /** + * Stores data in the cache, indexed by key. description provides a human-readable + * version of the key. + */ + virtual void store(const SkData& key, const SkData& data, const SkString& /*description*/) { + this->store(key, data); + } + + protected: + PersistentCache() = default; + PersistentCache(const PersistentCache&) = delete; + PersistentCache& operator=(const PersistentCache&) = delete; + }; + + using ShaderErrorHandler = skgpu::ShaderErrorHandler; + + GrContextOptions() {} + + // Suppress prints for the GrContext. + bool fSuppressPrints = false; + + /** + * Controls whether we check for GL errors after functions that allocate resources (e.g. + * glTexImage2D), for shader compilation success, and program link success. Ignored on + * backends other than GL. + */ + Enable fSkipGLErrorChecks = Enable::kDefault; + + /** Overrides: These options override feature detection using backend API queries. These + overrides can only reduce the feature set or limits, never increase them beyond the + detected values. */ + + int fMaxTextureSizeOverride = SK_MaxS32; + + /** the threshold in bytes above which we will use a buffer mapping API to map vertex and index + buffers to CPU memory in order to update them. A value of -1 means the GrContext should + deduce the optimal value for this platform. */ + int fBufferMapThreshold = -1; + + /** + * Executor to handle threaded work within Ganesh. 
If this is nullptr, then all work will be + * done serially on the main thread. To have worker threads assist with various tasks, set this + * to a valid SkExecutor instance. Currently, used for software path rendering, but may be used + * for other tasks. + */ + SkExecutor* fExecutor = nullptr; + + /** Construct mipmaps manually, via repeated downsampling draw-calls. This is used when + the driver's implementation (glGenerateMipmap) contains bugs. This requires mipmap + level control (ie desktop or ES3). */ + bool fDoManualMipmapping = false; + + /** + * Disables the use of coverage counting shortcuts to render paths. Coverage counting can cause + * artifacts along shared edges if care isn't taken to ensure both contours wind in the same + * direction. + */ + // FIXME: Once this is removed from Chrome and Android, rename to fEnable"". + bool fDisableCoverageCountingPaths = true; + + /** + * Disables distance field rendering for paths. Distance field computation can be expensive, + * and yields no benefit if a path is not rendered multiple times with different transforms. + */ + bool fDisableDistanceFieldPaths = false; + + /** + * If true this allows path mask textures to be cached. This is only really useful if paths + * are commonly rendered at the same scale and fractional translation. + */ + bool fAllowPathMaskCaching = true; + + /** + * If true, the GPU will not be used to perform YUV -> RGB conversion when generating + * textures from codec-backed images. + */ + bool fDisableGpuYUVConversion = false; + + /** + * The maximum size of cache textures used for Skia's Glyph cache. + */ + size_t fGlyphCacheTextureMaximumBytes = 2048 * 1024 * 4; + + /** + * Below this threshold size in device space distance field fonts won't be used. Distance field + * fonts don't support hinting which is more important at smaller sizes. + */ + float fMinDistanceFieldFontSize = 18; + + /** + * Above this threshold size in device space glyphs are drawn as individual paths. + */ +#if defined(SK_BUILD_FOR_ANDROID) + float fGlyphsAsPathsFontSize = 384; +#elif defined(SK_BUILD_FOR_MAC) + float fGlyphsAsPathsFontSize = 256; +#else + float fGlyphsAsPathsFontSize = 324; +#endif + + /** + * Can the glyph atlas use multiple textures. If allowed, the each texture's size is bound by + * fGlypheCacheTextureMaximumBytes. + */ + Enable fAllowMultipleGlyphCacheTextures = Enable::kDefault; + + /** + * Bugs on certain drivers cause stencil buffers to leak. This flag causes Skia to avoid + * allocating stencil buffers and use alternate rasterization paths, avoiding the leak. + */ + bool fAvoidStencilBuffers = false; + + /** + * If true, texture fetches from mip-mapped textures will be biased to read larger MIP levels. + * This has the effect of sharpening those textures, at the cost of some aliasing, and possible + * performance impact. + */ + bool fSharpenMipmappedTextures = false; + + /** + * Enables driver workaround to use draws instead of HW clears, e.g. glClear on the GL backend. + */ + Enable fUseDrawInsteadOfClear = Enable::kDefault; + + /** + * Allow Ganesh to more aggressively reorder operations to reduce the number of render passes. + * Offscreen draws will be done upfront instead of interrupting the main render pass when + * possible. May increase VRAM usage, but still observes the resource cache limit. + * Enabled by default. + */ + Enable fReduceOpsTaskSplitting = Enable::kDefault; + + /** + * Some ES3 contexts report the ES2 external image extension, but not the ES3 version. 
+ * If support for external images is critical, enabling this option will cause Ganesh to limit + * shaders to the ES2 shading language in that situation. + */ + bool fPreferExternalImagesOverES3 = false; + + /** + * Disables correctness workarounds that are enabled for particular GPUs, OSes, or drivers. + * This does not affect code path choices that are made for perfomance reasons nor does it + * override other GrContextOption settings. + */ + bool fDisableDriverCorrectnessWorkarounds = false; + + /** + * Maximum number of GPU programs or pipelines to keep active in the runtime cache. + */ + int fRuntimeProgramCacheSize = 256; + + /** + * Cache in which to store compiled shader binaries between runs. + */ + PersistentCache* fPersistentCache = nullptr; + + /** + * This affects the usage of the PersistentCache. We can cache SkSL, backend source (GLSL), or + * backend binaries (GL program binaries). By default we cache binaries, but if the driver's + * binary loading/storing is believed to have bugs, this can be limited to caching GLSL. + * Caching GLSL strings still saves CPU work when a GL program is created. + */ + ShaderCacheStrategy fShaderCacheStrategy = ShaderCacheStrategy::kBackendBinary; + + /** + * If present, use this object to report shader compilation failures. If not, report failures + * via SkDebugf and assert. + */ + ShaderErrorHandler* fShaderErrorHandler = nullptr; + + /** + * Specifies the number of samples Ganesh should use when performing internal draws with MSAA + * (hardware capabilities permitting). + * + * If 0, Ganesh will disable internal code paths that use multisampling. + */ + int fInternalMultisampleCount = 4; + + /** + * In Skia's vulkan backend a single GrContext submit equates to the submission of a single + * primary command buffer to the VkQueue. This value specifies how many vulkan secondary command + * buffers we will cache for reuse on a given primary command buffer. A single submit may use + * more than this many secondary command buffers, but after the primary command buffer is + * finished on the GPU it will only hold on to this many secondary command buffers for reuse. + * + * A value of -1 means we will pick a limit value internally. + */ + int fMaxCachedVulkanSecondaryCommandBuffers = -1; + + /** + * If true, the caps will never support mipmaps. + */ + bool fSuppressMipmapSupport = false; + + /** + * If true, and if supported, enables hardware tessellation in the caps. + */ + bool fEnableExperimentalHardwareTessellation = false; + + /** + * If true, then add 1 pixel padding to all glyph masks in the atlas to support bi-lerp + * rendering of all glyphs. This must be set to true to use GrSlug. + */ + #if defined(SK_EXPERIMENTAL_SIMULATE_DRAWGLYPHRUNLIST_WITH_SLUG) + bool fSupportBilerpFromGlyphAtlas = true; + #else + bool fSupportBilerpFromGlyphAtlas = false; + #endif + + /** + * Uses a reduced variety of shaders. May perform less optimally in steady state but can reduce + * jank due to shader compilations. + */ + bool fReducedShaderVariations = false; + + /** + * If true, then allow to enable MSAA on new Intel GPUs. + */ + bool fAllowMSAAOnNewIntel = false; + +#if GR_TEST_UTILS + /** + * Private options that are only meant for testing within Skia's tools. + */ + + /** + * Prevents use of dual source blending, to test that all xfer modes work correctly without it. + */ + bool fSuppressDualSourceBlending = false; + + /** + * Prevents the use of non-coefficient-based blend equations, for testing dst reads, barriers, + * and in-shader blending. 
+ */ + bool fSuppressAdvancedBlendEquations = false; + + /** + * Prevents the use of framebuffer fetches, for testing dst reads and texture barriers. + */ + bool fSuppressFramebufferFetch = false; + + /** + * If greater than zero and less than the actual hardware limit, overrides the maximum number of + * tessellation segments supported by the caps. + */ + int fMaxTessellationSegmentsOverride = 0; + + /** + * If true, then all paths are processed as if "setIsVolatile" had been called. + */ + bool fAllPathsVolatile = false; + + /** + * Render everything in wireframe + */ + bool fWireframeMode = false; + + /** + * Enforces clearing of all textures when they're created. + */ + bool fClearAllTextures = false; + + /** + * Randomly generate a (false) GL_OUT_OF_MEMORY error + */ + bool fRandomGLOOM = false; + + /** + * Force off support for write/transfer pixels row bytes in caps. + */ + bool fDisallowWriteAndTransferPixelRowBytes = false; + + /** + * Include or exclude specific GPU path renderers. + */ + GpuPathRenderers fGpuPathRenderers = GpuPathRenderers::kDefault; + + /** + * Specify the GPU resource cache limit. Equivalent to calling `setResourceCacheLimit` on the + * context at construction time. + * + * A value of -1 means use the default limit value. + */ + int fResourceCacheLimitOverride = -1; + + /** + * If true, then always try to use hardware tessellation, regardless of how small a path may be. + */ + bool fAlwaysPreferHardwareTessellation = false; + + /** + * Maximum width and height of internal texture atlases. + */ + int fMaxTextureAtlasSize = 2048; +#endif + + GrDriverBugWorkarounds fDriverBugWorkarounds; +}; +#else +struct GrContextOptions { + struct PersistentCache {}; +}; +#endif + +#endif diff --git a/src/deps/skia/include/gpu/GrContextThreadSafeProxy.h b/src/deps/skia/include/gpu/GrContextThreadSafeProxy.h new file mode 100644 index 000000000..1bf1f1f96 --- /dev/null +++ b/src/deps/skia/include/gpu/GrContextThreadSafeProxy.h @@ -0,0 +1,159 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrContextThreadSafeProxy_DEFINED +#define GrContextThreadSafeProxy_DEFINED + +#include "include/core/SkRefCnt.h" + +#if SK_SUPPORT_GPU + +#include "include/core/SkImageInfo.h" +#include "include/gpu/GrContextOptions.h" +#include "include/gpu/GrTypes.h" + +#include <atomic> + +class GrBackendFormat; +class GrCaps; +class GrContextThreadSafeProxyPriv; +class GrTextBlobRedrawCoordinator; +class GrThreadSafeCache; +class GrThreadSafePipelineBuilder; +class SkSurfaceCharacterization; +class SkSurfaceProps; + +/** + * Can be used to perform actions related to the generating GrContext in a thread safe manner. The + * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext. + */ +class SK_API GrContextThreadSafeProxy final : public SkNVRefCnt<GrContextThreadSafeProxy> { +public: + ~GrContextThreadSafeProxy(); + + /** + * Create a surface characterization for a DDL that will be replayed into the GrContext + * that created this proxy. On failure the resulting characterization will be invalid (i.e., + * "!c.isValid()"). + * + * @param cacheMaxResourceBytes The max resource bytes limit that will be in effect + * when the DDL created with this characterization is + * replayed. + * Note: the contract here is that the DDL will be + * created as if it had a full 'cacheMaxResourceBytes' + * to use. 
If replayed into a GrContext that already has + * locked GPU memory, the replay can exceed the budget. + * To rephrase, all resource allocation decisions are + * made at record time and at playback time the budget + * limits will be ignored. + * @param ii The image info specifying properties of the SkSurface + * that the DDL created with this characterization will + * be replayed into. + * Note: Ganesh doesn't make use of the SkImageInfo's + * alphaType + * @param backendFormat Information about the format of the GPU surface that + * will back the SkSurface upon replay + * @param sampleCount The sample count of the SkSurface that the DDL + * created with this characterization will be replayed + * into + * @param origin The origin of the SkSurface that the DDL created with + * this characterization will be replayed into + * @param surfaceProps The surface properties of the SkSurface that the DDL + * created with this characterization will be replayed + * into + * @param isMipMapped Will the surface the DDL will be replayed into have + * space allocated for mipmaps? + * @param willUseGLFBO0 Will the surface the DDL will be replayed into be + * backed by GL FBO 0. This flag is only valid if using + * an GL backend. + * @param isTextureable Will the surface be able to act as a texture? + * @param isProtected Will the (Vulkan) surface be DRM protected? + * @param vkRTSupportsInputAttachment Can the vulkan surface be used as in input + attachment? + * @param forVulkanSecondaryCommandBuffer Will the surface be wrapping a vulkan secondary + * command buffer via a GrVkSecondaryCBDrawContext? If + * this is true then the following is required: + * isTexureable = false + * isMipMapped = false + * willUseGLFBO0 = false + * vkRTSupportsInputAttachment = false + */ + SkSurfaceCharacterization createCharacterization( + size_t cacheMaxResourceBytes, + const SkImageInfo& ii, + const GrBackendFormat& backendFormat, + int sampleCount, + GrSurfaceOrigin origin, + const SkSurfaceProps& surfaceProps, + bool isMipMapped, + bool willUseGLFBO0 = false, + bool isTextureable = true, + GrProtected isProtected = GrProtected::kNo, + bool vkRTSupportsInputAttachment = false, + bool forVulkanSecondaryCommandBuffer = false); + + /* + * Retrieve the default GrBackendFormat for a given SkColorType and renderability. + * It is guaranteed that this backend format will be the one used by the following + * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods. + * + * The caller should check that the returned format is valid. + */ + GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const; + + /** + * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is + * guaranteed to match the backend format used by the following + * createCompressedBackendTexture methods that take a CompressionType. + * + * The caller should check that the returned format is valid. + */ + GrBackendFormat compressedBackendFormat(SkImage::CompressionType c) const; + + bool isValid() const { return nullptr != fCaps; } + + bool operator==(const GrContextThreadSafeProxy& that) const { + // Each GrContext should only ever have a single thread-safe proxy. + SkASSERT((this == &that) == (this->fContextID == that.fContextID)); + return this == &that; + } + + bool operator!=(const GrContextThreadSafeProxy& that) const { return !(*this == that); } + + // Provides access to functions that aren't part of the public API. 
+ GrContextThreadSafeProxyPriv priv(); + const GrContextThreadSafeProxyPriv priv() const; // NOLINT(readability-const-return-type) + +private: + friend class GrContextThreadSafeProxyPriv; // for ctor and hidden methods + + // DDL TODO: need to add unit tests for backend & maybe options + GrContextThreadSafeProxy(GrBackendApi, const GrContextOptions&); + + void abandonContext(); + bool abandoned() const; + + // TODO: This should be part of the constructor but right now we have a chicken-and-egg problem + // with GrContext where we get the caps by creating a GPU which requires a context (see the + // `init` method on GrContext_Base). + void init(sk_sp<const GrCaps>, sk_sp<GrThreadSafePipelineBuilder>); + + const GrBackendApi fBackend; + const GrContextOptions fOptions; + const uint32_t fContextID; + sk_sp<const GrCaps> fCaps; + std::unique_ptr<GrTextBlobRedrawCoordinator> fTextBlobRedrawCoordinator; + std::unique_ptr<GrThreadSafeCache> fThreadSafeCache; + sk_sp<GrThreadSafePipelineBuilder> fPipelineBuilder; + std::atomic<bool> fAbandoned{false}; +}; + +#else // !SK_SUPPORT_GPU +class SK_API GrContextThreadSafeProxy final : public SkNVRefCnt<GrContextThreadSafeProxy> {}; +#endif + +#endif diff --git a/src/deps/skia/include/gpu/GrDirectContext.h b/src/deps/skia/include/gpu/GrDirectContext.h new file mode 100644 index 000000000..4ff25b0f5 --- /dev/null +++ b/src/deps/skia/include/gpu/GrDirectContext.h @@ -0,0 +1,880 @@ +/* + * Copyright 2020 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrDirectContext_DEFINED +#define GrDirectContext_DEFINED + +#include "include/gpu/GrRecordingContext.h" + +#include "include/gpu/GrBackendSurface.h" + +// We shouldn't need this but currently Android is relying on this being include transitively. +#include "include/core/SkUnPreMultiply.h" + +class GrAtlasManager; +class GrBackendSemaphore; +class GrClientMappedBufferManager; +class GrDirectContextPriv; +class GrContextThreadSafeProxy; +struct GrD3DBackendContext; +class GrFragmentProcessor; +class GrGpu; +struct GrGLInterface; +struct GrMtlBackendContext; +struct GrMockOptions; +class GrPath; +class GrResourceCache; +class GrResourceProvider; +class GrStrikeCache; +class GrSurfaceProxy; +class GrSwizzle; +class GrTextureProxy; +struct GrVkBackendContext; + +class SkImage; +class SkString; +class SkSurfaceCharacterization; +class SkSurfaceProps; +class SkTaskGroup; +class SkTraceMemoryDump; + +namespace skgpu { namespace v1 { class SmallPathAtlasMgr; }} + +class SK_API GrDirectContext : public GrRecordingContext { +public: +#ifdef SK_GL + /** + * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the + * result of GrGLMakeNativeInterface() is used if it succeeds. + */ + static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&); + static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>); + static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&); + static sk_sp<GrDirectContext> MakeGL(); +#endif + +#ifdef SK_VULKAN + /** + * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned + * GrDirectContext is destroyed. This also means that any objects created with this + * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released as they may hold + * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released, + * then it is safe to delete the vulkan objects. 
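+ *
+ * A minimal lifetime sketch (illustrative; backendContext is a hypothetical,
+ * client-owned GrVkBackendContext):
+ *
+ *     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeVulkan(backendContext);
+ *     // ... create and release all SkSurfaces / SkImages made from ctx ...
+ *     ctx.reset();  // release the GrDirectContext last
+ *     // only now is it safe to destroy the VkDevice / VkQueue / VkInstance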
+ */ + static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&); + static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&); +#endif + +#ifdef SK_METAL + /** + * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a + * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must + * have their own ref which will be released when the GrMtlBackendContext is destroyed. + * Ganesh will take its own ref on the objects which will be released when the GrDirectContext + * is destroyed. + */ + static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&); + static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&); + /** + * Deprecated. + * + * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an + * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects + * must have a ref on them that can be transferred to Ganesh, which will release the ref + * when the GrDirectContext is destroyed. + */ + static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&); + static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue); +#endif + +#ifdef SK_DIRECT3D + /** + * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context + * must be kept alive until the returned GrDirectContext is first destroyed or abandoned. + */ + static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&); + static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&); +#endif + +#ifdef SK_DAWN + static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&, + const GrContextOptions&); + static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&); +#endif + + static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&); + static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*); + + ~GrDirectContext() override; + + /** + * The context normally assumes that no outsider is setting state + * within the underlying 3D API's context/device/whatever. This call informs + * the context that the state was modified and it should resend. Shouldn't + * be called frequently for good performance. + * The flag bits, state, is dependent on which backend is used by the + * context, either GL or D3D (possible in future). + */ + void resetContext(uint32_t state = kAll_GrBackendState); + + /** + * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which + * the context has modified the bound texture will have texture id 0 bound. This does not + * flush the context. Calling resetContext() does not change the set that will be bound + * to texture id 0 on the next call to resetGLTextureBindings(). After this is called + * all unit/target combinations are considered to have unmodified bindings until the context + * subsequently modifies them (meaning if this is called twice in a row with no intervening + * context usage then the second call is a no-op.) + */ + void resetGLTextureBindings(); + + /** + * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer + * usable. Call this if you have lost the associated GPU context, and thus internal texture, + * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the + * context and any of its created resource objects will not make backend 3D API calls. 
Content + * rendered but not previously flushed may be lost. After this function is called all subsequent + * calls on the context will fail or be no-ops. + * + * The typical use case for this function is that the underlying 3D context was lost and further + * API calls may crash. + * + * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to + * create the context must be kept alive even after abandoning the context. Those objects must + * live for the lifetime of the context object itself. The reason for this is so that + * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be + * cleaned up even in a device lost state. + */ + void abandonContext() override; + + /** + * Returns true if the context was abandoned or if the if the backend specific context has + * gotten into an unrecoverarble, lost state (e.g. in Vulkan backend if we've gotten a + * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this + * context. + */ + bool abandoned() override; + + // TODO: Remove this from public after migrating Chrome. + sk_sp<GrContextThreadSafeProxy> threadSafeProxy(); + + /** + * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is + * reset and will return false until another out-of-memory error is reported by the 3D API. If + * the context is abandoned then this will report false. + * + * Currently this is implemented for: + * + * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and + * therefore hide the error from Skia. Also, it is not advised to use this in combination with + * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever + * checking the GL context for OOM. + * + * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has + * occurred. + */ + bool oomed(); + + /** + * This is similar to abandonContext() however the underlying 3D context is not yet lost and + * the context will cleanup all allocated resources before returning. After returning it will + * assume that the underlying context may no longer be valid. + * + * The typical use case for this function is that the client is going to destroy the 3D context + * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed + * elsewhere by either the client or Skia objects). + * + * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to + * create the context must be alive before calling releaseResourcesAndAbandonContext. + */ + void releaseResourcesAndAbandonContext(); + + /////////////////////////////////////////////////////////////////////////// + // Resource Cache + + /** DEPRECATED + * Return the current GPU resource cache limits. + * + * @param maxResources If non-null, will be set to -1. + * @param maxResourceBytes If non-null, returns maximum number of bytes of + * video memory that can be held in the cache. + */ + void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const; + + /** + * Return the current GPU resource cache limit in bytes. + */ + size_t getResourceCacheLimit() const; + + /** + * Gets the current GPU resource cache usage. + * + * @param resourceCount If non-null, returns the number of resources that are held in the + * cache. + * @param maxResourceBytes If non-null, returns the total number of bytes of video memory held + * in the cache. 
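+ *
+ * e.g. (illustrative):
+ *     int resourceCount = 0;
+ *     size_t resourceBytes = 0;
+ *     context->getResourceCacheUsage(&resourceCount, &resourceBytes);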
+ */ + void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const; + + /** + * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources. + */ + size_t getResourceCachePurgeableBytes() const; + + /** DEPRECATED + * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes + * limit, it will be purged (LRU) to keep the cache within the limit. + * + * @param maxResources Unused. + * @param maxResourceBytes The maximum number of bytes of video memory + * that can be held in the cache. + */ + void setResourceCacheLimits(int maxResources, size_t maxResourceBytes); + + /** + * Specify the GPU resource cache limit. If the cache currently exceeds this limit, + * it will be purged (LRU) to keep the cache within the limit. + * + * @param maxResourceBytes The maximum number of bytes of video memory + * that can be held in the cache. + */ + void setResourceCacheLimit(size_t maxResourceBytes); + + /** + * Frees GPU created by the context. Can be called to reduce GPU memory + * pressure. + */ + void freeGpuResources(); + + /** + * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are + * otherwise marked for deletion, regardless of whether the context is under budget. + * + * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will + * be purged but the unlocked resources with persistent data will remain. If + * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be + * purged. + * + * @param msNotUsed Only unlocked resources not used in these last milliseconds + * will be cleaned up. + * @param scratchResourcesOnly If true only unlocked scratch resources will be purged. + */ + void performDeferredCleanup(std::chrono::milliseconds msNotUsed, + bool scratchResourcesOnly=false); + + // Temporary compatibility API for Android. + void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) { + this->performDeferredCleanup(msNotUsed); + } + + /** + * Purge unlocked resources from the cache until the the provided byte count has been reached + * or we have purged all unlocked resources. The default policy is to purge in LRU order, but + * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other + * resource types. + * + * @param maxBytesToPurge the desired number of bytes to be purged. + * @param preferScratchResources If true scratch resources will be purged prior to other + * resource types. + */ + void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources); + + /** + * This entry point is intended for instances where an app has been backgrounded or + * suspended. + * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the + * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false + * then all unlocked resources will be purged. + * In either case, after the unlocked resources are purged a separate pass will be made to + * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true + * some resources with persistent data may be purged to be under budget). + * + * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior + * enforcing the budget requirements. + */ + void purgeUnlockedResources(bool scratchResourcesOnly); + + /** + * Gets the maximum supported texture size. 
+ */ + using GrRecordingContext::maxTextureSize; + + /** + * Gets the maximum supported render target size. + */ + using GrRecordingContext::maxRenderTargetSize; + + /** + * Can a SkImage be created with the given color type. + */ + using GrRecordingContext::colorTypeSupportedAsImage; + + /** + * Can a SkSurface be created with the given color type. To check whether MSAA is supported + * use maxSurfaceSampleCountForColorType(). + */ + using GrRecordingContext::colorTypeSupportedAsSurface; + + /** + * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA + * rendering is supported for the color type. 0 is returned if rendering to this color type + * is not supported at all. + */ + using GrRecordingContext::maxSurfaceSampleCountForColorType; + + /////////////////////////////////////////////////////////////////////////// + // Misc. + + /** + * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before + * executing any more commands on the GPU. If this call returns false, then the GPU back-end + * will not wait on any passed in semaphores, and the client will still own the semaphores, + * regardless of the value of deleteSemaphoresAfterWait. + * + * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case + * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it + * knows that Skia has finished waiting on them. This can be done by using finishedProcs on + * flush calls. + */ + bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores, + bool deleteSemaphoresAfterWait = true); + + /** + * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D + * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by + * GrContext::submit(syncCpu). + */ + void flushAndSubmit(bool syncCpu = false) { + this->flush(GrFlushInfo()); + this->submit(syncCpu); + } + + /** + * Call to ensure all drawing to the context has been flushed to underlying 3D API specific + * objects. A call to `submit` is always required to ensure work is actually sent to + * the gpu. Some specific API details: + * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some + * sync objects from the flush will not be valid until a submission occurs. + * + * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command + * buffer or encoder objects. However, these objects are not sent to the gpu until a + * submission occurs. + * + * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be + * submitted to the gpu during the next submit call (it is possible Skia failed to create a + * subset of the semaphores). The client should not wait on these semaphores until after submit + * has been called, and must keep them alive until then. If this call returns + * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on + * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with + * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the + * client is still responsible for deleting any initialized semaphores. + * Regardleess of semaphore submission the context will still be flushed. It should be + * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not + * happen. 
It simply means there were no semaphores submitted to the GPU. A caller should only + * take this as a failure if they passed in semaphores to be submitted. + */ + GrSemaphoresSubmitted flush(const GrFlushInfo& info); + + void flush() { this->flush({}); } + + /** + * Submit outstanding work to the gpu from all previously un-submitted flushes. The return + * value of the submit will indicate whether or not the submission to the GPU was successful. + * + * If the call returns true, all previously passed in semaphores in flush calls will have been + * submitted to the GPU and they can safely be waited on. The caller should wait on those + * semaphores or perform some other global synchronization before deleting the semaphores. + * + * If it returns false, then those same semaphores will not have been submitted and we will not + * try to submit them again. The caller is free to delete the semaphores at any time. + * + * If the syncCpu flag is true this function will return once the gpu has finished with all + * submitted work. + */ + bool submit(bool syncCpu = false); + + /** + * Checks whether any asynchronous work is complete and if so calls related callbacks. + */ + void checkAsyncWorkCompletion(); + + /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */ + // Chrome is using this! + void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const; + + bool supportsDistanceFieldText() const; + + void storeVkPipelineCacheData(); + + /** + * Retrieve the default GrBackendFormat for a given SkColorType and renderability. + * It is guaranteed that this backend format will be the one used by the following + * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods. + * + * The caller should check that the returned format is valid. + */ + using GrRecordingContext::defaultBackendFormat; + + /** + * The explicitly allocated backend texture API allows clients to use Skia to create backend + * objects outside of Skia proper (i.e., Skia's caching system will not know about them.) + * + * It is the client's responsibility to delete all these objects (using deleteBackendTexture) + * before deleting the context used to create them. If the backend is Vulkan, the textures must + * be deleted before abandoning the context as well. Additionally, clients should only delete + * these objects on the thread for which that context is active. + * + * The client is responsible for ensuring synchronization between different uses + * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the + * surface, rewrapping it in a image and drawing the image will require explicit + * synchronization on the client's part). + */ + + /** + * If possible, create an uninitialized backend texture. The client should ensure that the + * returned backend texture is valid. + * For the Vulkan backend the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_UNDEFINED. + */ + GrBackendTexture createBackendTexture(int width, int height, + const GrBackendFormat&, + GrMipmapped, + GrRenderable, + GrProtected = GrProtected::kNo); + + /** + * If possible, create an uninitialized backend texture. The client should ensure that the + * returned backend texture is valid. + * If successful, the created backend texture will be compatible with the provided + * SkColorType. + * For the Vulkan backend the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_UNDEFINED. 
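A sketch of the flush-then-submit contract described above, using a finished callback; ctx is assumed to be a valid GrDirectContext* and the log messages are illustrative:

#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrTypes.h"

void flushWithCallback(GrDirectContext* ctx) {
    GrFlushInfo info;
    info.fFinishedProc = [](GrGpuFinishedContext) {
        SkDebugf("GPU work from this flush has finished\n");
    };
    info.fFinishedContext = nullptr;

    // flush() only records the work; nothing reaches the GPU until submit() is called.
    ctx->flush(info);
    if (!ctx->submit(/*syncCpu=*/false)) {
        SkDebugf("submit failed; no semaphores were signaled\n");
    }
}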
+ */ + GrBackendTexture createBackendTexture(int width, int height, + SkColorType, + GrMipmapped, + GrRenderable, + GrProtected = GrProtected::kNo); + + /** + * If possible, create a backend texture initialized to a particular color. The client should + * ensure that the returned backend texture is valid. The client can pass in a finishedProc + * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The + * client is required to call `submit` to send the upload work to the gpu. The + * finishedProc will always get called even if we failed to create the GrBackendTexture. + * For the Vulkan backend the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + GrBackendTexture createBackendTexture(int width, int height, + const GrBackendFormat&, + const SkColor4f& color, + GrMipmapped, + GrRenderable, + GrProtected = GrProtected::kNo, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + /** + * If possible, create a backend texture initialized to a particular color. The client should + * ensure that the returned backend texture is valid. The client can pass in a finishedProc + * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The + * client is required to call `submit` to send the upload work to the gpu. The + * finishedProc will always get called even if we failed to create the GrBackendTexture. + * If successful, the created backend texture will be compatible with the provided + * SkColorType. + * For the Vulkan backend the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + GrBackendTexture createBackendTexture(int width, int height, + SkColorType, + const SkColor4f& color, + GrMipmapped, + GrRenderable, + GrProtected = GrProtected::kNo, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + /** + * If possible, create a backend texture initialized with the provided pixmap data. The client + * should ensure that the returned backend texture is valid. The client can pass in a + * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be + * deleted. The client is required to call `submit` to send the upload work to the gpu. + * The finishedProc will always get called even if we failed to create the GrBackendTexture. + * If successful, the created backend texture will be compatible with the provided + * pixmap(s). Compatible, in this case, means that the backend format will be the result + * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted + * when this call returns. + * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired + * the data for all the mipmap levels must be provided. In the mipmapped case all the + * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels + * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The + * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture. + * Note: the pixmap's alphatypes and colorspaces are ignored. 
+ * For the Vulkan backend the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + GrBackendTexture createBackendTexture(const SkPixmap srcData[], + int numLevels, + GrSurfaceOrigin, + GrRenderable, + GrProtected, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + /** + * Convenience version createBackendTexture() that takes just a base level pixmap. + */ + GrBackendTexture createBackendTexture(const SkPixmap& srcData, + GrSurfaceOrigin textureOrigin, + GrRenderable renderable, + GrProtected isProtected, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr) { + return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected, + finishedProc, finishedContext); + } + + // Deprecated versions that do not take origin and assume top-left. + GrBackendTexture createBackendTexture(const SkPixmap srcData[], + int numLevels, + GrRenderable renderable, + GrProtected isProtected, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr) { + return this->createBackendTexture(srcData, + numLevels, + kTopLeft_GrSurfaceOrigin, + renderable, + isProtected, + finishedProc, + finishedContext); + } + GrBackendTexture createBackendTexture(const SkPixmap& srcData, + GrRenderable renderable, + GrProtected isProtected, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr) { + return this->createBackendTexture(&srcData, + 1, + renderable, + isProtected, + finishedProc, + finishedContext); + } + + /** + * If possible, updates a backend texture to be filled to a particular color. The client should + * check the return value to see if the update was successful. The client can pass in a + * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be + * deleted. The client is required to call `submit` to send the upload work to the gpu. + * The finishedProc will always get called even if we failed to update the GrBackendTexture. + * For the Vulkan backend after a successful update the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + bool updateBackendTexture(const GrBackendTexture&, + const SkColor4f& color, + GrGpuFinishedProc finishedProc, + GrGpuFinishedContext finishedContext); + + /** + * If possible, updates a backend texture to be filled to a particular color. The data in + * GrBackendTexture and passed in color is interpreted with respect to the passed in + * SkColorType. The client should check the return value to see if the update was successful. + * The client can pass in a finishedProc to be notified when the data has been uploaded by the + * gpu and the texture can be deleted. The client is required to call `submit` to send + * the upload work to the gpu. The finishedProc will always get called even if we failed to + * update the GrBackendTexture. + * For the Vulkan backend after a successful update the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + bool updateBackendTexture(const GrBackendTexture&, + SkColorType skColorType, + const SkColor4f& color, + GrGpuFinishedProc finishedProc, + GrGpuFinishedContext finishedContext); + + /** + * If possible, updates a backend texture filled with the provided pixmap data. The client + * should check the return value to see if the update was successful. 
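A sketch of the pixmap-based creation path above: a solid-color, single-level (hence non-mipmapped) texture. The 16x16 size and red fill are arbitrary, and ctx is assumed valid:

#include "include/core/SkBitmap.h"
#include "include/gpu/GrDirectContext.h"

void createAndDeleteBackendTexture(GrDirectContext* ctx) {
    SkBitmap bitmap;
    bitmap.allocN32Pixels(16, 16);
    bitmap.eraseColor(SK_ColorRED);

    // One level only, so the resulting texture is not mipmapped.
    GrBackendTexture tex = ctx->createBackendTexture(bitmap.pixmap(),
                                                     kTopLeft_GrSurfaceOrigin,
                                                     GrRenderable::kNo,
                                                     GrProtected::kNo);
    if (!tex.isValid()) {
        return;
    }
    ctx->submit();  // actually send the upload work to the GPU

    // ... wrap 'tex' in an SkImage or SkSurface, use it, then clean up:
    ctx->deleteBackendTexture(tex);
}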
The client can pass in a + * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be + * deleted. The client is required to call `submit` to send the upload work to the gpu. + * The finishedProc will always get called even if we failed to create the GrBackendTexture. + * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case, + * means that the backend format is compatible with the base pixmap's colortype. The src data + * can be deleted when this call returns. + * If the backend texture is mip mapped, the data for all the mipmap levels must be provided. + * In the mipmapped case all the colortypes of the provided pixmaps must be the same. + * Additionally, all the miplevels must be sized correctly (please see + * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the + * pixmap data is vertically flipped in the texture. + * Note: the pixmap's alphatypes and colorspaces are ignored. + * For the Vulkan backend after a successful update the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + bool updateBackendTexture(const GrBackendTexture&, + const SkPixmap srcData[], + int numLevels, + GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + /** + * Convenience version of updateBackendTexture that takes just a base level pixmap. + */ + bool updateBackendTexture(const GrBackendTexture& texture, + const SkPixmap& srcData, + GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr) { + return this->updateBackendTexture(texture, + &srcData, + 1, + textureOrigin, + finishedProc, + finishedContext); + } + + // Deprecated version that does not take origin and assumes top-left. + bool updateBackendTexture(const GrBackendTexture& texture, + const SkPixmap srcData[], + int numLevels, + GrGpuFinishedProc finishedProc, + GrGpuFinishedContext finishedContext) { + return this->updateBackendTexture(texture, + srcData, + numLevels, + kTopLeft_GrSurfaceOrigin, + finishedProc, + finishedContext); + } + + /** + * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is + * guaranteed to match the backend format used by the following + * createCompressedBackendTexture methods that take a CompressionType. + * + * The caller should check that the returned format is valid. + */ + using GrRecordingContext::compressedBackendFormat; + + /** + *If possible, create a compressed backend texture initialized to a particular color. The + * client should ensure that the returned backend texture is valid. The client can pass in a + * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be + * deleted. The client is required to call `submit` to send the upload work to the gpu. + * The finishedProc will always get called even if we failed to create the GrBackendTexture. 
+ * For the Vulkan backend the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + GrBackendTexture createCompressedBackendTexture(int width, int height, + const GrBackendFormat&, + const SkColor4f& color, + GrMipmapped, + GrProtected = GrProtected::kNo, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + GrBackendTexture createCompressedBackendTexture(int width, int height, + SkImage::CompressionType, + const SkColor4f& color, + GrMipmapped, + GrProtected = GrProtected::kNo, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + /** + * If possible, create a backend texture initialized with the provided raw data. The client + * should ensure that the returned backend texture is valid. The client can pass in a + * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be + * deleted. The client is required to call `submit` to send the upload work to the gpu. + * The finishedProc will always get called even if we failed to create the GrBackendTexture + * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired + * the data for all the mipmap levels must be provided. Additionally, all the miplevels + * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). + * For the Vulkan backend the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + GrBackendTexture createCompressedBackendTexture(int width, int height, + const GrBackendFormat&, + const void* data, size_t dataSize, + GrMipmapped, + GrProtected = GrProtected::kNo, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + GrBackendTexture createCompressedBackendTexture(int width, int height, + SkImage::CompressionType, + const void* data, size_t dataSize, + GrMipmapped, + GrProtected = GrProtected::kNo, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + /** + * If possible, updates a backend texture filled with the provided color. If the texture is + * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client + * should check the return value to see if the update was successful. The client can pass in a + * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be + * deleted. The client is required to call `submit` to send the upload work to the gpu. + * The finishedProc will always get called even if we failed to create the GrBackendTexture. + * For the Vulkan backend after a successful update the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + bool updateCompressedBackendTexture(const GrBackendTexture&, + const SkColor4f& color, + GrGpuFinishedProc finishedProc, + GrGpuFinishedContext finishedContext); + + /** + * If possible, updates a backend texture filled with the provided raw data. The client + * should check the return value to see if the update was successful. The client can pass in a + * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be + * deleted. The client is required to call `submit` to send the upload work to the gpu. + * The finishedProc will always get called even if we failed to create the GrBackendTexture. + * If a mipMapped texture is passed in, the data for all the mipmap levels must be provided. 
+ * Additionally, all the miplevels must be sized correctly (please see + * SkMipMap::ComputeLevelSize and ComputeLevelCount). + * For the Vulkan backend after a successful update the layout of the created VkImage will be: + * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL + */ + bool updateCompressedBackendTexture(const GrBackendTexture&, + const void* data, + size_t dataSize, + GrGpuFinishedProc finishedProc, + GrGpuFinishedContext finishedContext); + + /** + * Updates the state of the GrBackendTexture/RenderTarget to have the passed in + * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces and + * SkImages) will also be aware of this state change. This call does not submit the state change + * to the gpu, but requires the client to call `submit` to send it to the GPU. The work + * for this call is ordered linearly with all other calls that require GrContext::submit to be + * called (e.g updateBackendTexture and flush). If finishedProc is not null then it will be + * called with finishedContext after the state transition is known to have occurred on the GPU. + * + * See GrBackendSurfaceMutableState to see what state can be set via this call. + * + * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's + * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to + * tell Skia to not change those respective states. + * + * If previousState is not null and this returns true, then Skia will have filled in + * previousState to have the values of the state before this call. + */ + bool setBackendTextureState(const GrBackendTexture&, + const GrBackendSurfaceMutableState&, + GrBackendSurfaceMutableState* previousState = nullptr, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + bool setBackendRenderTargetState(const GrBackendRenderTarget&, + const GrBackendSurfaceMutableState&, + GrBackendSurfaceMutableState* previousState = nullptr, + GrGpuFinishedProc finishedProc = nullptr, + GrGpuFinishedContext finishedContext = nullptr); + + void deleteBackendTexture(GrBackendTexture); + + // This interface allows clients to pre-compile shaders and populate the runtime program cache. + // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format. + // + // Steps to use this API: + // + // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to + // something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This + // will ensure that the blobs are SkSL, and are suitable for pre-compilation. + // 2) Run your application, and save all of the key/data pairs that are fed to the cache. + // + // 3) Switch over to shipping your application. Include the key/data pairs from above. + // 4) At startup (or any convenient time), call precompileShader for each key/data pair. + // This will compile the SkSL to create a GL program, and populate the runtime cache. + // + // This is only guaranteed to work if the context/device used in step #2 are created in the + // same way as the one used in step #4, and the same GrContextOptions are specified. + // Using cached shader blobs on a different device or driver are undefined. + bool precompileShader(const SkData& key, const SkData& data); + +#ifdef SK_ENABLE_DUMP_GPU + /** Returns a string with detailed information about the context & GPU, in JSON format. 
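Step 4 of the precompilation workflow above might look roughly like this, assuming the SkSL key/data pairs captured in step 2 were shipped with the app (loadSavedShaderBlobs is a hypothetical loader):

#include <utility>
#include <vector>
#include "include/core/SkData.h"
#include "include/gpu/GrDirectContext.h"

// Hypothetical: returns the key/data blobs that the persistent cache recorded in step 2.
std::vector<std::pair<sk_sp<SkData>, sk_sp<SkData>>> loadSavedShaderBlobs();

void warmShaderCache(GrDirectContext* ctx) {
    for (const auto& [key, data] : loadSavedShaderBlobs()) {
        if (!ctx->precompileShader(*key, *data)) {
            SkDebugf("failed to precompile one cached shader\n");
        }
    }
}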
*/ + SkString dump() const; +#endif + + class DirectContextID { + public: + static GrDirectContext::DirectContextID Next(); + + DirectContextID() : fID(SK_InvalidUniqueID) {} + + bool operator==(const DirectContextID& that) const { return fID == that.fID; } + bool operator!=(const DirectContextID& that) const { return !(*this == that); } + + void makeInvalid() { fID = SK_InvalidUniqueID; } + bool isValid() const { return fID != SK_InvalidUniqueID; } + + private: + constexpr DirectContextID(uint32_t id) : fID(id) {} + uint32_t fID; + }; + + DirectContextID directContextID() const { return fDirectContextID; } + + // Provides access to functions that aren't part of the public API. + GrDirectContextPriv priv(); + const GrDirectContextPriv priv() const; // NOLINT(readability-const-return-type) + +protected: + GrDirectContext(GrBackendApi backend, const GrContextOptions& options); + + bool init() override; + + GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); } + skgpu::v1::SmallPathAtlasMgr* onGetSmallPathAtlasMgr(); + + GrDirectContext* asDirectContext() override { return this; } + +private: + // This call will make sure out work on the GPU is finished and will execute any outstanding + // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the + // outstanding work on the gpu. The main use currently for this function is when tearing down or + // abandoning the context. + // + // When we finish up work on the GPU it could trigger callbacks to the client. In the case we + // are abandoning the context we don't want the client to be able to use the GrDirectContext to + // issue more commands during the callback. Thus before calling this function we set the + // GrDirectContext's state to be abandoned. However, we need to be able to get by the abaonded + // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned + // bool is used for this signal. + void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned); + + const DirectContextID fDirectContextID; + // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed + // after all of its users. Clients of fTaskGroup will generally want to ensure that they call + // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being + // invoked after objects they depend upon have already been destroyed. + std::unique_ptr<SkTaskGroup> fTaskGroup; + std::unique_ptr<GrStrikeCache> fStrikeCache; + sk_sp<GrGpu> fGpu; + std::unique_ptr<GrResourceCache> fResourceCache; + std::unique_ptr<GrResourceProvider> fResourceProvider; + + bool fDidTestPMConversions; + // true if the PM/UPM conversion succeeded; false otherwise + bool fPMUPMConversionsRoundTrip; + + GrContextOptions::PersistentCache* fPersistentCache; + + std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager; + std::unique_ptr<GrAtlasManager> fAtlasManager; + + std::unique_ptr<skgpu::v1::SmallPathAtlasMgr> fSmallPathAtlasMgr; + + friend class GrDirectContextPriv; + + using INHERITED = GrRecordingContext; +}; + + +#endif diff --git a/src/deps/skia/include/gpu/GrDriverBugWorkarounds.h b/src/deps/skia/include/gpu/GrDriverBugWorkarounds.h new file mode 100644 index 000000000..c57efc601 --- /dev/null +++ b/src/deps/skia/include/gpu/GrDriverBugWorkarounds.h @@ -0,0 +1,52 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef GrDriverBugWorkarounds_DEFINED +#define GrDriverBugWorkarounds_DEFINED + +// External embedders of Skia can override this to use their own list +// of workaround names. +#ifdef SK_GPU_WORKAROUNDS_HEADER +#include SK_GPU_WORKAROUNDS_HEADER +#else +// To regenerate this file, set gn arg "skia_generate_workarounds = true". +// This is not rebuilt by default to avoid embedders having to have extra +// build steps. +#include "include/gpu/GrDriverBugWorkaroundsAutogen.h" +#endif + +#include "include/core/SkTypes.h" + +#include <stdint.h> +#include <vector> + +enum GrDriverBugWorkaroundType { +#define GPU_OP(type, name) type, + GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) +#undef GPU_OP + NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES +}; + +class SK_API GrDriverBugWorkarounds { + public: + GrDriverBugWorkarounds(); + GrDriverBugWorkarounds(const GrDriverBugWorkarounds&) = default; + explicit GrDriverBugWorkarounds(const std::vector<int32_t>& workarounds); + + GrDriverBugWorkarounds& operator=(const GrDriverBugWorkarounds&) = default; + + // Turn on any workarounds listed in |workarounds| (but don't turn any off). + void applyOverrides(const GrDriverBugWorkarounds& workarounds); + + ~GrDriverBugWorkarounds(); + +#define GPU_OP(type, name) bool name = false; + GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) +#undef GPU_OP +}; + +#endif diff --git a/src/deps/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h b/src/deps/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h new file mode 100644 index 000000000..4db9479b2 --- /dev/null +++ b/src/deps/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h @@ -0,0 +1,45 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is auto-generated from build_workaround_header.py +// DO NOT EDIT! 
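To illustrate the generated interface: each GPU_OP entry becomes both an enum value and a like-named bool field, so a workaround list can be built from enum values and merged into another set. Which workarounds a given device actually needs is outside the scope of this sketch:

#include "include/gpu/GrDriverBugWorkarounds.h"

void forceWorkarounds(GrDriverBugWorkarounds* active) {
    GrDriverBugWorkarounds overrides({DISABLE_TEXTURE_STORAGE,
                                      MAX_MSAA_SAMPLE_COUNT_4});
    active->applyOverrides(overrides);  // turns these on; never turns anything off
    SkASSERT(active->disable_texture_storage);
}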
+ +#define GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) \ + GPU_OP(ADD_AND_TRUE_TO_LOOP_CONDITION, \ + add_and_true_to_loop_condition) \ + GPU_OP(DISABLE_BLEND_EQUATION_ADVANCED, \ + disable_blend_equation_advanced) \ + GPU_OP(DISABLE_DISCARD_FRAMEBUFFER, \ + disable_discard_framebuffer) \ + GPU_OP(DISABLE_DUAL_SOURCE_BLENDING_SUPPORT, \ + disable_dual_source_blending_support) \ + GPU_OP(DISABLE_TEXTURE_STORAGE, \ + disable_texture_storage) \ + GPU_OP(DISALLOW_LARGE_INSTANCED_DRAW, \ + disallow_large_instanced_draw) \ + GPU_OP(EMULATE_ABS_INT_FUNCTION, \ + emulate_abs_int_function) \ + GPU_OP(FLUSH_ON_FRAMEBUFFER_CHANGE, \ + flush_on_framebuffer_change) \ + GPU_OP(FORCE_UPDATE_SCISSOR_STATE_WHEN_BINDING_FBO0, \ + force_update_scissor_state_when_binding_fbo0) \ + GPU_OP(GL_CLEAR_BROKEN, \ + gl_clear_broken) \ + GPU_OP(MAX_FRAGMENT_UNIFORM_VECTORS_32, \ + max_fragment_uniform_vectors_32) \ + GPU_OP(MAX_MSAA_SAMPLE_COUNT_4, \ + max_msaa_sample_count_4) \ + GPU_OP(MAX_TEXTURE_SIZE_LIMIT_4096, \ + max_texture_size_limit_4096) \ + GPU_OP(PACK_PARAMETERS_WORKAROUND_WITH_PACK_BUFFER, \ + pack_parameters_workaround_with_pack_buffer) \ + GPU_OP(REMOVE_POW_WITH_CONSTANT_EXPONENT, \ + remove_pow_with_constant_exponent) \ + GPU_OP(REWRITE_DO_WHILE_LOOPS, \ + rewrite_do_while_loops) \ + GPU_OP(UNBIND_ATTACHMENTS_ON_BOUND_RENDER_FBO_DELETE, \ + unbind_attachments_on_bound_render_fbo_delete) \ + GPU_OP(UNFOLD_SHORT_CIRCUIT_AS_TERNARY_OPERATION, \ + unfold_short_circuit_as_ternary_operation) \ +// The End diff --git a/src/deps/skia/include/gpu/GrRecordingContext.h b/src/deps/skia/include/gpu/GrRecordingContext.h new file mode 100644 index 000000000..c287f731b --- /dev/null +++ b/src/deps/skia/include/gpu/GrRecordingContext.h @@ -0,0 +1,275 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrRecordingContext_DEFINED +#define GrRecordingContext_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/private/GrImageContext.h" +#include "include/private/SkTArray.h" + +#if GR_GPU_STATS && GR_TEST_UTILS +#include <map> +#include <string> +#endif + +class GrAuditTrail; +class GrBackendFormat; +class GrDrawingManager; +class GrOnFlushCallbackObject; +class GrMemoryPool; +class GrProgramDesc; +class GrProgramInfo; +class GrProxyProvider; +class GrRecordingContextPriv; +class GrSubRunAllocator; +class GrSurfaceProxy; +class GrTextBlobRedrawCoordinator; +class GrThreadSafeCache; +class SkArenaAlloc; +class SkJSONWriter; + +#if GR_TEST_UTILS +class SkString; +#endif + +class GrRecordingContext : public GrImageContext { +public: + ~GrRecordingContext() override; + + SK_API GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const { + return INHERITED::defaultBackendFormat(ct, renderable); + } + + /** + * Reports whether the GrDirectContext associated with this GrRecordingContext is abandoned. + * When called on a GrDirectContext it may actively check whether the underlying 3D API + * device/context has been disconnected before reporting the status. If so, calling this + * method will transition the GrDirectContext to the abandoned state. + */ + bool abandoned() override { return INHERITED::abandoned(); } + + /* + * Can a SkSurface be created with the given color type. To check whether MSAA is supported + * use maxSurfaceSampleCountForColorType(). 
+ */ + SK_API bool colorTypeSupportedAsSurface(SkColorType colorType) const { + if (kR16G16_unorm_SkColorType == colorType || + kA16_unorm_SkColorType == colorType || + kA16_float_SkColorType == colorType || + kR16G16_float_SkColorType == colorType || + kR16G16B16A16_unorm_SkColorType == colorType || + kGray_8_SkColorType == colorType) { + return false; + } + + return this->maxSurfaceSampleCountForColorType(colorType) > 0; + } + + /** + * Gets the maximum supported texture size. + */ + SK_API int maxTextureSize() const; + + /** + * Gets the maximum supported render target size. + */ + SK_API int maxRenderTargetSize() const; + + /** + * Can a SkImage be created with the given color type. + */ + SK_API bool colorTypeSupportedAsImage(SkColorType) const; + + /** + * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA + * rendering is supported for the color type. 0 is returned if rendering to this color type + * is not supported at all. + */ + SK_API int maxSurfaceSampleCountForColorType(SkColorType) const; + + // Provides access to functions that aren't part of the public API. + GrRecordingContextPriv priv(); + const GrRecordingContextPriv priv() const; // NOLINT(readability-const-return-type) + + // The collection of specialized memory arenas for different types of data recorded by a + // GrRecordingContext. Arenas does not maintain ownership of the pools it groups together. + class Arenas { + public: + Arenas(SkArenaAlloc*, GrSubRunAllocator*); + + // For storing pipelines and other complex data as-needed by ops + SkArenaAlloc* recordTimeAllocator() { return fRecordTimeAllocator; } + + // For storing GrTextBlob SubRuns + GrSubRunAllocator* recordTimeSubRunAllocator() { return fRecordTimeSubRunAllocator; } + + private: + SkArenaAlloc* fRecordTimeAllocator; + GrSubRunAllocator* fRecordTimeSubRunAllocator; + }; + +protected: + friend class GrRecordingContextPriv; // for hidden functions + friend class SkDeferredDisplayList; // for OwnedArenas + friend class SkDeferredDisplayListPriv; // for ProgramData + + // Like Arenas, but preserves ownership of the underlying pools. + class OwnedArenas { + public: + OwnedArenas(bool ddlRecording); + ~OwnedArenas(); + + Arenas get(); + + OwnedArenas& operator=(OwnedArenas&&); + + private: + bool fDDLRecording; + std::unique_ptr<SkArenaAlloc> fRecordTimeAllocator; + std::unique_ptr<GrSubRunAllocator> fRecordTimeSubRunAllocator; + }; + + GrRecordingContext(sk_sp<GrContextThreadSafeProxy>, bool ddlRecording); + + bool init() override; + + void abandonContext() override; + + GrDrawingManager* drawingManager(); + + // There is no going back from this method. It should only be called to control the timing + // during abandon or destruction of the context. + void destroyDrawingManager(); + + Arenas arenas() { return fArenas.get(); } + // This entry point should only be used for DDL creation where we want the ops' lifetime to + // match that of the DDL. 
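Putting the capability queries above together, a caller might probe MSAA support for a color type before asking for a multisampled surface via SkSurface::MakeRenderTarget. A rough sketch (the 4x sample request and 256x256 size are arbitrary, and ctx is assumed valid):

#include <algorithm>
#include "include/core/SkSurface.h"
#include "include/gpu/GrDirectContext.h"

sk_sp<SkSurface> makeMsaaSurfaceIfSupported(GrDirectContext* ctx) {
    const SkColorType ct = kRGBA_8888_SkColorType;
    if (!ctx->colorTypeSupportedAsSurface(ct)) {
        return nullptr;
    }
    // Clamp the request to what the backend supports (the query returns 0 if unsupported).
    int samples = std::min(4, ctx->maxSurfaceSampleCountForColorType(ct));
    SkImageInfo info = SkImageInfo::Make(256, 256, ct, kPremul_SkAlphaType);
    return SkSurface::MakeRenderTarget(ctx, SkBudgeted::kYes, info, samples,
                                       kTopLeft_GrSurfaceOrigin, /*surfaceProps=*/nullptr);
}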
+ OwnedArenas&& detachArenas(); + + GrProxyProvider* proxyProvider() { return fProxyProvider.get(); } + const GrProxyProvider* proxyProvider() const { return fProxyProvider.get(); } + + struct ProgramData { + ProgramData(std::unique_ptr<const GrProgramDesc>, const GrProgramInfo*); + ProgramData(ProgramData&&); // for SkTArray + ProgramData(const ProgramData&) = delete; + ~ProgramData(); + + const GrProgramDesc& desc() const { return *fDesc; } + const GrProgramInfo& info() const { return *fInfo; } + + private: + // TODO: store the GrProgramDescs in the 'fRecordTimeData' arena + std::unique_ptr<const GrProgramDesc> fDesc; + // The program infos should be stored in 'fRecordTimeData' so do not need to be ref + // counted or deleted in the destructor. + const GrProgramInfo* fInfo = nullptr; + }; + + // This entry point gives the recording context a chance to cache the provided + // programInfo. The DDL context takes this opportunity to store programInfos as a sidecar + // to the DDL. + virtual void recordProgramInfo(const GrProgramInfo*) {} + // This asks the recording context to return any programInfos it may have collected + // via the 'recordProgramInfo' call. It is up to the caller to ensure that the lifetime + // of the programInfos matches the intended use. For example, in DDL-record mode it + // is known that all the programInfos will have been allocated in an arena with the + // same lifetime at the DDL itself. + virtual void detachProgramData(SkTArray<ProgramData>*) {} + + GrTextBlobRedrawCoordinator* getTextBlobRedrawCoordinator(); + const GrTextBlobRedrawCoordinator* getTextBlobRedrawCoordinator() const; + + GrThreadSafeCache* threadSafeCache(); + const GrThreadSafeCache* threadSafeCache() const; + + /** + * Registers an object for flush-related callbacks. (See GrOnFlushCallbackObject.) + * + * NOTE: the drawing manager tracks this object as a raw pointer; it is up to the caller to + * ensure its lifetime is tied to that of the context. + */ + void addOnFlushCallbackObject(GrOnFlushCallbackObject*); + + GrRecordingContext* asRecordingContext() override { return this; } + + class Stats { + public: + Stats() = default; + +#if GR_GPU_STATS + void reset() { *this = {}; } + + int numPathMasksGenerated() const { return fNumPathMasksGenerated; } + void incNumPathMasksGenerated() { fNumPathMasksGenerated++; } + + int numPathMaskCacheHits() const { return fNumPathMaskCacheHits; } + void incNumPathMasksCacheHits() { fNumPathMaskCacheHits++; } + +#if GR_TEST_UTILS + void dump(SkString* out) const; + void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const; +#endif + + private: + int fNumPathMasksGenerated{0}; + int fNumPathMaskCacheHits{0}; + +#else // GR_GPU_STATS + void incNumPathMasksGenerated() {} + void incNumPathMasksCacheHits() {} + +#if GR_TEST_UTILS + void dump(SkString*) const {} + void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const {} +#endif +#endif // GR_GPU_STATS + } fStats; + +#if GR_GPU_STATS && GR_TEST_UTILS + struct DMSAAStats { + void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const; + void dump() const; + void merge(const DMSAAStats&); + int fNumRenderPasses = 0; + int fNumMultisampleRenderPasses = 0; + std::map<std::string, int> fTriggerCounts; + }; + + DMSAAStats fDMSAAStats; +#endif + + Stats* stats() { return &fStats; } + const Stats* stats() const { return &fStats; } + void dumpJSON(SkJSONWriter*) const; + +protected: + // Delete last in case other objects call it during destruction. 
+ std::unique_ptr<GrAuditTrail> fAuditTrail; + +private: + OwnedArenas fArenas; + + std::unique_ptr<GrDrawingManager> fDrawingManager; + std::unique_ptr<GrProxyProvider> fProxyProvider; + +#if GR_TEST_UTILS + int fSuppressWarningMessages = 0; +#endif + + using INHERITED = GrImageContext; +}; + +/** + * Safely cast a possibly-null base context to direct context. + */ +static inline GrDirectContext* GrAsDirectContext(GrContext_Base* base) { + return base ? base->asDirectContext() : nullptr; +} + +#endif diff --git a/src/deps/skia/include/gpu/GrSurfaceInfo.h b/src/deps/skia/include/gpu/GrSurfaceInfo.h new file mode 100644 index 000000000..e4ef3c18d --- /dev/null +++ b/src/deps/skia/include/gpu/GrSurfaceInfo.h @@ -0,0 +1,166 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrSurfaceInfo_DEFINED +#define GrSurfaceInfo_DEFINED + +#include "include/gpu/GrTypes.h" + +#ifdef SK_GL +#include "include/private/GrGLTypesPriv.h" +#endif +#ifdef SK_VULKAN +#include "include/private/GrVkTypesPriv.h" +#endif +#ifdef SK_DIRECT3D +#include "include/private/GrD3DTypesMinimal.h" +struct GrD3DSurfaceInfo; +#endif +#ifdef SK_METAL +#include "include/private/GrMtlTypesPriv.h" +#endif +#ifdef SK_DAWN +#include "include/private/GrDawnTypesPriv.h" +#endif +#include "include/private/GrMockTypesPriv.h" + +class GrSurfaceInfo { +public: + GrSurfaceInfo() {} +#ifdef SK_GL + GrSurfaceInfo(const GrGLSurfaceInfo& glInfo) + : fBackend(GrBackendApi::kOpenGL) + , fValid(true) + , fSampleCount(glInfo.fSampleCount) + , fLevelCount(glInfo.fLevelCount) + , fProtected(glInfo.fProtected) + , fGLSpec(glInfo) {} +#endif +#ifdef SK_VULKAN + GrSurfaceInfo(const GrVkSurfaceInfo& vkInfo) + : fBackend(GrBackendApi::kVulkan) + , fValid(true) + , fSampleCount(vkInfo.fSampleCount) + , fLevelCount(vkInfo.fLevelCount) + , fProtected(vkInfo.fProtected) + , fVkSpec(vkInfo) {} +#endif +#ifdef SK_DIRECT3D + GrSurfaceInfo(const GrD3DSurfaceInfo& d3dInfo); +#endif +#ifdef SK_METAL + GrSurfaceInfo(const GrMtlSurfaceInfo& mtlInfo) + : fBackend(GrBackendApi::kMetal) + , fValid(true) + , fSampleCount(mtlInfo.fSampleCount) + , fLevelCount(mtlInfo.fLevelCount) + , fProtected(mtlInfo.fProtected) + , fMtlSpec(mtlInfo) {} +#endif +#ifdef SK_DAWN + GrSurfaceInfo(const GrDawnSurfaceInfo& dawnInfo) + : fBackend(GrBackendApi::kDawn) + , fValid(true) + , fSampleCount(dawnInfo.fSampleCount) + , fLevelCount(dawnInfo.fLevelCount) + , fProtected(dawnInfo.fProtected) + , fDawnSpec(dawnInfo) {} +#endif + GrSurfaceInfo(const GrMockSurfaceInfo& mockInfo) + : fBackend(GrBackendApi::kMock) + , fValid(true) + , fSampleCount(mockInfo.fSampleCount) + , fLevelCount(mockInfo.fLevelCount) + , fProtected(mockInfo.fProtected) + , fMockSpec(mockInfo) {} + + ~GrSurfaceInfo(); + GrSurfaceInfo(const GrSurfaceInfo&) = default; + + bool isValid() const { return fValid; } + GrBackendApi backend() const { return fBackend; } + + uint32_t numSamples() const { return fSampleCount; } + uint32_t numMipLevels() const { return fLevelCount; } + GrProtected isProtected() const { return fProtected; } + +#ifdef SK_GL + bool getGLSurfaceInfo(GrGLSurfaceInfo* info) const { + if (!this->isValid() || fBackend != GrBackendApi::kOpenGL) { + return false; + } + *info = GrGLTextureSpecToSurfaceInfo(fGLSpec, fSampleCount, fLevelCount, fProtected); + return true; + } +#endif +#ifdef SK_VULKAN + bool getVkSurfaceInfo(GrVkSurfaceInfo* info) const { + if (!this->isValid() || fBackend != 
GrBackendApi::kVulkan) { + return false; + } + *info = GrVkImageSpecToSurfaceInfo(fVkSpec, fSampleCount, fLevelCount, fProtected); + return true; + } +#endif +#ifdef SK_DIRECT3D + bool getD3DSurfaceInfo(GrD3DSurfaceInfo*) const; +#endif +#ifdef SK_METAL + bool getMtlSurfaceInfo(GrMtlSurfaceInfo* info) const { + if (!this->isValid() || fBackend != GrBackendApi::kMetal) { + return false; + } + *info = GrMtlTextureSpecToSurfaceInfo(fMtlSpec, fSampleCount, fLevelCount, fProtected); + return true; + } +#endif +#ifdef SK_DAWN + bool getDawnSurfaceInfo(GrDawnSurfaceInfo* info) const { + if (!this->isValid() || fBackend != GrBackendApi::kDawn) { + return false; + } + *info = GrDawnTextureSpecToSurfaceInfo(fDawnSpec, fSampleCount, fLevelCount, fProtected); + return true; + } +#endif + bool getMockSurfaceInfo(GrMockSurfaceInfo* info) const { + if (!this->isValid() || fBackend != GrBackendApi::kMock) { + return false; + } + *info = GrMockTextureSpecToSurfaceInfo(fMockSpec, fSampleCount, fLevelCount, fProtected); + return true; + } + +private: + GrBackendApi fBackend = GrBackendApi::kMock; + bool fValid = false; + + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + GrProtected fProtected = GrProtected::kNo; + + union { +#ifdef SK_GL + GrGLTextureSpec fGLSpec; +#endif +#ifdef SK_VULKAN + GrVkImageSpec fVkSpec; +#endif +#ifdef SK_DIRECT3D + GrD3DTextureResourceSpecHolder fD3DSpec; +#endif +#ifdef SK_METAL + GrMtlTextureSpec fMtlSpec; +#endif +#ifdef SK_DAWN + GrDawnTextureSpec fDawnSpec; +#endif + GrMockTextureSpec fMockSpec; + }; +}; + +#endif diff --git a/src/deps/skia/include/gpu/GrTypes.h b/src/deps/skia/include/gpu/GrTypes.h new file mode 100644 index 000000000..20fc62f69 --- /dev/null +++ b/src/deps/skia/include/gpu/GrTypes.h @@ -0,0 +1,244 @@ +/* + * Copyright 2010 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrTypes_DEFINED +#define GrTypes_DEFINED + +#include "include/core/SkMath.h" +#include "include/core/SkTypes.h" +#include "include/gpu/GrConfig.h" + +class GrBackendSemaphore; +class SkImage; +class SkSurface; + +//////////////////////////////////////////////////////////////////////////////// + +/** + * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of + * masking with type safety. Instantiated with the ~ operator. + */ +template<typename TFlags> class GrTFlagsMask { +public: + constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {} + constexpr explicit GrTFlagsMask(int value) : fValue(value) {} + constexpr int value() const { return fValue; } +private: + const int fValue; +}; + +/** + * Defines bitwise operators that make it possible to use an enum class as a + * basic bitfield. 
+ */ +#define GR_MAKE_BITFIELD_CLASS_OPS(X) \ + SK_MAYBE_UNUSED constexpr GrTFlagsMask<X> operator~(X a) { \ + return GrTFlagsMask<X>(~static_cast<int>(a)); \ + } \ + SK_MAYBE_UNUSED constexpr X operator|(X a, X b) { \ + return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \ + } \ + SK_MAYBE_UNUSED inline X& operator|=(X& a, X b) { \ + return (a = a | b); \ + } \ + SK_MAYBE_UNUSED constexpr bool operator&(X a, X b) { \ + return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \ + } \ + SK_MAYBE_UNUSED constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X> a, GrTFlagsMask<X> b) { \ + return GrTFlagsMask<X>(a.value() | b.value()); \ + } \ + SK_MAYBE_UNUSED constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X> a, X b) { \ + return GrTFlagsMask<X>(a.value() | static_cast<int>(b)); \ + } \ + SK_MAYBE_UNUSED constexpr GrTFlagsMask<X> operator|(X a, GrTFlagsMask<X> b) { \ + return GrTFlagsMask<X>(static_cast<int>(a) | b.value()); \ + } \ + SK_MAYBE_UNUSED constexpr X operator&(GrTFlagsMask<X> a, GrTFlagsMask<X> b) { \ + return static_cast<X>(a.value() & b.value()); \ + } \ + SK_MAYBE_UNUSED constexpr X operator&(GrTFlagsMask<X> a, X b) { \ + return static_cast<X>(a.value() & static_cast<int>(b)); \ + } \ + SK_MAYBE_UNUSED constexpr X operator&(X a, GrTFlagsMask<X> b) { \ + return static_cast<X>(static_cast<int>(a) & b.value()); \ + } \ + SK_MAYBE_UNUSED inline X& operator&=(X& a, GrTFlagsMask<X> b) { \ + return (a = a & b); \ + } \ + +#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \ + friend constexpr GrTFlagsMask<X> operator ~(X); \ + friend constexpr X operator |(X, X); \ + friend X& operator |=(X&, X); \ + friend constexpr bool operator &(X, X); \ + friend constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X>, GrTFlagsMask<X>); \ + friend constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X>, X); \ + friend constexpr GrTFlagsMask<X> operator|(X, GrTFlagsMask<X>); \ + friend constexpr X operator&(GrTFlagsMask<X>, GrTFlagsMask<X>); \ + friend constexpr X operator&(GrTFlagsMask<X>, X); \ + friend constexpr X operator&(X, GrTFlagsMask<X>); \ + friend X& operator &=(X&, GrTFlagsMask<X>) + +/////////////////////////////////////////////////////////////////////////////// + +/** + * Possible 3D APIs that may be used by Ganesh. + */ +enum class GrBackendApi : unsigned { + kOpenGL, + kVulkan, + kMetal, + kDirect3D, + kDawn, + /** + * Mock is a backend that does not draw anything. It is used for unit tests + * and to measure CPU overhead. + */ + kMock, + + /** + * Added here to support the legacy GrBackend enum value and clients who referenced it using + * GrBackend::kOpenGL_GrBackend. + */ + kOpenGL_GrBackend = kOpenGL, +}; + +/** + * Previously the above enum was not an enum class but a normal enum. To support the legacy use of + * the enum values we define them below so that no clients break. + */ +typedef GrBackendApi GrBackend; + +static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal; +static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan; +static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock; + +/////////////////////////////////////////////////////////////////////////////// + +/** + * Used to say whether a texture has mip levels allocated or not. + */ +enum class GrMipmapped : bool { + kNo = false, + kYes = true +}; +/** Deprecated legacy alias of GrMipmapped. */ +using GrMipMapped = GrMipmapped; + +/* + * Can a GrBackendObject be rendered to? 
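As a toy illustration of the bitfield helpers defined above (DemoFlags is made up purely for the example):

#include "include/gpu/GrTypes.h"

enum class DemoFlags : int {
    kRed  = 1 << 0,
    kBlue = 1 << 1,
};
GR_MAKE_BITFIELD_CLASS_OPS(DemoFlags)

bool demo() {
    DemoFlags f = DemoFlags::kRed | DemoFlags::kBlue;  // operator| from the macro
    f &= ~DemoFlags::kRed;                             // operator~ yields a GrTFlagsMask<DemoFlags>
    return f & DemoFlags::kBlue;                       // operator& converts the test to bool
}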
+ */ +enum class GrRenderable : bool { + kNo = false, + kYes = true +}; + +/* + * Used to say whether texture is backed by protected memory. + */ +enum class GrProtected : bool { + kNo = false, + kYes = true +}; + +/////////////////////////////////////////////////////////////////////////////// + +/** + * GPU SkImage and SkSurfaces can be stored such that (0, 0) in texture space may correspond to + * either the top-left or bottom-left content pixel. + */ +enum GrSurfaceOrigin : int { + kTopLeft_GrSurfaceOrigin, + kBottomLeft_GrSurfaceOrigin, +}; + +/** + * A GrContext's cache of backend context state can be partially invalidated. + * These enums are specific to the GL backend and we'd add a new set for an alternative backend. + */ +enum GrGLBackendState { + kRenderTarget_GrGLBackendState = 1 << 0, + // Also includes samplers bound to texture units. + kTextureBinding_GrGLBackendState = 1 << 1, + // View state stands for scissor and viewport + kView_GrGLBackendState = 1 << 2, + kBlend_GrGLBackendState = 1 << 3, + kMSAAEnable_GrGLBackendState = 1 << 4, + kVertex_GrGLBackendState = 1 << 5, + kStencil_GrGLBackendState = 1 << 6, + kPixelStore_GrGLBackendState = 1 << 7, + kProgram_GrGLBackendState = 1 << 8, + kFixedFunction_GrGLBackendState = 1 << 9, + kMisc_GrGLBackendState = 1 << 10, + kALL_GrGLBackendState = 0xffff +}; + +/** + * This value translates to reseting all the context state for any backend. + */ +static const uint32_t kAll_GrBackendState = 0xffffffff; + +typedef void* GrGpuFinishedContext; +typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext); + +typedef void* GrGpuSubmittedContext; +typedef void (*GrGpuSubmittedProc)(GrGpuSubmittedContext submittedContext, bool success); + +/** + * Struct to supply options to flush calls. + * + * After issuing all commands, fNumSemaphore semaphores will be signaled by the gpu. The client + * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphore's + * can be either initialized or not. If they are initialized, the backend uses the passed in + * semaphore. If it is not initialized, a new semaphore is created and the GrBackendSemaphore + * object is initialized with that semaphore. The semaphores are not sent to the GPU until the next + * GrContext::submit call is made. See the GrContext::submit for more information. + * + * The client will own and be responsible for deleting the underlying semaphores that are stored + * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects + * themselves can be deleted as soon as this function returns. + * + * If a finishedProc is provided, the finishedProc will be called when all work submitted to the gpu + * from this flush call and all previous flush calls has finished on the GPU. If the flush call + * fails due to an error and nothing ends up getting sent to the GPU, the finished proc is called + * immediately. + * + * If a submittedProc is provided, the submittedProc will be called when all work from this flush + * call is submitted to the GPU. If the flush call fails due to an error and nothing will get sent + * to the GPU, the submitted proc is called immediately. It is possibly that when work is finally + * submitted, that the submission actual fails. In this case we will not reattempt to do the + * submission. Skia notifies the client of these via the success bool passed into the submittedProc. 
+ * The submittedProc is useful to the client to know when semaphores that were sent with the flush + * have actually been submitted to the GPU so that they can be waited on (or deleted if the submit + * fails). + * Note about GL: In GL work gets sent to the driver immediately during the flush call, but we don't + * really know when the driver sends the work to the GPU. Therefore, we treat the submitted proc as + * we do in other backends. It will be called when the next GrContext::submit is called after the + * flush (or possibly during the flush if there is no work to be done for the flush). The main use + * case for the submittedProc is to know when semaphores have been sent to the GPU and even in GL + * it is required to call GrContext::submit to flush them. So a client should be able to treat all + * backend APIs the same in terms of how the submitted procs are treated. + */ +struct GrFlushInfo { + size_t fNumSemaphores = 0; + GrBackendSemaphore* fSignalSemaphores = nullptr; + GrGpuFinishedProc fFinishedProc = nullptr; + GrGpuFinishedContext fFinishedContext = nullptr; + GrGpuSubmittedProc fSubmittedProc = nullptr; + GrGpuSubmittedContext fSubmittedContext = nullptr; +}; + +/** + * Enum used as return value when flush with semaphores so the client knows whether the valid + * semaphores will be submitted on the next GrContext::submit call. + */ +enum class GrSemaphoresSubmitted : bool { + kNo = false, + kYes = true +}; + +#endif diff --git a/src/deps/skia/include/gpu/GrYUVABackendTextures.h b/src/deps/skia/include/gpu/GrYUVABackendTextures.h new file mode 100644 index 000000000..edcde7e53 --- /dev/null +++ b/src/deps/skia/include/gpu/GrYUVABackendTextures.h @@ -0,0 +1,124 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrYUVABackendTextures_DEFINED +#define GrYUVABackendTextures_DEFINED + +#include "include/core/SkYUVAInfo.h" +#include "include/gpu/GrBackendSurface.h" + +#include <tuple> + +/** + * A description of a set GrBackendTextures that hold the planar data described by a SkYUVAInfo. + */ +class SK_API GrYUVABackendTextureInfo { +public: + static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes; + + /** Default GrYUVABackendTextureInfo is invalid. */ + GrYUVABackendTextureInfo() = default; + + /** + * Initializes a GrYUVABackendTextureInfo to describe a set of textures that can store the + * planes indicated by the SkYUVAInfo. The texture dimensions are taken from the SkYUVAInfo's + * plane dimensions. All the described textures share a common origin. The planar image this + * describes will be mip mapped if all the textures are individually mip mapped as indicated + * by GrMipmapped. This will produce an invalid result (return false from isValid()) if the + * passed formats' channels don't agree with SkYUVAInfo. 
+ */ + GrYUVABackendTextureInfo(const SkYUVAInfo&, + const GrBackendFormat[kMaxPlanes], + GrMipmapped, + GrSurfaceOrigin); + + GrYUVABackendTextureInfo(const GrYUVABackendTextureInfo&) = default; + + GrYUVABackendTextureInfo& operator=(const GrYUVABackendTextureInfo&) = default; + + bool operator==(const GrYUVABackendTextureInfo&) const; + bool operator!=(const GrYUVABackendTextureInfo& that) const { return !(*this == that); } + + const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; } + + SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); } + + GrMipmapped mipmapped() const { return fMipmapped; } + + GrSurfaceOrigin textureOrigin() const { return fTextureOrigin; } + + /** The number of SkPixmap planes, 0 if this GrYUVABackendTextureInfo is invalid. */ + int numPlanes() const { return fYUVAInfo.numPlanes(); } + + /** Format of the ith plane, or invalid format if i >= numPlanes() */ + const GrBackendFormat& planeFormat(int i) const { return fPlaneFormats[i]; } + + /** + * Returns true if this has been configured with a valid SkYUVAInfo with compatible texture + * formats. + */ + bool isValid() const { return fYUVAInfo.isValid(); } + + /** + * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be + * valid if this->isValid(). + */ + SkYUVAInfo::YUVALocations toYUVALocations() const; + +private: + SkYUVAInfo fYUVAInfo; + GrBackendFormat fPlaneFormats[kMaxPlanes]; + GrMipmapped fMipmapped = GrMipmapped::kNo; + GrSurfaceOrigin fTextureOrigin = kTopLeft_GrSurfaceOrigin; +}; + +/** + * A set of GrBackendTextures that hold the planar data for an image described a SkYUVAInfo. + */ +class SK_API GrYUVABackendTextures { +public: + GrYUVABackendTextures() = default; + GrYUVABackendTextures(const GrYUVABackendTextures&) = delete; + GrYUVABackendTextures(GrYUVABackendTextures&&) = default; + + GrYUVABackendTextures& operator=(const GrYUVABackendTextures&) = delete; + GrYUVABackendTextures& operator=(GrYUVABackendTextures&&) = default; + + GrYUVABackendTextures(const SkYUVAInfo&, + const GrBackendTexture[SkYUVAInfo::kMaxPlanes], + GrSurfaceOrigin textureOrigin); + + const std::array<GrBackendTexture, SkYUVAInfo::kMaxPlanes>& textures() const { + return fTextures; + } + + GrBackendTexture texture(int i) const { + SkASSERT(i >= 0 && i < SkYUVAInfo::kMaxPlanes); + return fTextures[static_cast<size_t>(i)]; + } + + const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; } + + int numPlanes() const { return fYUVAInfo.numPlanes(); } + + GrSurfaceOrigin textureOrigin() const { return fTextureOrigin; } + + bool isValid() const { return fYUVAInfo.isValid(); } + + /** + * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be + * valid if this->isValid(). + */ + SkYUVAInfo::YUVALocations toYUVALocations() const; + +private: + SkYUVAInfo fYUVAInfo; + std::array<GrBackendTexture, SkYUVAInfo::kMaxPlanes> fTextures; + GrSurfaceOrigin fTextureOrigin = kTopLeft_GrSurfaceOrigin; +}; + +#endif diff --git a/src/deps/skia/include/gpu/ShaderErrorHandler.h b/src/deps/skia/include/gpu/ShaderErrorHandler.h new file mode 100644 index 000000000..8960da5c5 --- /dev/null +++ b/src/deps/skia/include/gpu/ShaderErrorHandler.h @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef skgpu_ShaderErrorHandler_DEFINED +#define skgpu_ShaderErrorHandler_DEFINED + +#include "include/core/SkTypes.h" + +namespace skgpu { +/** + * Abstract class to report errors when compiling shaders. + */ +class SK_API ShaderErrorHandler { +public: + virtual ~ShaderErrorHandler() = default; + + virtual void compileError(const char* shader, const char* errors) = 0; + +protected: + ShaderErrorHandler() = default; + ShaderErrorHandler(const ShaderErrorHandler&) = delete; + ShaderErrorHandler& operator=(const ShaderErrorHandler&) = delete; +}; + +/** + * Used when no error handler is set. Will report failures via SkDebugf and asserts. + */ +ShaderErrorHandler* DefaultShaderErrorHandler(); + +} // namespace skgpu + +#endif // skgpu_ShaderErrorHandler_DEFINED diff --git a/src/deps/skia/include/gpu/d3d/BUILD.bazel b/src/deps/skia/include/gpu/d3d/BUILD.bazel new file mode 100644 index 000000000..77a38844f --- /dev/null +++ b/src/deps/skia/include/gpu/d3d/BUILD.bazel @@ -0,0 +1,21 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrD3DBackendContext_hdr", + hdrs = ["GrD3DBackendContext.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrD3DTypes_hdr", + "//include/gpu:GrTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrD3DTypes_hdr", + hdrs = ["GrD3DTypes.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/gpu:GrTypes_hdr", + ], +) diff --git a/src/deps/skia/include/gpu/d3d/GrD3DBackendContext.h b/src/deps/skia/include/gpu/d3d/GrD3DBackendContext.h new file mode 100644 index 000000000..bb85e52e5 --- /dev/null +++ b/src/deps/skia/include/gpu/d3d/GrD3DBackendContext.h @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrD3DBackendContext_DEFINED +#define GrD3DBackendContext_DEFINED + +// GrD3DTypes.h includes d3d12.h, which in turn includes windows.h, which redefines many +// common identifiers such as: +// * interface +// * small +// * near +// * far +// * CreateSemaphore +// * MemoryBarrier +// +// You should only include GrD3DBackendContext.h if you are prepared to rename those identifiers. +#include "include/gpu/d3d/GrD3DTypes.h" + +#include "include/gpu/GrTypes.h" + +// The BackendContext contains all of the base D3D objects needed by the GrD3DGpu. The assumption +// is that the client will set these up and pass them to the GrD3DGpu constructor. +struct SK_API GrD3DBackendContext { + gr_cp<IDXGIAdapter1> fAdapter; + gr_cp<ID3D12Device> fDevice; + gr_cp<ID3D12CommandQueue> fQueue; + sk_sp<GrD3DMemoryAllocator> fMemoryAllocator; + GrProtected fProtectedContext = GrProtected::kNo; +}; + +#endif diff --git a/src/deps/skia/include/gpu/d3d/GrD3DTypes.h b/src/deps/skia/include/gpu/d3d/GrD3DTypes.h new file mode 100644 index 000000000..d2e890f78 --- /dev/null +++ b/src/deps/skia/include/gpu/d3d/GrD3DTypes.h @@ -0,0 +1,248 @@ + +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrD3DTypes_DEFINED +#define GrD3DTypes_DEFINED + +// This file includes d3d12.h, which in turn includes windows.h, which redefines many +// common identifiers such as: +// * interface +// * small +// * near +// * far +// * CreateSemaphore +// * MemoryBarrier +// +// You should only include this header if you need the Direct3D definitions and are +// prepared to rename those identifiers. 
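The skgpu::ShaderErrorHandler interface declared a little earlier is small enough that a client override fits in a few lines. A hypothetical handler that just logs via SkDebugf (the suggestion about installing it through GrContextOptions is an assumption, not part of this header):

#include "include/core/SkTypes.h"
#include "include/gpu/ShaderErrorHandler.h"

// Hypothetical handler that logs the compiler output followed by the shader source.
class LoggingShaderErrorHandler : public skgpu::ShaderErrorHandler {
public:
    void compileError(const char* shader, const char* errors) override {
        SkDebugf("Shader compilation failed:\n%s\n---\n%s\n", errors, shader);
    }
};

// Typically a pointer to such a handler is installed on the context options
// (e.g. GrContextOptions::fShaderErrorHandler, assumed here) before the
// context is created; otherwise DefaultShaderErrorHandler() is used.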
+ +#include "include/core/SkRefCnt.h" +#include "include/gpu/GrTypes.h" +#include <d3d12.h> +#include <dxgi1_4.h> + +class GrD3DGpu; + + /** Check if the argument is non-null, and if so, call obj->AddRef() and return obj. + */ +template <typename T> static inline T* GrSafeComAddRef(T* obj) { + if (obj) { + obj->AddRef(); + } + return obj; +} + +/** Check if the argument is non-null, and if so, call obj->Release() + */ +template <typename T> static inline void GrSafeComRelease(T* obj) { + if (obj) { + obj->Release(); + } +} + +template <typename T> class gr_cp { +public: + using element_type = T; + + constexpr gr_cp() : fObject(nullptr) {} + constexpr gr_cp(std::nullptr_t) : fObject(nullptr) {} + + /** + * Shares the underlying object by calling AddRef(), so that both the argument and the newly + * created gr_cp both have a reference to it. + */ + gr_cp(const gr_cp<T>& that) : fObject(GrSafeComAddRef(that.get())) {} + + /** + * Move the underlying object from the argument to the newly created gr_cp. Afterwards only + * the new gr_cp will have a reference to the object, and the argument will point to null. + * No call to AddRef() or Release() will be made. + */ + gr_cp(gr_cp<T>&& that) : fObject(that.release()) {} + + /** + * Adopt the bare object into the newly created gr_cp. + * No call to AddRef() or Release() will be made. + */ + explicit gr_cp(T* obj) { + fObject = obj; + } + + /** + * Calls Release() on the underlying object pointer. + */ + ~gr_cp() { + GrSafeComRelease(fObject); + SkDEBUGCODE(fObject = nullptr); + } + + /** + * Shares the underlying object referenced by the argument by calling AddRef() on it. If this + * gr_cp previously had a reference to an object (i.e. not null) it will call Release() + * on that object. + */ + gr_cp<T>& operator=(const gr_cp<T>& that) { + if (this != &that) { + this->reset(GrSafeComAddRef(that.get())); + } + return *this; + } + + /** + * Move the underlying object from the argument to the gr_cp. If the gr_cp + * previously held a reference to another object, Release() will be called on that object. + * No call to AddRef() will be made. + */ + gr_cp<T>& operator=(gr_cp<T>&& that) { + this->reset(that.release()); + return *this; + } + + explicit operator bool() const { return this->get() != nullptr; } + + T* get() const { return fObject; } + T* operator->() const { return fObject; } + T** operator&() { return &fObject; } + + /** + * Adopt the new object, and call Release() on any previously held object (if not null). + * No call to AddRef() will be made. + */ + void reset(T* object = nullptr) { + T* oldObject = fObject; + fObject = object; + GrSafeComRelease(oldObject); + } + + /** + * Shares the new object by calling AddRef() on it. If this gr_cp previously had a + * reference to an object (i.e. not null) it will call Release() on that object. + */ + void retain(T* object) { + if (this->fObject != object) { + this->reset(GrSafeComAddRef(object)); + } + } + + /** + * Return the original object, and set the internal object to nullptr. + * The caller must assume ownership of the object, and manage its reference count directly. + * No call to Release() will be made. 
+ */ + T* SK_WARN_UNUSED_RESULT release() { + T* obj = fObject; + fObject = nullptr; + return obj; + } + +private: + T* fObject; +}; + +template <typename T> inline bool operator==(const gr_cp<T>& a, + const gr_cp<T>& b) { + return a.get() == b.get(); +} + +template <typename T> inline bool operator!=(const gr_cp<T>& a, + const gr_cp<T>& b) { + return a.get() != b.get(); +} + +// interface classes for the GPU memory allocator +class GrD3DAlloc : public SkRefCnt { +public: + ~GrD3DAlloc() override = default; +}; + +class GrD3DMemoryAllocator : public SkRefCnt { +public: + virtual gr_cp<ID3D12Resource> createResource(D3D12_HEAP_TYPE, const D3D12_RESOURCE_DESC*, + D3D12_RESOURCE_STATES initialResourceState, + sk_sp<GrD3DAlloc>* allocation, + const D3D12_CLEAR_VALUE*) = 0; + virtual gr_cp<ID3D12Resource> createAliasingResource(sk_sp<GrD3DAlloc>& allocation, + uint64_t localOffset, + const D3D12_RESOURCE_DESC*, + D3D12_RESOURCE_STATES initialResourceState, + const D3D12_CLEAR_VALUE*) = 0; +}; + +// Note: there is no notion of Borrowed or Adopted resources in the D3D backend, +// so Ganesh will ref fResource once it's asked to wrap it. +// Clients are responsible for releasing their own ref to avoid memory leaks. +struct GrD3DTextureResourceInfo { + gr_cp<ID3D12Resource> fResource = nullptr; + sk_sp<GrD3DAlloc> fAlloc = nullptr; + D3D12_RESOURCE_STATES fResourceState = D3D12_RESOURCE_STATE_COMMON; + DXGI_FORMAT fFormat = DXGI_FORMAT_UNKNOWN; + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + unsigned int fSampleQualityPattern = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN; + GrProtected fProtected = GrProtected::kNo; + + GrD3DTextureResourceInfo() = default; + + GrD3DTextureResourceInfo(ID3D12Resource* resource, + const sk_sp<GrD3DAlloc> alloc, + D3D12_RESOURCE_STATES resourceState, + DXGI_FORMAT format, + uint32_t sampleCount, + uint32_t levelCount, + unsigned int sampleQualityLevel, + GrProtected isProtected = GrProtected::kNo) + : fResource(resource) + , fAlloc(alloc) + , fResourceState(resourceState) + , fFormat(format) + , fSampleCount(sampleCount) + , fLevelCount(levelCount) + , fSampleQualityPattern(sampleQualityLevel) + , fProtected(isProtected) {} + + GrD3DTextureResourceInfo(const GrD3DTextureResourceInfo& info, + D3D12_RESOURCE_STATES resourceState) + : fResource(info.fResource) + , fAlloc(info.fAlloc) + , fResourceState(resourceState) + , fFormat(info.fFormat) + , fSampleCount(info.fSampleCount) + , fLevelCount(info.fLevelCount) + , fSampleQualityPattern(info.fSampleQualityPattern) + , fProtected(info.fProtected) {} + +#if GR_TEST_UTILS + bool operator==(const GrD3DTextureResourceInfo& that) const { + return fResource == that.fResource && fResourceState == that.fResourceState && + fFormat == that.fFormat && fSampleCount == that.fSampleCount && + fLevelCount == that.fLevelCount && + fSampleQualityPattern == that.fSampleQualityPattern && fProtected == that.fProtected; + } +#endif +}; + +struct GrD3DFenceInfo { + GrD3DFenceInfo() + : fFence(nullptr) + , fValue(0) { + } + + gr_cp<ID3D12Fence> fFence; + uint64_t fValue; // signal value for the fence +}; + +struct GrD3DSurfaceInfo { + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + GrProtected fProtected = GrProtected::kNo; + + DXGI_FORMAT fFormat = DXGI_FORMAT_UNKNOWN; + unsigned int fSampleQualityPattern = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN; +}; + +#endif diff --git a/src/deps/skia/include/gpu/dawn/BUILD.bazel b/src/deps/skia/include/gpu/dawn/BUILD.bazel new file mode 100644 index 000000000..8b93b918c --- /dev/null 
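Since gr_cp is effectively the COM analogue of sk_sp, a short sketch of its ownership rules as documented above. It assumes a live ID3D12Device* and uses only Direct3D 12 calls that exist in d3d12.h:

#include <utility>
#include <d3d12.h>
#include "include/gpu/d3d/GrD3DTypes.h"

void fenceOwnership(ID3D12Device* device) {
    gr_cp<ID3D12Fence> fence;
    // operator& exposes the raw slot, so COM factory APIs can fill it in;
    // the gr_cp then owns exactly one reference.
    device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&fence));

    gr_cp<ID3D12Fence> copy = fence;             // AddRef(): two owners
    gr_cp<ID3D12Fence> moved = std::move(copy);  // no AddRef/Release; copy is now null

    ID3D12Fence* raw = moved.release();          // caller now owns this reference
    if (raw) {
        raw->Release();                          // balance the released reference
    }
}   // fence's destructor Releases the remaining reference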
+++ b/src/deps/skia/include/gpu/dawn/BUILD.bazel @@ -0,0 +1,8 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrDawnTypes_hdr", + hdrs = ["GrDawnTypes.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/gpu:GrTypes_hdr"], +) diff --git a/src/deps/skia/include/gpu/dawn/GrDawnTypes.h b/src/deps/skia/include/gpu/dawn/GrDawnTypes.h new file mode 100644 index 000000000..640516f6e --- /dev/null +++ b/src/deps/skia/include/gpu/dawn/GrDawnTypes.h @@ -0,0 +1,95 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrDawnTypes_DEFINED +#define GrDawnTypes_DEFINED + +#include "include/gpu/GrTypes.h" + +#ifdef Always +#undef Always +static constexpr int Always = 2; +#endif +#ifdef Success +#undef Success +static constexpr int Success = 0; +#endif +#ifdef None +#undef None +static constexpr int None = 0L; +#endif +#include "dawn/webgpu_cpp.h" + +struct GrDawnTextureInfo { + wgpu::Texture fTexture; + wgpu::TextureFormat fFormat; + uint32_t fLevelCount; + GrDawnTextureInfo() : fTexture(nullptr), fFormat(), fLevelCount(0) { + } + GrDawnTextureInfo(const GrDawnTextureInfo& other) + : fTexture(other.fTexture) + , fFormat(other.fFormat) + , fLevelCount(other.fLevelCount) { + } + GrDawnTextureInfo& operator=(const GrDawnTextureInfo& other) { + fTexture = other.fTexture; + fFormat = other.fFormat; + fLevelCount = other.fLevelCount; + return *this; + } + bool operator==(const GrDawnTextureInfo& other) const { + return fTexture.Get() == other.fTexture.Get() && + fFormat == other.fFormat && + fLevelCount == other.fLevelCount; + } +}; + +// GrDawnRenderTargetInfo holds a reference to a (1-mip) TextureView. This means that, for now, +// GrDawnRenderTarget is suitable for rendering, but not readPixels() or writePixels(). Also, +// backdrop filters and certain blend modes requiring copying the destination framebuffer +// will not work. 
+struct GrDawnRenderTargetInfo { + wgpu::TextureView fTextureView; + wgpu::TextureFormat fFormat; + uint32_t fLevelCount; + GrDawnRenderTargetInfo() : fTextureView(nullptr), fFormat(), fLevelCount(0) { + } + GrDawnRenderTargetInfo(const GrDawnRenderTargetInfo& other) + : fTextureView(other.fTextureView) + , fFormat(other.fFormat) + , fLevelCount(other.fLevelCount) { + } + explicit GrDawnRenderTargetInfo(const GrDawnTextureInfo& texInfo) + : fFormat(texInfo.fFormat) + , fLevelCount(1) { + wgpu::TextureViewDescriptor desc; + desc.format = texInfo.fFormat; + desc.mipLevelCount = 1; + fTextureView = texInfo.fTexture.CreateView(&desc); + } + GrDawnRenderTargetInfo& operator=(const GrDawnRenderTargetInfo& other) { + fTextureView = other.fTextureView; + fFormat = other.fFormat; + fLevelCount = other.fLevelCount; + return *this; + } + bool operator==(const GrDawnRenderTargetInfo& other) const { + return fTextureView.Get() == other.fTextureView.Get() && + fFormat == other.fFormat && + fLevelCount == other.fLevelCount; + } +}; + +struct GrDawnSurfaceInfo { + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + GrProtected fProtected = GrProtected::kNo; + + wgpu::TextureFormat fFormat; +}; + +#endif diff --git a/src/deps/skia/include/gpu/gl/BUILD.bazel b/src/deps/skia/include/gpu/gl/BUILD.bazel new file mode 100644 index 000000000..c2dcf89d8 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/BUILD.bazel @@ -0,0 +1,70 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrGLAssembleHelpers_hdr", + hdrs = ["GrGLAssembleHelpers.h"], + visibility = ["//:__subpackages__"], + deps = [":GrGLAssembleInterface_hdr"], +) + +generated_cc_atom( + name = "GrGLAssembleInterface_hdr", + hdrs = ["GrGLAssembleInterface.h"], + visibility = ["//:__subpackages__"], + deps = [":GrGLInterface_hdr"], +) + +generated_cc_atom( + name = "GrGLConfig_chrome_hdr", + hdrs = ["GrGLConfig_chrome.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "GrGLConfig_hdr", + hdrs = ["GrGLConfig.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/gpu:GrTypes_hdr"], +) + +generated_cc_atom( + name = "GrGLExtensions_hdr", + hdrs = ["GrGLExtensions.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrGLFunctions_hdr", + "//include/core:SkString_hdr", + "//include/private:SkTArray_hdr", + ], +) + +generated_cc_atom( + name = "GrGLFunctions_hdr", + hdrs = ["GrGLFunctions.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrGLTypes_hdr", + "//include/private:SkTLogic_hdr", + ], +) + +generated_cc_atom( + name = "GrGLInterface_hdr", + hdrs = ["GrGLInterface.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrGLExtensions_hdr", + ":GrGLFunctions_hdr", + "//include/core:SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "GrGLTypes_hdr", + hdrs = ["GrGLTypes.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrGLConfig_hdr", + "//include/core:SkRefCnt_hdr", + ], +) diff --git a/src/deps/skia/include/gpu/gl/GrGLAssembleHelpers.h b/src/deps/skia/include/gpu/gl/GrGLAssembleHelpers.h new file mode 100644 index 000000000..bfa2aea37 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLAssembleHelpers.h @@ -0,0 +1,11 @@ +/* + * Copyright 2019 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
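Tying the two Dawn structs above together, a hypothetical sketch that wraps an externally created wgpu::Texture and derives a render-target description from it. The texture and its format are assumed to come from elsewhere; the format value is only an example and must match the real texture:

#include "include/gpu/dawn/GrDawnTypes.h"

GrDawnRenderTargetInfo makeRenderTargetInfo(wgpu::Texture texture) {
    GrDawnTextureInfo texInfo;
    texInfo.fTexture    = texture;                          // externally created (assumed)
    texInfo.fFormat     = wgpu::TextureFormat::RGBA8Unorm;  // must match the texture
    texInfo.fLevelCount = 1;

    // The explicit constructor above creates a single-mip TextureView, which is
    // why readPixels()/writePixels() and dst-copying blend modes are unsupported.
    return GrDawnRenderTargetInfo(texInfo);
}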
+ */ + +#include "include/gpu/gl/GrGLAssembleInterface.h" + +void GrGetEGLQueryAndDisplay(GrEGLQueryStringFn** queryString, GrEGLDisplay* display, + void* ctx, GrGLGetProc get); diff --git a/src/deps/skia/include/gpu/gl/GrGLAssembleInterface.h b/src/deps/skia/include/gpu/gl/GrGLAssembleInterface.h new file mode 100644 index 000000000..4f9f9f9ee --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLAssembleInterface.h @@ -0,0 +1,39 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "include/gpu/gl/GrGLInterface.h" + +typedef GrGLFuncPtr (*GrGLGetProc)(void* ctx, const char name[]); + +/** + * Generic function for creating a GrGLInterface for an either OpenGL or GLES. It calls + * get() to get each function address. ctx is a generic ptr passed to and interpreted by get(). + */ +SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledInterface(void *ctx, GrGLGetProc get); + +/** + * Generic function for creating a GrGLInterface for an OpenGL (but not GLES) context. It calls + * get() to get each function address. ctx is a generic ptr passed to and interpreted by get(). + */ +SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLInterface(void *ctx, GrGLGetProc get); + +/** + * Generic function for creating a GrGLInterface for an OpenGL ES (but not Open GL) context. It + * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by + * get(). + */ +SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLESInterface(void *ctx, GrGLGetProc get); + +/** + * Generic function for creating a GrGLInterface for a WebGL (similar to OpenGL ES) context. It + * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by + * get(). + */ +SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledWebGLInterface(void *ctx, GrGLGetProc get); + +/** Deprecated version of GrGLMakeAssembledInterface() that returns a bare pointer. */ +SK_API const GrGLInterface* GrGLAssembleInterface(void *ctx, GrGLGetProc get); diff --git a/src/deps/skia/include/gpu/gl/GrGLConfig.h b/src/deps/skia/include/gpu/gl/GrGLConfig.h new file mode 100644 index 000000000..e3573486c --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLConfig.h @@ -0,0 +1,79 @@ + +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + + + +#ifndef GrGLConfig_DEFINED +#define GrGLConfig_DEFINED + +#include "include/gpu/GrTypes.h" + +/** + * Optional GL config file. + */ +#ifdef GR_GL_CUSTOM_SETUP_HEADER + #include GR_GL_CUSTOM_SETUP_HEADER +#endif + +#if !defined(GR_GL_FUNCTION_TYPE) + #if defined(SK_BUILD_FOR_WIN) + #define GR_GL_FUNCTION_TYPE __stdcall + #else + #define GR_GL_FUNCTION_TYPE + #endif +#endif + +/** + * The following are optional defines that can be enabled at the compiler + * command line, in a IDE project, in a GrUserConfig.h file, or in a GL custom + * file (if one is in use). If a GR_GL_CUSTOM_SETUP_HEADER is used they can + * also be placed there. + * + * GR_GL_LOG_CALLS: if 1 Gr can print every GL call using SkDebugf. Defaults to + * 0. Logging can be enabled and disabled at runtime using a debugger via to + * global gLogCallsGL. The initial value of gLogCallsGL is controlled by + * GR_GL_LOG_CALLS_START. + * + * GR_GL_LOG_CALLS_START: controls the initial value of gLogCallsGL when + * GR_GL_LOG_CALLS is 1. Defaults to 0. 
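For the assemble helpers declared just above, the usual pattern is to forward an existing loader such as eglGetProcAddress. A sketch, assuming an EGL context is already current on this thread (note that some drivers only resolve extension entry points this way, so a real loader may need a fallback):

#include <EGL/egl.h>
#include "include/gpu/gl/GrGLAssembleInterface.h"

sk_sp<const GrGLInterface> makeInterfaceFromEGL() {
    // The ctx pointer is unused here; eglGetProcAddress needs no extra state.
    return GrGLMakeAssembledGLESInterface(
            nullptr,
            [](void* /*ctx*/, const char name[]) -> GrGLFuncPtr {
                return reinterpret_cast<GrGLFuncPtr>(eglGetProcAddress(name));
            });
}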
+ * + * GR_GL_CHECK_ERROR: if enabled Gr can do a glGetError() after every GL call. + * Defaults to 1 if SK_DEBUG is set, otherwise 0. When GR_GL_CHECK_ERROR is 1 + * this can be toggled in a debugger using the gCheckErrorGL global. The initial + * value of gCheckErrorGL is controlled by by GR_GL_CHECK_ERROR_START. + * + * GR_GL_CHECK_ERROR_START: controls the initial value of gCheckErrorGL + * when GR_GL_CHECK_ERROR is 1. Defaults to 1. + * + */ + +#if !defined(GR_GL_LOG_CALLS) + #ifdef SK_DEBUG + #define GR_GL_LOG_CALLS 1 + #else + #define GR_GL_LOG_CALLS 0 + #endif +#endif + +#if !defined(GR_GL_LOG_CALLS_START) + #define GR_GL_LOG_CALLS_START 0 +#endif + +#if !defined(GR_GL_CHECK_ERROR) + #ifdef SK_DEBUG + #define GR_GL_CHECK_ERROR 1 + #else + #define GR_GL_CHECK_ERROR 0 + #endif +#endif + +#if !defined(GR_GL_CHECK_ERROR_START) + #define GR_GL_CHECK_ERROR_START 1 +#endif + +#endif diff --git a/src/deps/skia/include/gpu/gl/GrGLConfig_chrome.h b/src/deps/skia/include/gpu/gl/GrGLConfig_chrome.h new file mode 100644 index 000000000..40127d170 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLConfig_chrome.h @@ -0,0 +1,14 @@ + +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef GrGLConfig_chrome_DEFINED +#define GrGLConfig_chrome_DEFINED + +// glGetError() forces a sync with gpu process on chrome +#define GR_GL_CHECK_ERROR_START 0 + +#endif diff --git a/src/deps/skia/include/gpu/gl/GrGLExtensions.h b/src/deps/skia/include/gpu/gl/GrGLExtensions.h new file mode 100644 index 000000000..1e2823f71 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLExtensions.h @@ -0,0 +1,78 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrGLExtensions_DEFINED +#define GrGLExtensions_DEFINED + +#include "include/core/SkString.h" +#include "include/gpu/gl/GrGLFunctions.h" +#include "include/private/SkTArray.h" + +#include <utility> + +struct GrGLInterface; +class SkJSONWriter; + +/** + * This helper queries the current GL context for its extensions, remembers them, and can be + * queried. It supports both glGetString- and glGetStringi-style extension string APIs and will + * use the latter if it is available. It also will query for EGL extensions if a eglQueryString + * implementation is provided. + */ +class SK_API GrGLExtensions { +public: + GrGLExtensions() {} + + GrGLExtensions(const GrGLExtensions&); + + GrGLExtensions& operator=(const GrGLExtensions&); + + void swap(GrGLExtensions* that) { + using std::swap; + swap(fStrings, that->fStrings); + swap(fInitialized, that->fInitialized); + } + + /** + * We sometimes need to use this class without having yet created a GrGLInterface. This version + * of init expects that getString is always non-NULL while getIntegerv and getStringi are non- + * NULL if on desktop GL with version 3.0 or higher. Otherwise it will fail. + */ + bool init(GrGLStandard standard, + GrGLFunction<GrGLGetStringFn> getString, + GrGLFunction<GrGLGetStringiFn> getStringi, + GrGLFunction<GrGLGetIntegervFn> getIntegerv, + GrGLFunction<GrEGLQueryStringFn> queryString = nullptr, + GrEGLDisplay eglDisplay = nullptr); + + bool isInitialized() const { return fInitialized; } + + /** + * Queries whether an extension is present. This will fail if init() has not been called. + */ + bool has(const char[]) const; + + /** + * Removes an extension if present. 
Returns true if the extension was present before the call. + */ + bool remove(const char[]); + + /** + * Adds an extension to list + */ + void add(const char[]); + + void reset() { fStrings.reset(); } + + void dumpJSON(SkJSONWriter*) const; + +private: + bool fInitialized = false; + SkTArray<SkString> fStrings; +}; + +#endif diff --git a/src/deps/skia/include/gpu/gl/GrGLFunctions.h b/src/deps/skia/include/gpu/gl/GrGLFunctions.h new file mode 100644 index 000000000..aef61e065 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLFunctions.h @@ -0,0 +1,304 @@ + +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrGLFunctions_DEFINED +#define GrGLFunctions_DEFINED + +#include <cstring> +#include "include/gpu/gl/GrGLTypes.h" +#include "include/private/SkTLogic.h" + + +extern "C" { + +/////////////////////////////////////////////////////////////////////////////// + +using GrGLActiveTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum texture); +using GrGLAttachShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint shader); +using GrGLBeginQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint id); +using GrGLBindAttribLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint index, const char* name); +using GrGLBindBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint buffer); +using GrGLBindFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint framebuffer); +using GrGLBindRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint renderbuffer); +using GrGLBindTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint texture); +using GrGLBindFragDataLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, const GrGLchar* name); +using GrGLBindFragDataLocationIndexedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, GrGLuint index, const GrGLchar* name); +using GrGLBindSamplerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint unit, GrGLuint sampler); +using GrGLBindVertexArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint array); +using GrGLBlendBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE(); +using GrGLBlendColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha); +using GrGLBlendEquationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode); +using GrGLBlendFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum sfactor, GrGLenum dfactor); +using GrGLBlitFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint srcX0, GrGLint srcY0, GrGLint srcX1, GrGLint srcY1, GrGLint dstX0, GrGLint dstY0, GrGLint dstX1, GrGLint dstY1, GrGLbitfield mask, GrGLenum filter); +using GrGLBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizeiptr size, const GrGLvoid* data, GrGLenum usage); +using GrGLBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid* data); +using GrGLCheckFramebufferStatusFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLenum target); +using GrGLClearFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield mask); +using GrGLClearColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha); +using GrGLClearStencilFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint s); +using GrGLClearTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLenum format, GrGLenum type, const GrGLvoid* data); +using GrGLClearTexSubImageFn = GrGLvoid 
GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLenum type, const GrGLvoid* data); +using GrGLColorMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean red, GrGLboolean green, GrGLboolean blue, GrGLboolean alpha); +using GrGLCompileShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader); +using GrGLCompressedTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid* data); +using GrGLCompressedTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid* data); +using GrGLCopyTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height); +using GrGLCreateProgramFn = GrGLuint GR_GL_FUNCTION_TYPE(); +using GrGLCreateShaderFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLenum type); +using GrGLCullFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode); +using GrGLDeleteBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* buffers); +using GrGLDeleteFencesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* fences); +using GrGLDeleteFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* framebuffers); +using GrGLDeleteProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program); +using GrGLDeleteQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* ids); +using GrGLDeleteRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* renderbuffers); +using GrGLDeleteSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, const GrGLuint* samplers); +using GrGLDeleteShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader); +using GrGLDeleteTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* textures); +using GrGLDeleteVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* arrays); +using GrGLDepthMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean flag); +using GrGLDisableFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum cap); +using GrGLDisableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index); +using GrGLDrawArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count); +using GrGLDrawArraysInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei primcount); +using GrGLDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect); +using GrGLDrawBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode); +using GrGLDrawBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLenum* bufs); +using GrGLDrawElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices); +using GrGLDrawElementsInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices, GrGLsizei primcount); +using GrGLDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect); +using GrGLDrawRangeElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLuint start, GrGLuint end, GrGLsizei count, GrGLenum type, const GrGLvoid* indices); +using GrGLEnableFn = GrGLvoid 
GR_GL_FUNCTION_TYPE(GrGLenum cap); +using GrGLEnableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index); +using GrGLEndQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target); +using GrGLFinishFn = GrGLvoid GR_GL_FUNCTION_TYPE(); +using GrGLFinishFenceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint fence); +using GrGLFlushFn = GrGLvoid GR_GL_FUNCTION_TYPE(); +using GrGLFlushMappedBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length); +using GrGLFramebufferRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer); +using GrGLFramebufferTexture2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level); +using GrGLFramebufferTexture2DMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLsizei samples); +using GrGLFrontFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode); +using GrGLGenBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* buffers); +using GrGLGenFencesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* fences); +using GrGLGenFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* framebuffers); +using GrGLGenerateMipmapFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target); +using GrGLGenQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* ids); +using GrGLGenRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* renderbuffers); +using GrGLGenSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, GrGLuint* samplers); +using GrGLGenTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* textures); +using GrGLGenVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* arrays); +using GrGLGetBufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params); +using GrGLGetErrorFn = GrGLenum GR_GL_FUNCTION_TYPE(); +using GrGLGetFramebufferAttachmentParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum pname, GrGLint* params); +using GrGLGetIntegervFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint* params); +using GrGLGetMultisamplefvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLuint index, GrGLfloat* val); +using GrGLGetProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, GrGLenum* binaryFormat, void* binary); +using GrGLGetProgramInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, char* infolog); +using GrGLGetProgramivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint* params); +using GrGLGetQueryivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum GLtarget, GrGLenum pname, GrGLint* params); +using GrGLGetQueryObjecti64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint64* params); +using GrGLGetQueryObjectivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint* params); +using GrGLGetQueryObjectui64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint64* params); +using GrGLGetQueryObjectuivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint* params); +using GrGLGetRenderbufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params); +using GrGLGetShaderInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, 
GrGLsizei bufsize, GrGLsizei* length, char* infolog); +using GrGLGetShaderivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLenum pname, GrGLint* params); +using GrGLGetShaderPrecisionFormatFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum shadertype, GrGLenum precisiontype, GrGLint* range, GrGLint* precision); +using GrGLGetStringFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name); +using GrGLGetStringiFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name, GrGLuint index); +using GrGLGetTexLevelParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum pname, GrGLint* params); +using GrGLGetUniformLocationFn = GrGLint GR_GL_FUNCTION_TYPE(GrGLuint program, const char* name); +using GrGLInsertEventMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker); +using GrGLInvalidateBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer); +using GrGLInvalidateBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length); +using GrGLInvalidateFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments); +using GrGLInvalidateSubFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height); +using GrGLInvalidateTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level); +using GrGLInvalidateTexSubImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth); +using GrGLIsTextureFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint texture); +using GrGLLineWidthFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLfloat width); +using GrGLLinkProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program); +using GrGLMapBufferFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum access); +using GrGLMapBufferRangeFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access); +using GrGLMapBufferSubDataFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLuint target, GrGLintptr offset, GrGLsizeiptr size, GrGLenum access); +using GrGLMapTexSubImage2DFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLenum access); +using GrGLMemoryBarrierFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLbitfield barriers); +using GrGLPatchParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint value); +using GrGLPixelStoreiFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint param); +using GrGLPolygonModeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum mode); +using GrGLPopGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(); +using GrGLProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum binaryFormat, void* binary, GrGLsizei length); +using GrGLProgramParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint value); +using GrGLPushGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker); +using GrGLQueryCounterFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum target); +using GrGLReadBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum src); +using GrGLReadPixelsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLvoid* pixels); +using 
GrGLRenderbufferStorageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLsizei width, GrGLsizei height); +using GrGLRenderbufferStorageMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei samples, GrGLenum internalformat, GrGLsizei width, GrGLsizei height); +using GrGLResolveMultisampleFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(); +using GrGLSamplerParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, GrGLint params); +using GrGLSamplerParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, const GrGLint* params); +using GrGLScissorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height); +// GL_CHROMIUM_bind_uniform_location +using GrGLBindUniformLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLint location, const char* name); +using GrGLSetFenceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint fence, GrGLenum condition); +using GrGLShaderSourceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLsizei count, const char* const* str, const GrGLint* length); +using GrGLStencilFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum func, GrGLint ref, GrGLuint mask); +using GrGLStencilFuncSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum func, GrGLint ref, GrGLuint mask); +using GrGLStencilMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint mask); +using GrGLStencilMaskSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLuint mask); +using GrGLStencilOpFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum fail, GrGLenum zfail, GrGLenum zpass); +using GrGLStencilOpSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum fail, GrGLenum zfail, GrGLenum zpass); +using GrGLTexBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer); +using GrGLTexBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size); +using GrGLTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid* pixels); +using GrGLTexParameterfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLfloat param); +using GrGLTexParameterfvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLfloat* params); +using GrGLTexParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint param); +using GrGLTexParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLint* params); +using GrGLTexStorage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei levels, GrGLenum internalformat, GrGLsizei width, GrGLsizei height); +using GrGLDiscardFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments); +using GrGLTestFenceFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint fence); +using GrGLTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid* pixels); +using GrGLTextureBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE(); +using GrGLUniform1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0); +using GrGLUniform1iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0); +using GrGLUniform1fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint 
location, GrGLsizei count, const GrGLfloat* v); +using GrGLUniform1ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v); +using GrGLUniform2fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1); +using GrGLUniform2iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1); +using GrGLUniform2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v); +using GrGLUniform2ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v); +using GrGLUniform3fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2); +using GrGLUniform3iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2); +using GrGLUniform3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v); +using GrGLUniform3ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v); +using GrGLUniform4fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2, GrGLfloat v3); +using GrGLUniform4iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3); +using GrGLUniform4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v); +using GrGLUniform4ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v); +using GrGLUniformMatrix2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value); +using GrGLUniformMatrix3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value); +using GrGLUniformMatrix4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value); +using GrGLUnmapBufferFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLenum target); +using GrGLUnmapBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem); +using GrGLUnmapTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem); +using GrGLUseProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program); +using GrGLVertexAttrib1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat value); +using GrGLVertexAttrib2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values); +using GrGLVertexAttrib3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values); +using GrGLVertexAttrib4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values); +using GrGLVertexAttribDivisorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index, GrGLuint divisor); +using GrGLVertexAttribIPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLsizei stride, const GrGLvoid* ptr); +using GrGLVertexAttribPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, const GrGLvoid* ptr); +using GrGLViewportFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height); + +/* GL_NV_framebuffer_mixed_samples */ +using GrGLCoverageModulationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum components); + +/* EXT_base_instance */ +using GrGLDrawArraysInstancedBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei instancecount, GrGLuint baseinstance); +using 
GrGLDrawElementsInstancedBaseVertexBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const void *indices, GrGLsizei instancecount, GrGLint basevertex, GrGLuint baseinstance); + +/* EXT_multi_draw_indirect */ +using GrGLMultiDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride); +using GrGLMultiDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride); + +/* ANGLE_base_vertex_base_instance */ +using GrGLMultiDrawArraysInstancedBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLint* firsts, const GrGLsizei* counts, const GrGLsizei* instanceCounts, const GrGLuint* baseInstances, const GrGLsizei drawcount); +using GrGLMultiDrawElementsInstancedBaseVertexBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLint* counts, GrGLenum type, const GrGLvoid* const* indices, const GrGLsizei* instanceCounts, const GrGLint* baseVertices, const GrGLuint* baseInstances, const GrGLsizei drawcount); + +/* ARB_sync */ +using GrGLFenceSyncFn = GrGLsync GR_GL_FUNCTION_TYPE(GrGLenum condition, GrGLbitfield flags); +using GrGLIsSyncFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLsync sync); +using GrGLClientWaitSyncFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout); +using GrGLWaitSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout); +using GrGLDeleteSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync); + +/* ARB_internalformat_query */ +using GrGLGetInternalformativFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLenum pname, GrGLsizei bufSize, GrGLint* params); + +/* KHR_debug */ +using GrGLDebugMessageControlFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLenum severity, GrGLsizei count, const GrGLuint* ids, GrGLboolean enabled); +using GrGLDebugMessageInsertFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLuint id, GrGLenum severity, GrGLsizei length, const GrGLchar* buf); +using GrGLDebugMessageCallbackFn = GrGLvoid GR_GL_FUNCTION_TYPE(GRGLDEBUGPROC callback, const GrGLvoid* userParam); +using GrGLGetDebugMessageLogFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLuint count, GrGLsizei bufSize, GrGLenum* sources, GrGLenum* types, GrGLuint* ids, GrGLenum* severities, GrGLsizei* lengths, GrGLchar* messageLog); +using GrGLPushDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLuint id, GrGLsizei length, const GrGLchar* message); +using GrGLPopDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE(); +using GrGLObjectLabelFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum identifier, GrGLuint name, GrGLsizei length, const GrGLchar* label); + +/** EXT_window_rectangles */ +using GrGLWindowRectanglesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, const GrGLint box[]); + +/** GL_QCOM_tiled_rendering */ +using GrGLStartTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint x, GrGLuint y, GrGLuint width, GrGLuint height, GrGLbitfield preserveMask); +using GrGLEndTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield preserveMask); + +/** EGL functions */ +using GrEGLQueryStringFn = const char* GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLint name); +using GrEGLGetCurrentDisplayFn = GrEGLDisplay GR_GL_FUNCTION_TYPE(); +using GrEGLCreateImageFn = GrEGLImage GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLContext ctx, GrEGLenum target, 
GrEGLClientBuffer buffer, const GrEGLint* attrib_list); +using GrEGLDestroyImageFn = GrEGLBoolean GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLImage image); +} // extern "C" + +// This is a lighter-weight std::function, trying to reduce code size and compile time +// by only supporting the exact use cases we require. +template <typename T> class GrGLFunction; + +template <typename R, typename... Args> +class GrGLFunction<R GR_GL_FUNCTION_TYPE(Args...)> { +public: + using Fn = R GR_GL_FUNCTION_TYPE(Args...); + // Construct empty. + GrGLFunction() = default; + GrGLFunction(std::nullptr_t) {} + + // Construct from a simple function pointer. + GrGLFunction(Fn* fn_ptr) { + static_assert(sizeof(fn_ptr) <= sizeof(fBuf), "fBuf is too small"); + if (fn_ptr) { + memcpy(fBuf, &fn_ptr, sizeof(fn_ptr)); + fCall = [](const void* buf, Args... args) { + return (*(Fn**)buf)(std::forward<Args>(args)...); + }; + } + } + + // Construct from a small closure. + template <typename Closure> + GrGLFunction(Closure closure) : GrGLFunction() { + static_assert(sizeof(Closure) <= sizeof(fBuf), "fBuf is too small"); +#if defined(__APPLE__) // I am having serious trouble getting these to work with all STLs... + static_assert(std::is_trivially_copyable<Closure>::value, ""); + static_assert(std::is_trivially_destructible<Closure>::value, ""); +#endif + + memcpy(fBuf, &closure, sizeof(closure)); + fCall = [](const void* buf, Args... args) { + auto closure = (const Closure*)buf; + return (*closure)(args...); + }; + } + + R operator()(Args... args) const { + SkASSERT(fCall); + return fCall(fBuf, std::forward<Args>(args)...); + } + + explicit operator bool() const { return fCall != nullptr; } + + void reset() { fCall = nullptr; } + +private: + using Call = R(const void* buf, Args...); + Call* fCall = nullptr; + size_t fBuf[4]; +}; + +#endif diff --git a/src/deps/skia/include/gpu/gl/GrGLInterface.h b/src/deps/skia/include/gpu/gl/GrGLInterface.h new file mode 100644 index 000000000..af3ca67dc --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLInterface.h @@ -0,0 +1,342 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrGLInterface_DEFINED +#define GrGLInterface_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/gpu/gl/GrGLExtensions.h" +#include "include/gpu/gl/GrGLFunctions.h" + +//////////////////////////////////////////////////////////////////////////////// + +typedef void(*GrGLFuncPtr)(); +struct GrGLInterface; + + +/** + * Rather than depend on platform-specific GL headers and libraries, we require + * the client to provide a struct of GL function pointers. This struct can be + * specified per-GrContext as a parameter to GrContext::MakeGL. If no interface is + * passed to MakeGL then a default GL interface is created using GrGLMakeNativeInterface(). + * If this returns nullptr then GrContext::MakeGL() will fail. + * + * The implementation of GrGLMakeNativeInterface is platform-specific. Several + * implementations have been provided (for GLX, WGL, EGL, etc), along with an + * implementation that simply returns nullptr. Clients should select the most + * appropriate one to build. + */ +SK_API sk_sp<const GrGLInterface> GrGLMakeNativeInterface(); +// Deprecated alternative to GrGLMakeNativeInterface(). +SK_API const GrGLInterface* GrGLCreateNativeInterface(); + +/** + * GrContext uses the following interface to make all calls into OpenGL. When a + * GrContext is created it is given a GrGLInterface. 
The interface's function + * pointers must be valid for the OpenGL context associated with the GrContext. + * On some platforms, such as Windows, function pointers for OpenGL extensions + * may vary between OpenGL contexts. So the caller must be careful to use a + * GrGLInterface initialized for the correct context. All functions that should + * be available based on the OpenGL's version and extension string must be + * non-NULL or GrContext creation will fail. This can be tested with the + * validate() method when the OpenGL context has been made current. + */ +struct SK_API GrGLInterface : public SkRefCnt { +private: + using INHERITED = SkRefCnt; + +#if GR_GL_CHECK_ERROR + // This is here to avoid having our debug code that checks for a GL error after most GL calls + // accidentally swallow an OOM that should be reported. + mutable bool fOOMed = false; + bool fSuppressErrorLogging = false; +#endif + +public: + GrGLInterface(); + + // Validates that the GrGLInterface supports its advertised standard. This means the necessary + // function pointers have been initialized for both the GL version and any advertised + // extensions. + bool validate() const; + +#if GR_GL_CHECK_ERROR + GrGLenum checkError(const char* location, const char* call) const; + bool checkAndResetOOMed() const; + void suppressErrorLogging(); +#endif + +#if GR_TEST_UTILS + GrGLInterface(const GrGLInterface& that) + : fStandard(that.fStandard) + , fExtensions(that.fExtensions) + , fFunctions(that.fFunctions) {} +#endif + + // Indicates the type of GL implementation + union { + GrGLStandard fStandard; + GrGLStandard fBindingsExported; // Legacy name, will be remove when Chromium is updated. + }; + + GrGLExtensions fExtensions; + + bool hasExtension(const char ext[]) const { return fExtensions.has(ext); } + + /** + * The function pointers are in a struct so that we can have a compiler generated assignment + * operator. 
+ */ + struct Functions { + GrGLFunction<GrGLActiveTextureFn> fActiveTexture; + GrGLFunction<GrGLAttachShaderFn> fAttachShader; + GrGLFunction<GrGLBeginQueryFn> fBeginQuery; + GrGLFunction<GrGLBindAttribLocationFn> fBindAttribLocation; + GrGLFunction<GrGLBindBufferFn> fBindBuffer; + GrGLFunction<GrGLBindFragDataLocationFn> fBindFragDataLocation; + GrGLFunction<GrGLBindFragDataLocationIndexedFn> fBindFragDataLocationIndexed; + GrGLFunction<GrGLBindFramebufferFn> fBindFramebuffer; + GrGLFunction<GrGLBindRenderbufferFn> fBindRenderbuffer; + GrGLFunction<GrGLBindSamplerFn> fBindSampler; + GrGLFunction<GrGLBindTextureFn> fBindTexture; + GrGLFunction<GrGLBindVertexArrayFn> fBindVertexArray; + GrGLFunction<GrGLBlendBarrierFn> fBlendBarrier; + GrGLFunction<GrGLBlendColorFn> fBlendColor; + GrGLFunction<GrGLBlendEquationFn> fBlendEquation; + GrGLFunction<GrGLBlendFuncFn> fBlendFunc; + GrGLFunction<GrGLBlitFramebufferFn> fBlitFramebuffer; + GrGLFunction<GrGLBufferDataFn> fBufferData; + GrGLFunction<GrGLBufferSubDataFn> fBufferSubData; + GrGLFunction<GrGLCheckFramebufferStatusFn> fCheckFramebufferStatus; + GrGLFunction<GrGLClearFn> fClear; + GrGLFunction<GrGLClearColorFn> fClearColor; + GrGLFunction<GrGLClearStencilFn> fClearStencil; + GrGLFunction<GrGLClearTexImageFn> fClearTexImage; + GrGLFunction<GrGLClearTexSubImageFn> fClearTexSubImage; + GrGLFunction<GrGLColorMaskFn> fColorMask; + GrGLFunction<GrGLCompileShaderFn> fCompileShader; + GrGLFunction<GrGLCompressedTexImage2DFn> fCompressedTexImage2D; + GrGLFunction<GrGLCompressedTexSubImage2DFn> fCompressedTexSubImage2D; + GrGLFunction<GrGLCopyTexSubImage2DFn> fCopyTexSubImage2D; + GrGLFunction<GrGLCreateProgramFn> fCreateProgram; + GrGLFunction<GrGLCreateShaderFn> fCreateShader; + GrGLFunction<GrGLCullFaceFn> fCullFace; + GrGLFunction<GrGLDeleteBuffersFn> fDeleteBuffers; + GrGLFunction<GrGLDeleteFencesFn> fDeleteFences; + GrGLFunction<GrGLDeleteFramebuffersFn> fDeleteFramebuffers; + GrGLFunction<GrGLDeleteProgramFn> fDeleteProgram; + GrGLFunction<GrGLDeleteQueriesFn> fDeleteQueries; + GrGLFunction<GrGLDeleteRenderbuffersFn> fDeleteRenderbuffers; + GrGLFunction<GrGLDeleteSamplersFn> fDeleteSamplers; + GrGLFunction<GrGLDeleteShaderFn> fDeleteShader; + GrGLFunction<GrGLDeleteTexturesFn> fDeleteTextures; + GrGLFunction<GrGLDeleteVertexArraysFn> fDeleteVertexArrays; + GrGLFunction<GrGLDepthMaskFn> fDepthMask; + GrGLFunction<GrGLDisableFn> fDisable; + GrGLFunction<GrGLDisableVertexAttribArrayFn> fDisableVertexAttribArray; + GrGLFunction<GrGLDrawArraysFn> fDrawArrays; + GrGLFunction<GrGLDrawArraysIndirectFn> fDrawArraysIndirect; + GrGLFunction<GrGLDrawArraysInstancedFn> fDrawArraysInstanced; + GrGLFunction<GrGLDrawBufferFn> fDrawBuffer; + GrGLFunction<GrGLDrawBuffersFn> fDrawBuffers; + GrGLFunction<GrGLDrawElementsFn> fDrawElements; + GrGLFunction<GrGLDrawElementsIndirectFn> fDrawElementsIndirect; + GrGLFunction<GrGLDrawElementsInstancedFn> fDrawElementsInstanced; + GrGLFunction<GrGLDrawRangeElementsFn> fDrawRangeElements; + GrGLFunction<GrGLEnableFn> fEnable; + GrGLFunction<GrGLEnableVertexAttribArrayFn> fEnableVertexAttribArray; + GrGLFunction<GrGLEndQueryFn> fEndQuery; + GrGLFunction<GrGLFinishFn> fFinish; + GrGLFunction<GrGLFinishFenceFn> fFinishFence; + GrGLFunction<GrGLFlushFn> fFlush; + GrGLFunction<GrGLFlushMappedBufferRangeFn> fFlushMappedBufferRange; + GrGLFunction<GrGLFramebufferRenderbufferFn> fFramebufferRenderbuffer; + GrGLFunction<GrGLFramebufferTexture2DFn> fFramebufferTexture2D; + GrGLFunction<GrGLFramebufferTexture2DMultisampleFn> 
fFramebufferTexture2DMultisample; + GrGLFunction<GrGLFrontFaceFn> fFrontFace; + GrGLFunction<GrGLGenBuffersFn> fGenBuffers; + GrGLFunction<GrGLGenFencesFn> fGenFences; + GrGLFunction<GrGLGenFramebuffersFn> fGenFramebuffers; + GrGLFunction<GrGLGenerateMipmapFn> fGenerateMipmap; + GrGLFunction<GrGLGenQueriesFn> fGenQueries; + GrGLFunction<GrGLGenRenderbuffersFn> fGenRenderbuffers; + GrGLFunction<GrGLGenSamplersFn> fGenSamplers; + GrGLFunction<GrGLGenTexturesFn> fGenTextures; + GrGLFunction<GrGLGenVertexArraysFn> fGenVertexArrays; + GrGLFunction<GrGLGetBufferParameterivFn> fGetBufferParameteriv; + GrGLFunction<GrGLGetErrorFn> fGetError; + GrGLFunction<GrGLGetFramebufferAttachmentParameterivFn> fGetFramebufferAttachmentParameteriv; + GrGLFunction<GrGLGetIntegervFn> fGetIntegerv; + GrGLFunction<GrGLGetMultisamplefvFn> fGetMultisamplefv; + GrGLFunction<GrGLGetProgramBinaryFn> fGetProgramBinary; + GrGLFunction<GrGLGetProgramInfoLogFn> fGetProgramInfoLog; + GrGLFunction<GrGLGetProgramivFn> fGetProgramiv; + GrGLFunction<GrGLGetQueryObjecti64vFn> fGetQueryObjecti64v; + GrGLFunction<GrGLGetQueryObjectivFn> fGetQueryObjectiv; + GrGLFunction<GrGLGetQueryObjectui64vFn> fGetQueryObjectui64v; + GrGLFunction<GrGLGetQueryObjectuivFn> fGetQueryObjectuiv; + GrGLFunction<GrGLGetQueryivFn> fGetQueryiv; + GrGLFunction<GrGLGetRenderbufferParameterivFn> fGetRenderbufferParameteriv; + GrGLFunction<GrGLGetShaderInfoLogFn> fGetShaderInfoLog; + GrGLFunction<GrGLGetShaderivFn> fGetShaderiv; + GrGLFunction<GrGLGetShaderPrecisionFormatFn> fGetShaderPrecisionFormat; + GrGLFunction<GrGLGetStringFn> fGetString; + GrGLFunction<GrGLGetStringiFn> fGetStringi; + GrGLFunction<GrGLGetTexLevelParameterivFn> fGetTexLevelParameteriv; + GrGLFunction<GrGLGetUniformLocationFn> fGetUniformLocation; + GrGLFunction<GrGLInsertEventMarkerFn> fInsertEventMarker; + GrGLFunction<GrGLInvalidateBufferDataFn> fInvalidateBufferData; + GrGLFunction<GrGLInvalidateBufferSubDataFn> fInvalidateBufferSubData; + GrGLFunction<GrGLInvalidateFramebufferFn> fInvalidateFramebuffer; + GrGLFunction<GrGLInvalidateSubFramebufferFn> fInvalidateSubFramebuffer; + GrGLFunction<GrGLInvalidateTexImageFn> fInvalidateTexImage; + GrGLFunction<GrGLInvalidateTexSubImageFn> fInvalidateTexSubImage; + GrGLFunction<GrGLIsTextureFn> fIsTexture; + GrGLFunction<GrGLLineWidthFn> fLineWidth; + GrGLFunction<GrGLLinkProgramFn> fLinkProgram; + GrGLFunction<GrGLProgramBinaryFn> fProgramBinary; + GrGLFunction<GrGLProgramParameteriFn> fProgramParameteri; + GrGLFunction<GrGLMapBufferFn> fMapBuffer; + GrGLFunction<GrGLMapBufferRangeFn> fMapBufferRange; + GrGLFunction<GrGLMapBufferSubDataFn> fMapBufferSubData; + GrGLFunction<GrGLMapTexSubImage2DFn> fMapTexSubImage2D; + GrGLFunction<GrGLMemoryBarrierFn> fMemoryBarrier; + GrGLFunction<GrGLDrawArraysInstancedBaseInstanceFn> fDrawArraysInstancedBaseInstance; + GrGLFunction<GrGLDrawElementsInstancedBaseVertexBaseInstanceFn> fDrawElementsInstancedBaseVertexBaseInstance; + GrGLFunction<GrGLMultiDrawArraysIndirectFn> fMultiDrawArraysIndirect; + GrGLFunction<GrGLMultiDrawElementsIndirectFn> fMultiDrawElementsIndirect; + GrGLFunction<GrGLMultiDrawArraysInstancedBaseInstanceFn> fMultiDrawArraysInstancedBaseInstance; + GrGLFunction<GrGLMultiDrawElementsInstancedBaseVertexBaseInstanceFn> fMultiDrawElementsInstancedBaseVertexBaseInstance; + GrGLFunction<GrGLPatchParameteriFn> fPatchParameteri; + GrGLFunction<GrGLPixelStoreiFn> fPixelStorei; + GrGLFunction<GrGLPolygonModeFn> fPolygonMode; + GrGLFunction<GrGLPopGroupMarkerFn> fPopGroupMarker; + 
GrGLFunction<GrGLPushGroupMarkerFn> fPushGroupMarker; + GrGLFunction<GrGLQueryCounterFn> fQueryCounter; + GrGLFunction<GrGLReadBufferFn> fReadBuffer; + GrGLFunction<GrGLReadPixelsFn> fReadPixels; + GrGLFunction<GrGLRenderbufferStorageFn> fRenderbufferStorage; + + // On OpenGL ES there are multiple incompatible extensions that add support for MSAA + // and ES3 adds MSAA support to the standard. On an ES3 driver we may still use the + // older extensions for performance reasons or due to ES3 driver bugs. We want the function + // that creates the GrGLInterface to provide all available functions and internally + // we will select among them. They all have a method called glRenderbufferStorageMultisample*. + // So we have separate function pointers for GL_IMG/EXT_multisampled_to_texture, + // GL_CHROMIUM/ANGLE_framebuffer_multisample/ES3, and GL_APPLE_framebuffer_multisample + // variations. + // + // If a driver supports multiple GL_ARB_framebuffer_multisample-style extensions then we will + // assume the function pointers for the standard (or equivalent GL_ARB) version have + // been preferred over GL_EXT, GL_CHROMIUM, or GL_ANGLE variations that have reduced + // functionality. + + // GL_EXT_multisampled_render_to_texture (preferred) or GL_IMG_multisampled_render_to_texture + GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2EXT; + // GL_APPLE_framebuffer_multisample + GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2APPLE; + + // This is used to store the pointer for GL_ARB/EXT/ANGLE/CHROMIUM_framebuffer_multisample or + // the standard function in ES3+ or GL 3.0+. + GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisample; + + // Pointer to BindUniformLocationCHROMIUM from the GL_CHROMIUM_bind_uniform_location extension. 
+ GrGLFunction<GrGLBindUniformLocationFn> fBindUniformLocation; + + GrGLFunction<GrGLResolveMultisampleFramebufferFn> fResolveMultisampleFramebuffer; + GrGLFunction<GrGLSamplerParameteriFn> fSamplerParameteri; + GrGLFunction<GrGLSamplerParameterivFn> fSamplerParameteriv; + GrGLFunction<GrGLScissorFn> fScissor; + GrGLFunction<GrGLSetFenceFn> fSetFence; + GrGLFunction<GrGLShaderSourceFn> fShaderSource; + GrGLFunction<GrGLStencilFuncFn> fStencilFunc; + GrGLFunction<GrGLStencilFuncSeparateFn> fStencilFuncSeparate; + GrGLFunction<GrGLStencilMaskFn> fStencilMask; + GrGLFunction<GrGLStencilMaskSeparateFn> fStencilMaskSeparate; + GrGLFunction<GrGLStencilOpFn> fStencilOp; + GrGLFunction<GrGLStencilOpSeparateFn> fStencilOpSeparate; + GrGLFunction<GrGLTestFenceFn> fTestFence; + GrGLFunction<GrGLTexBufferFn> fTexBuffer; + GrGLFunction<GrGLTexBufferRangeFn> fTexBufferRange; + GrGLFunction<GrGLTexImage2DFn> fTexImage2D; + GrGLFunction<GrGLTexParameterfFn> fTexParameterf; + GrGLFunction<GrGLTexParameterfvFn> fTexParameterfv; + GrGLFunction<GrGLTexParameteriFn> fTexParameteri; + GrGLFunction<GrGLTexParameterivFn> fTexParameteriv; + GrGLFunction<GrGLTexSubImage2DFn> fTexSubImage2D; + GrGLFunction<GrGLTexStorage2DFn> fTexStorage2D; + GrGLFunction<GrGLTextureBarrierFn> fTextureBarrier; + GrGLFunction<GrGLDiscardFramebufferFn> fDiscardFramebuffer; + GrGLFunction<GrGLUniform1fFn> fUniform1f; + GrGLFunction<GrGLUniform1iFn> fUniform1i; + GrGLFunction<GrGLUniform1fvFn> fUniform1fv; + GrGLFunction<GrGLUniform1ivFn> fUniform1iv; + GrGLFunction<GrGLUniform2fFn> fUniform2f; + GrGLFunction<GrGLUniform2iFn> fUniform2i; + GrGLFunction<GrGLUniform2fvFn> fUniform2fv; + GrGLFunction<GrGLUniform2ivFn> fUniform2iv; + GrGLFunction<GrGLUniform3fFn> fUniform3f; + GrGLFunction<GrGLUniform3iFn> fUniform3i; + GrGLFunction<GrGLUniform3fvFn> fUniform3fv; + GrGLFunction<GrGLUniform3ivFn> fUniform3iv; + GrGLFunction<GrGLUniform4fFn> fUniform4f; + GrGLFunction<GrGLUniform4iFn> fUniform4i; + GrGLFunction<GrGLUniform4fvFn> fUniform4fv; + GrGLFunction<GrGLUniform4ivFn> fUniform4iv; + GrGLFunction<GrGLUniformMatrix2fvFn> fUniformMatrix2fv; + GrGLFunction<GrGLUniformMatrix3fvFn> fUniformMatrix3fv; + GrGLFunction<GrGLUniformMatrix4fvFn> fUniformMatrix4fv; + GrGLFunction<GrGLUnmapBufferFn> fUnmapBuffer; + GrGLFunction<GrGLUnmapBufferSubDataFn> fUnmapBufferSubData; + GrGLFunction<GrGLUnmapTexSubImage2DFn> fUnmapTexSubImage2D; + GrGLFunction<GrGLUseProgramFn> fUseProgram; + GrGLFunction<GrGLVertexAttrib1fFn> fVertexAttrib1f; + GrGLFunction<GrGLVertexAttrib2fvFn> fVertexAttrib2fv; + GrGLFunction<GrGLVertexAttrib3fvFn> fVertexAttrib3fv; + GrGLFunction<GrGLVertexAttrib4fvFn> fVertexAttrib4fv; + GrGLFunction<GrGLVertexAttribDivisorFn> fVertexAttribDivisor; + GrGLFunction<GrGLVertexAttribIPointerFn> fVertexAttribIPointer; + GrGLFunction<GrGLVertexAttribPointerFn> fVertexAttribPointer; + GrGLFunction<GrGLViewportFn> fViewport; + + /* NV_framebuffer_mixed_samples */ + GrGLFunction<GrGLCoverageModulationFn> fCoverageModulation; + + /* ARB_sync */ + GrGLFunction<GrGLFenceSyncFn> fFenceSync; + GrGLFunction<GrGLIsSyncFn> fIsSync; + GrGLFunction<GrGLClientWaitSyncFn> fClientWaitSync; + GrGLFunction<GrGLWaitSyncFn> fWaitSync; + GrGLFunction<GrGLDeleteSyncFn> fDeleteSync; + + /* ARB_internalforamt_query */ + GrGLFunction<GrGLGetInternalformativFn> fGetInternalformativ; + + /* KHR_debug */ + GrGLFunction<GrGLDebugMessageControlFn> fDebugMessageControl; + GrGLFunction<GrGLDebugMessageInsertFn> fDebugMessageInsert; + GrGLFunction<GrGLDebugMessageCallbackFn> 
fDebugMessageCallback; + GrGLFunction<GrGLGetDebugMessageLogFn> fGetDebugMessageLog; + GrGLFunction<GrGLPushDebugGroupFn> fPushDebugGroup; + GrGLFunction<GrGLPopDebugGroupFn> fPopDebugGroup; + GrGLFunction<GrGLObjectLabelFn> fObjectLabel; + + /* EXT_window_rectangles */ + GrGLFunction<GrGLWindowRectanglesFn> fWindowRectangles; + + /* GL_QCOM_tiled_rendering */ + GrGLFunction<GrGLStartTilingFn> fStartTiling; + GrGLFunction<GrGLEndTilingFn> fEndTiling; + } fFunctions; + +#if GR_TEST_UTILS + // This exists for internal testing. + virtual void abandon() const; +#endif +}; + +#endif diff --git a/src/deps/skia/include/gpu/gl/GrGLTypes.h b/src/deps/skia/include/gpu/gl/GrGLTypes.h new file mode 100644 index 000000000..d5167787c --- /dev/null +++ b/src/deps/skia/include/gpu/gl/GrGLTypes.h @@ -0,0 +1,207 @@ + +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrGLTypes_DEFINED +#define GrGLTypes_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/gpu/gl/GrGLConfig.h" + +/** + * Classifies GL contexts by which standard they implement (currently as OpenGL vs. OpenGL ES). + */ +enum GrGLStandard { + kNone_GrGLStandard, + kGL_GrGLStandard, + kGLES_GrGLStandard, + kWebGL_GrGLStandard, +}; +static const int kGrGLStandardCnt = 4; + +// The following allow certain interfaces to be turned off at compile time +// (for example, to lower code size). +#if SK_ASSUME_GL_ES + #define GR_IS_GR_GL(standard) false + #define GR_IS_GR_GL_ES(standard) true + #define GR_IS_GR_WEBGL(standard) false + #define SK_DISABLE_GL_INTERFACE 1 + #define SK_DISABLE_WEBGL_INTERFACE 1 +#elif SK_ASSUME_GL + #define GR_IS_GR_GL(standard) true + #define GR_IS_GR_GL_ES(standard) false + #define GR_IS_GR_WEBGL(standard) false + #define SK_DISABLE_GL_ES_INTERFACE 1 + #define SK_DISABLE_WEBGL_INTERFACE 1 +#elif SK_ASSUME_WEBGL + #define GR_IS_GR_GL(standard) false + #define GR_IS_GR_GL_ES(standard) false + #define GR_IS_GR_WEBGL(standard) true + #define SK_DISABLE_GL_ES_INTERFACE 1 + #define SK_DISABLE_GL_INTERFACE 1 +#else + #define GR_IS_GR_GL(standard) (kGL_GrGLStandard == standard) + #define GR_IS_GR_GL_ES(standard) (kGLES_GrGLStandard == standard) + #define GR_IS_GR_WEBGL(standard) (kWebGL_GrGLStandard == standard) +#endif + +/////////////////////////////////////////////////////////////////////////////// + +/** + * The supported GL formats represented as an enum. Actual support by GrContext depends on GL + * context version and extensions. 
+ */ +enum class GrGLFormat { + kUnknown, + + kRGBA8, + kR8, + kALPHA8, + kLUMINANCE8, + kLUMINANCE8_ALPHA8, + kBGRA8, + kRGB565, + kRGBA16F, + kR16F, + kRGB8, + kRGBX8, + kRG8, + kRGB10_A2, + kRGBA4, + kSRGB8_ALPHA8, + kCOMPRESSED_ETC1_RGB8, + kCOMPRESSED_RGB8_ETC2, + kCOMPRESSED_RGB8_BC1, + kCOMPRESSED_RGBA8_BC1, + kR16, + kRG16, + kRGBA16, + kRG16F, + kLUMINANCE16F, + + kLastColorFormat = kLUMINANCE16F, + + // Depth/Stencil formats + kSTENCIL_INDEX8, + kSTENCIL_INDEX16, + kDEPTH24_STENCIL8, + + kLast = kDEPTH24_STENCIL8 +}; + +/////////////////////////////////////////////////////////////////////////////// +/** + * Declares typedefs for all the GL functions used in GrGLInterface + */ + +typedef unsigned int GrGLenum; +typedef unsigned char GrGLboolean; +typedef unsigned int GrGLbitfield; +typedef signed char GrGLbyte; +typedef char GrGLchar; +typedef short GrGLshort; +typedef int GrGLint; +typedef int GrGLsizei; +typedef int64_t GrGLint64; +typedef unsigned char GrGLubyte; +typedef unsigned short GrGLushort; +typedef unsigned int GrGLuint; +typedef uint64_t GrGLuint64; +typedef unsigned short int GrGLhalf; +typedef float GrGLfloat; +typedef float GrGLclampf; +typedef double GrGLdouble; +typedef double GrGLclampd; +typedef void GrGLvoid; +#ifdef _WIN64 +typedef signed long long int GrGLintptr; +typedef signed long long int GrGLsizeiptr; +#else +typedef signed long int GrGLintptr; +typedef signed long int GrGLsizeiptr; +#endif +typedef void* GrGLeglImage; +typedef struct __GLsync* GrGLsync; + +struct GrGLDrawArraysIndirectCommand { + GrGLuint fCount; + GrGLuint fInstanceCount; + GrGLuint fFirst; + GrGLuint fBaseInstance; // Requires EXT_base_instance on ES. +}; + +// static_asserts must have messages in this file because its included in C++14 client code. +static_assert(16 == sizeof(GrGLDrawArraysIndirectCommand), ""); + +struct GrGLDrawElementsIndirectCommand { + GrGLuint fCount; + GrGLuint fInstanceCount; + GrGLuint fFirstIndex; + GrGLuint fBaseVertex; + GrGLuint fBaseInstance; // Requires EXT_base_instance on ES. +}; + +static_assert(20 == sizeof(GrGLDrawElementsIndirectCommand), ""); + +/** + * KHR_debug + */ +typedef void (GR_GL_FUNCTION_TYPE* GRGLDEBUGPROC)(GrGLenum source, + GrGLenum type, + GrGLuint id, + GrGLenum severity, + GrGLsizei length, + const GrGLchar* message, + const void* userParam); + +/** + * EGL types. + */ +typedef void* GrEGLImage; +typedef void* GrEGLDisplay; +typedef void* GrEGLContext; +typedef void* GrEGLClientBuffer; +typedef unsigned int GrEGLenum; +typedef int32_t GrEGLint; +typedef unsigned int GrEGLBoolean; + +/////////////////////////////////////////////////////////////////////////////// +/** + * Types for interacting with GL resources created externally to Skia. GrBackendObjects for GL + * textures are really const GrGLTexture*. The fFormat here should be a sized, internal format + * for the texture. We will try to use the sized format if the GL Context supports it, otherwise + * we will internally fall back to using the base internal formats. 
+ */ +struct GrGLTextureInfo { + GrGLenum fTarget; + GrGLuint fID; + GrGLenum fFormat = 0; + + bool operator==(const GrGLTextureInfo& that) const { + return fTarget == that.fTarget && fID == that.fID && fFormat == that.fFormat; + } +}; + +struct GrGLFramebufferInfo { + GrGLuint fFBOID; + GrGLenum fFormat = 0; + + bool operator==(const GrGLFramebufferInfo& that) const { + return fFBOID == that.fFBOID && fFormat == that.fFormat; + } +}; + +struct GrGLSurfaceInfo { + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + GrProtected fProtected = GrProtected::kNo; + + GrGLenum fTarget = 0; + GrGLenum fFormat = 0; +}; + +#endif diff --git a/src/deps/skia/include/gpu/gl/egl/BUILD.bazel b/src/deps/skia/include/gpu/gl/egl/BUILD.bazel new file mode 100644 index 000000000..4e824371a --- /dev/null +++ b/src/deps/skia/include/gpu/gl/egl/BUILD.bazel @@ -0,0 +1,11 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrGLMakeEGLInterface_hdr", + hdrs = ["GrGLMakeEGLInterface.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/gpu:GrTypes_hdr", + ], +) diff --git a/src/deps/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h b/src/deps/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h new file mode 100644 index 000000000..a3eb420b0 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h @@ -0,0 +1,14 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "include/gpu/GrTypes.h" + +#include "include/core/SkRefCnt.h" + +struct GrGLInterface; + +sk_sp<const GrGLInterface> GrGLMakeEGLInterface(); diff --git a/src/deps/skia/include/gpu/gl/glx/BUILD.bazel b/src/deps/skia/include/gpu/gl/glx/BUILD.bazel new file mode 100644 index 000000000..c336d4974 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/glx/BUILD.bazel @@ -0,0 +1,11 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrGLMakeGLXInterface_hdr", + hdrs = ["GrGLMakeGLXInterface.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/gpu:GrTypes_hdr", + ], +) diff --git a/src/deps/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h b/src/deps/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h new file mode 100644 index 000000000..b49cde458 --- /dev/null +++ b/src/deps/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h @@ -0,0 +1,14 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "include/gpu/GrTypes.h" + +#include "include/core/SkRefCnt.h" + +struct GrGLInterface; + +sk_sp<const GrGLInterface> GrGLMakeGLXInterface(); diff --git a/src/deps/skia/include/gpu/mock/BUILD.bazel b/src/deps/skia/include/gpu/mock/BUILD.bazel new file mode 100644 index 000000000..b7f4a9e06 --- /dev/null +++ b/src/deps/skia/include/gpu/mock/BUILD.bazel @@ -0,0 +1,11 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrMockTypes_hdr", + hdrs = ["GrMockTypes.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/gpu:GrTypes_hdr", + "//include/private:GrTypesPriv_hdr", + ], +) diff --git a/src/deps/skia/include/gpu/mock/GrMockTypes.h b/src/deps/skia/include/gpu/mock/GrMockTypes.h new file mode 100644 index 000000000..dbffe3ceb --- /dev/null +++ b/src/deps/skia/include/gpu/mock/GrMockTypes.h @@ -0,0 +1,144 @@ +/* + * Copyright 2017 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrMockOptions_DEFINED +#define GrMockOptions_DEFINED + +#include "include/gpu/GrTypes.h" +#include "include/private/GrTypesPriv.h" + +class GrBackendFormat; + +struct GrMockTextureInfo { + GrMockTextureInfo() + : fColorType(GrColorType::kUnknown) + , fCompressionType(SkImage::CompressionType::kNone) + , fID(0) {} + + GrMockTextureInfo(GrColorType colorType, + SkImage::CompressionType compressionType, + int id) + : fColorType(colorType) + , fCompressionType(compressionType) + , fID(id) { + SkASSERT(fID); + if (fCompressionType != SkImage::CompressionType::kNone) { + SkASSERT(colorType == GrColorType::kUnknown); + } + } + + bool operator==(const GrMockTextureInfo& that) const { + return fColorType == that.fColorType && + fCompressionType == that.fCompressionType && + fID == that.fID; + } + + GrBackendFormat getBackendFormat() const; + + SkImage::CompressionType compressionType() const { return fCompressionType; } + + GrColorType colorType() const { + SkASSERT(fCompressionType == SkImage::CompressionType::kNone); + return fColorType; + } + + int id() const { return fID; } + +private: + GrColorType fColorType; + SkImage::CompressionType fCompressionType; + int fID; +}; + +struct GrMockRenderTargetInfo { + GrMockRenderTargetInfo() + : fColorType(GrColorType::kUnknown) + , fID(0) {} + + GrMockRenderTargetInfo(GrColorType colorType, int id) + : fColorType(colorType) + , fID(id) { + SkASSERT(fID); + } + + bool operator==(const GrMockRenderTargetInfo& that) const { + return fColorType == that.fColorType && + fID == that.fID; + } + + GrBackendFormat getBackendFormat() const; + + GrColorType colorType() const { return fColorType; } + +private: + GrColorType fColorType; + int fID; +}; + +struct GrMockSurfaceInfo { + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + GrProtected fProtected = GrProtected::kNo; + + GrColorType fColorType = GrColorType::kUnknown; + SkImage::CompressionType fCompressionType = SkImage::CompressionType::kNone; +}; + +/** + * A pointer to this type is used as the GrBackendContext when creating a Mock GrContext. It can be + * used to specify capability options for the mock context. If nullptr is used a default constructed + * GrMockOptions is used. + */ +struct GrMockOptions { + GrMockOptions() { + using Renderability = ConfigOptions::Renderability; + // By default RGBA_8888 and BGRA_8888 are textureable and renderable and + // A8 and RGB565 are texturable. + fConfigOptions[(int)GrColorType::kRGBA_8888].fRenderability = Renderability::kNonMSAA; + fConfigOptions[(int)GrColorType::kRGBA_8888].fTexturable = true; + fConfigOptions[(int)GrColorType::kAlpha_8].fTexturable = true; + fConfigOptions[(int)GrColorType::kBGR_565].fTexturable = true; + + fConfigOptions[(int)GrColorType::kBGRA_8888] = fConfigOptions[(int)GrColorType::kRGBA_8888]; + + fCompressedOptions[(int)SkImage::CompressionType::kETC2_RGB8_UNORM].fTexturable = true; + fCompressedOptions[(int)SkImage::CompressionType::kBC1_RGB8_UNORM].fTexturable = true; + fCompressedOptions[(int)SkImage::CompressionType::kBC1_RGBA8_UNORM].fTexturable = true; + } + + struct ConfigOptions { + enum Renderability { kNo, kNonMSAA, kMSAA }; + Renderability fRenderability = kNo; + bool fTexturable = false; + }; + + // GrCaps options. 
+ bool fMipmapSupport = false; + bool fDrawInstancedSupport = false; + bool fHalfFloatVertexAttributeSupport = false; + uint32_t fMapBufferFlags = 0; + int fMaxTextureSize = 2048; + int fMaxRenderTargetSize = 2048; + int fMaxWindowRectangles = 0; + int fMaxVertexAttributes = 16; + int fMaxTessellationSegments = 0; + ConfigOptions fConfigOptions[kGrColorTypeCnt]; + ConfigOptions fCompressedOptions[SkImage::kCompressionTypeCount]; + + // GrShaderCaps options. + bool fIntegerSupport = false; + bool fFlatInterpolationSupport = false; + int fMaxVertexSamplers = 0; + int fMaxFragmentSamplers = 8; + bool fShaderDerivativeSupport = true; + bool fDualSourceBlendingSupport = false; + + // GrMockGpu options. + bool fFailTextureAllocations = false; +}; + +#endif diff --git a/src/deps/skia/include/gpu/mtl/BUILD.bazel b/src/deps/skia/include/gpu/mtl/BUILD.bazel new file mode 100644 index 000000000..9881ff21f --- /dev/null +++ b/src/deps/skia/include/gpu/mtl/BUILD.bazel @@ -0,0 +1,18 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrMtlBackendContext_hdr", + hdrs = ["GrMtlBackendContext.h"], + visibility = ["//:__subpackages__"], + deps = [":GrMtlTypes_hdr"], +) + +generated_cc_atom( + name = "GrMtlTypes_hdr", + hdrs = ["GrMtlTypes.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/gpu:GrTypes_hdr", + "//include/ports:SkCFObject_hdr", + ], +) diff --git a/src/deps/skia/include/gpu/mtl/GrMtlBackendContext.h b/src/deps/skia/include/gpu/mtl/GrMtlBackendContext.h new file mode 100644 index 000000000..0d88f479a --- /dev/null +++ b/src/deps/skia/include/gpu/mtl/GrMtlBackendContext.h @@ -0,0 +1,21 @@ +/* + * Copyright 2020 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrMtlBackendContext_DEFINED +#define GrMtlBackendContext_DEFINED + +#include "include/gpu/mtl/GrMtlTypes.h" + +// The BackendContext contains all of the base Metal objects needed by the GrMtlGpu. The assumption +// is that the client will set these up and pass them to the GrMtlGpu constructor. +struct SK_API GrMtlBackendContext { + sk_cfp<GrMTLHandle> fDevice; + sk_cfp<GrMTLHandle> fQueue; + sk_cfp<GrMTLHandle> fBinaryArchive; +}; + +#endif diff --git a/src/deps/skia/include/gpu/mtl/GrMtlTypes.h b/src/deps/skia/include/gpu/mtl/GrMtlTypes.h new file mode 100644 index 000000000..f7a232e3c --- /dev/null +++ b/src/deps/skia/include/gpu/mtl/GrMtlTypes.h @@ -0,0 +1,63 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrMtlTypes_DEFINED +#define GrMtlTypes_DEFINED + +#include "include/gpu/GrTypes.h" +#include "include/ports/SkCFObject.h" + +/** + * Declares typedefs for Metal types used in Ganesh cpp code + */ +using GrMTLPixelFormat = unsigned int; +using GrMTLTextureUsage = unsigned int; +using GrMTLStorageMode = unsigned int; +using GrMTLHandle = const void*; + +/////////////////////////////////////////////////////////////////////////////// + +#ifdef __APPLE__ + +#include <TargetConditionals.h> + +#if TARGET_OS_SIMULATOR +#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(13.0)) +#else // TARGET_OS_SIMULATOR +#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(8.0)) +#endif // TARGET_OS_SIMULATOR + +/** + * Types for interacting with Metal resources created externally to Skia. + * This is used by GrBackendObjects. 
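As a rough, hedged sketch (not part of this diff) of how a client might hand such externally created Metal objects to Skia: assume the id<MTLDevice> and id<MTLCommandQueue> have already been bridged to the opaque GrMTLHandle type declared above, and that GrDirectContext::MakeMetal() from include/gpu/GrDirectContext.h (outside this hunk) is available. The function name and arguments below are placeholders.

#include "include/gpu/GrDirectContext.h"
#include "include/gpu/mtl/GrMtlBackendContext.h"

// Placeholder helper: takes already-bridged Metal handles owned by the client.
sk_sp<GrDirectContext> makeSkiaMetalContext(GrMTLHandle device, GrMTLHandle queue) {
    GrMtlBackendContext backendContext;
    backendContext.fDevice.retain(device);   // sk_cfp::retain() bumps the retain count; client keeps its own reference
    backendContext.fQueue.retain(queue);
    return GrDirectContext::MakeMetal(backendContext);
}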
+ */ +struct GrMtlTextureInfo { +public: + GrMtlTextureInfo() {} + + sk_cfp<GrMTLHandle> fTexture; + + bool operator==(const GrMtlTextureInfo& that) const { + return fTexture == that.fTexture; + } +}; + +struct GrMtlSurfaceInfo { + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + GrProtected fProtected = GrProtected::kNo; + + // Since we aren't in an Obj-C header we can't directly use Mtl types here. Each of these can + // cast to their mapped Mtl types list below. + GrMTLPixelFormat fFormat = 0; // MTLPixelFormat fFormat = MTLPixelFormatInvalid; + GrMTLTextureUsage fUsage = 0; // MTLTextureUsage fUsage = MTLTextureUsageUnknown; + GrMTLStorageMode fStorageMode = 0; // MTLStorageMode fStorageMode = MTLStorageModeShared; +}; + +#endif + +#endif diff --git a/src/deps/skia/include/gpu/vk/BUILD.bazel b/src/deps/skia/include/gpu/vk/BUILD.bazel new file mode 100644 index 000000000..a2f41750e --- /dev/null +++ b/src/deps/skia/include/gpu/vk/BUILD.bazel @@ -0,0 +1,56 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrVkBackendContext_hdr", + hdrs = ["GrVkBackendContext.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrVkMemoryAllocator_hdr", + ":GrVkTypes_hdr", + "//include/core:SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "GrVkExtensions_hdr", + hdrs = ["GrVkExtensions.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrVkTypes_hdr", + "//include/core:SkString_hdr", + "//include/private:SkTArray_hdr", + ], +) + +generated_cc_atom( + name = "GrVkMemoryAllocator_hdr", + hdrs = ["GrVkMemoryAllocator.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrVkTypes_hdr", + "//include/core:SkRefCnt_hdr", + "//include/gpu:GrTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrVkTypes_hdr", + hdrs = ["GrVkTypes.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrVkVulkan_hdr", + "//include/core:SkTypes_hdr", + "//include/gpu:GrTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrVkVulkan_hdr", + hdrs = ["GrVkVulkan.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkTypes_hdr", + "//include/third_party/vulkan/vulkan:vulkan_android_hdr", + "//include/third_party/vulkan/vulkan:vulkan_core_hdr", + ], +) diff --git a/src/deps/skia/include/gpu/vk/GrVkBackendContext.h b/src/deps/skia/include/gpu/vk/GrVkBackendContext.h new file mode 100644 index 000000000..a4fd336ff --- /dev/null +++ b/src/deps/skia/include/gpu/vk/GrVkBackendContext.h @@ -0,0 +1,76 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrVkBackendContext_DEFINED +#define GrVkBackendContext_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/gpu/vk/GrVkMemoryAllocator.h" +#include "include/gpu/vk/GrVkTypes.h" + +class GrVkExtensions; + +enum GrVkExtensionFlags { + kEXT_debug_report_GrVkExtensionFlag = 0x0001, + kNV_glsl_shader_GrVkExtensionFlag = 0x0002, + kKHR_surface_GrVkExtensionFlag = 0x0004, + kKHR_swapchain_GrVkExtensionFlag = 0x0008, + kKHR_win32_surface_GrVkExtensionFlag = 0x0010, + kKHR_android_surface_GrVkExtensionFlag = 0x0020, + kKHR_xcb_surface_GrVkExtensionFlag = 0x0040, +}; + +enum GrVkFeatureFlags { + kGeometryShader_GrVkFeatureFlag = 0x0001, + kDualSrcBlend_GrVkFeatureFlag = 0x0002, + kSampleRateShading_GrVkFeatureFlag = 0x0004, +}; + +// It is not guarenteed VkPhysicalDeviceProperties2 will be in the client's header so we forward +// declare it here to be safe. 
+struct VkPhysicalDeviceFeatures2; + +// The BackendContext contains all of the base Vulkan objects needed by the GrVkGpu. The assumption +// is that the client will set these up and pass them to the GrVkGpu constructor. The VkDevice +// created must support at least one graphics queue, which is passed in as well. +// The QueueFamilyIndex must match the family of the given queue. It is needed for CommandPool +// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) needs to be created +// in or transitioned to that family. The refs held by members of this struct must be released +// (either by deleting the struct or manually releasing the refs) before the underlying vulkan +// device and instance are destroyed. +struct SK_API GrVkBackendContext { + VkInstance fInstance; + VkPhysicalDevice fPhysicalDevice; + VkDevice fDevice; + VkQueue fQueue; + uint32_t fGraphicsQueueIndex; + uint32_t fMinAPIVersion; // Deprecated. Set fInstanceVersion instead. + uint32_t fInstanceVersion = 0; // Deprecated. Set fMaxApiVersion instead + // The max api version set here should match the value set in VkApplicationInfo::apiVersion when + // then VkInstance was created. + uint32_t fMaxAPIVersion = 0; + uint32_t fExtensions = 0; // Deprecated. Use fVkExtensions instead. + const GrVkExtensions* fVkExtensions = nullptr; + uint32_t fFeatures; // Deprecated. Use fDeviceFeatures[2] instead. + // The client can create their VkDevice with either a VkPhysicalDeviceFeatures or + // VkPhysicalDeviceFeatures2 struct, thus we have to support taking both. The + // VkPhysicalDeviceFeatures2 struct is needed so we know if the client enabled any extension + // specific features. If fDeviceFeatures2 is not null then we ignore fDeviceFeatures. If both + // fDeviceFeatures and fDeviceFeatures2 are null we will assume no features are enabled. + const VkPhysicalDeviceFeatures* fDeviceFeatures = nullptr; + const VkPhysicalDeviceFeatures2* fDeviceFeatures2 = nullptr; + sk_sp<GrVkMemoryAllocator> fMemoryAllocator; + GrVkGetProc fGetProc = nullptr; + // This is deprecated and should be set to false. The client is responsible for managing the + // lifetime of the VkInstance and VkDevice objects. + bool fOwnsInstanceAndDevice = false; + // Indicates that we are working with protected content and all CommandPool and Queue operations + // should be done in a protected context. + GrProtected fProtectedContext = GrProtected::kNo; +}; + +#endif diff --git a/src/deps/skia/include/gpu/vk/GrVkExtensions.h b/src/deps/skia/include/gpu/vk/GrVkExtensions.h new file mode 100644 index 000000000..e78543dc7 --- /dev/null +++ b/src/deps/skia/include/gpu/vk/GrVkExtensions.h @@ -0,0 +1,63 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrVkExtensions_DEFINED +#define GrVkExtensions_DEFINED + +#include "include/core/SkString.h" +#include "include/gpu/vk/GrVkTypes.h" +#include "include/private/SkTArray.h" + +/** + * Helper class that eats in an array of extensions strings for instance and device and allows for + * quicker querying if an extension is present. 
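To make the setup described in the comments above concrete, here is a minimal sketch (not part of this diff) that fills out a GrVkBackendContext from client-created Vulkan handles and passes it to GrDirectContext::MakeVulkan(), which is assumed from include/gpu/GrDirectContext.h outside this hunk. All lower-case parameter names are hypothetical client-side objects, and the GrVkExtensions and VkPhysicalDeviceFeatures2 arguments are assumed to match what was actually enabled at instance/device creation.

#include "include/gpu/GrDirectContext.h"
#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"

sk_sp<GrDirectContext> makeSkiaVulkanContext(VkInstance instance,
                                             VkPhysicalDevice physicalDevice,
                                             VkDevice device,
                                             VkQueue queue,
                                             uint32_t graphicsQueueIndex,
                                             const GrVkExtensions* extensions,
                                             const VkPhysicalDeviceFeatures2* features2,
                                             GrVkGetProc getProc) {
    GrVkBackendContext backendContext = {};            // zero the legacy/deprecated fields
    backendContext.fInstance = instance;
    backendContext.fPhysicalDevice = physicalDevice;
    backendContext.fDevice = device;
    backendContext.fQueue = queue;                      // a graphics queue from graphicsQueueIndex's family
    backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
    backendContext.fMaxAPIVersion = VK_API_VERSION_1_1; // must match VkApplicationInfo::apiVersion
    backendContext.fVkExtensions = extensions;          // extensions enabled on the instance and device
    backendContext.fDeviceFeatures2 = features2;        // features enabled at device creation
    backendContext.fGetProc = getProc;                  // lets Skia resolve its Vulkan entry points
    return GrDirectContext::MakeVulkan(backendContext);
}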
+ */ +class SK_API GrVkExtensions { +public: + GrVkExtensions() {} + + void init(GrVkGetProc, VkInstance, VkPhysicalDevice, + uint32_t instanceExtensionCount, const char* const* instanceExtensions, + uint32_t deviceExtensionCount, const char* const* deviceExtensions); + + bool hasExtension(const char[], uint32_t minVersion) const; + + struct Info { + Info() {} + Info(const char* name) : fName(name), fSpecVersion(0) {} + + SkString fName; + uint32_t fSpecVersion; + + struct Less { + bool operator()(const Info& a, const SkString& b) const { + return strcmp(a.fName.c_str(), b.c_str()) < 0; + } + bool operator()(const SkString& a, const GrVkExtensions::Info& b) const { + return strcmp(a.c_str(), b.fName.c_str()) < 0; + } + }; + }; + +#ifdef SK_DEBUG + void dump() const { + SkDebugf("**Vulkan Extensions**\n"); + for (int i = 0; i < fExtensions.count(); ++i) { + SkDebugf("%s. Version: %d\n", + fExtensions[i].fName.c_str(), fExtensions[i].fSpecVersion); + } + SkDebugf("**End Vulkan Extensions**\n"); + } +#endif + +private: + void getSpecVersions(GrVkGetProc getProc, VkInstance, VkPhysicalDevice); + + SkTArray<Info> fExtensions; +}; + +#endif diff --git a/src/deps/skia/include/gpu/vk/GrVkMemoryAllocator.h b/src/deps/skia/include/gpu/vk/GrVkMemoryAllocator.h new file mode 100644 index 000000000..e3782dba1 --- /dev/null +++ b/src/deps/skia/include/gpu/vk/GrVkMemoryAllocator.h @@ -0,0 +1,140 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrVkMemoryAllocator_DEFINED +#define GrVkMemoryAllocator_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/gpu/GrTypes.h" +#include "include/gpu/vk/GrVkTypes.h" + +class GrVkMemoryAllocator : public SkRefCnt { +public: + enum class AllocationPropertyFlags { + kNone = 0, + // Allocation will be placed in its own VkDeviceMemory and not suballocated from some larger + // block. + kDedicatedAllocation = 0x1, + // Says that the backing memory can only be accessed by the device. Additionally the device + // may lazily allocate the memory. This cannot be used with buffers that will be host + // visible. Setting this flag does not guarantee that we will allocate memory that respects + // it, but we will try to prefer memory that can respect it. + kLazyAllocation = 0x2, + // The allocation will be mapped immediately and stay mapped until it is destroyed. This + // flag is only valid for buffers which are host visible (i.e. must have a usage other than + // BufferUsage::kGpuOnly). + kPersistentlyMapped = 0x4, + // Allocation can only be accessed by the device using a protected context. + kProtected = 0x8, + }; + + GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(AllocationPropertyFlags); + + enum class BufferUsage { + // Buffers that will only be accessed from the device (large const buffers). Will always be + // in device local memory. + kGpuOnly, + // Buffers that typically will be updated multiple times by the host and read on the gpu + // (e.g. uniform or vertex buffers). CPU writes will generally be sequential in the buffer + // and will try to take advantage of the write-combined nature of the gpu buffers. Thus this + // will always be mappable and coherent memory, and it will prefer to be in device local + // memory. + kCpuWritesGpuReads, + // Buffers that will be accessed on the host and copied to another GPU resource (transfer + // buffers). Will always be mappable and coherent memory. 
+ kTransfersFromCpuToGpu, + // Buffers which are typically writted to by the GPU and then read on the host. Will always + // be mappable memory, and will prefer cached memory. + kTransfersFromGpuToCpu, + }; + + // DEPRECATED: Use and implement allocateImageMemory instead + virtual bool allocateMemoryForImage(VkImage, AllocationPropertyFlags, GrVkBackendMemory*) { + // The default implementation here is so clients can delete this virtual as the switch to + // the new one which returns a VkResult. + return false; + } + + virtual VkResult allocateImageMemory(VkImage image, AllocationPropertyFlags flags, + GrVkBackendMemory* memory) { + bool result = this->allocateMemoryForImage(image, flags, memory); + // VK_ERROR_INITIALIZATION_FAILED is a bogus result to return from this function, but it is + // just something to return that is not VK_SUCCESS and can't be interpreted by a caller to + // mean something specific happened like device lost or oom. This will be removed once we + // update clients to implement this virtual. + return result ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; + } + + // DEPRECATED: Use and implement allocateBufferMemory instead + virtual bool allocateMemoryForBuffer(VkBuffer, BufferUsage, AllocationPropertyFlags, + GrVkBackendMemory*) { + // The default implementation here is so clients can delete this virtual as the switch to + // the new one which returns a VkResult. + return false; + } + + virtual VkResult allocateBufferMemory(VkBuffer buffer, + BufferUsage usage, + AllocationPropertyFlags flags, + GrVkBackendMemory* memory) { + bool result = this->allocateMemoryForBuffer(buffer, usage, flags, memory); + // VK_ERROR_INITIALIZATION_FAILED is a bogus result to return from this function, but it is + // just something to return that is not VK_SUCCESS and can't be interpreted by a caller to + // mean something specific happened like device lost or oom. This will be removed once we + // update clients to implement this virtual. + return result ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; + } + + + // Fills out the passed in GrVkAlloc struct for the passed in GrVkBackendMemory. + virtual void getAllocInfo(const GrVkBackendMemory&, GrVkAlloc*) const = 0; + + // Maps the entire allocation and returns a pointer to the start of the allocation. The + // implementation may map more memory than just the allocation, but the returned pointer must + // point at the start of the memory for the requested allocation. + virtual void* mapMemory(const GrVkBackendMemory&) { return nullptr; } + virtual VkResult mapMemory(const GrVkBackendMemory& memory, void** data) { + *data = this->mapMemory(memory); + // VK_ERROR_INITIALIZATION_FAILED is a bogus result to return from this function, but it is + // just something to return that is not VK_SUCCESS and can't be interpreted by a caller to + // mean something specific happened like device lost or oom. This will be removed once we + // update clients to implement this virtual. + return *data ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; + } + virtual void unmapMemory(const GrVkBackendMemory&) = 0; + + // The following two calls are used for managing non-coherent memory. The offset is relative to + // the start of the allocation and not the underlying VkDeviceMemory. Additionaly the client + // must make sure that the offset + size passed in is less that or equal to the allocation size. + // It is the responsibility of the implementation to make sure all alignment requirements are + // followed. 
The client should not have to deal with any sort of alignment issues. + virtual void flushMappedMemory(const GrVkBackendMemory&, VkDeviceSize, VkDeviceSize) {} + virtual VkResult flushMemory(const GrVkBackendMemory& memory, VkDeviceSize offset, + VkDeviceSize size) { + this->flushMappedMemory(memory, offset, size); + return VK_SUCCESS; + } + virtual void invalidateMappedMemory(const GrVkBackendMemory&, VkDeviceSize, VkDeviceSize) {} + virtual VkResult invalidateMemory(const GrVkBackendMemory& memory, VkDeviceSize offset, + VkDeviceSize size) { + this->invalidateMappedMemory(memory, offset, size); + return VK_SUCCESS; + } + + virtual void freeMemory(const GrVkBackendMemory&) = 0; + + // Returns the total amount of memory that is allocated and in use by an allocation for this + // allocator. + virtual uint64_t totalUsedMemory() const = 0; + + // Returns the total amount of memory that is allocated by this allocator. + virtual uint64_t totalAllocatedMemory() const = 0; +}; + +GR_MAKE_BITFIELD_CLASS_OPS(GrVkMemoryAllocator::AllocationPropertyFlags) + +#endif diff --git a/src/deps/skia/include/gpu/vk/GrVkTypes.h b/src/deps/skia/include/gpu/vk/GrVkTypes.h new file mode 100644 index 000000000..7b95962fa --- /dev/null +++ b/src/deps/skia/include/gpu/vk/GrVkTypes.h @@ -0,0 +1,187 @@ + +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrVkTypes_DEFINED +#define GrVkTypes_DEFINED + +#include "include/core/SkTypes.h" +#include "include/gpu/vk/GrVkVulkan.h" + +#ifndef VK_VERSION_1_1 +#error Skia requires the use of Vulkan 1.1 headers +#endif + +#include <functional> +#include "include/gpu/GrTypes.h" + +typedef intptr_t GrVkBackendMemory; + +/** + * Types for interacting with Vulkan resources created externally to Skia. GrBackendObjects for + * Vulkan textures are really const GrVkImageInfo* + */ +struct GrVkAlloc { + // can be VK_NULL_HANDLE iff is an RT and is borrowed + VkDeviceMemory fMemory = VK_NULL_HANDLE; + VkDeviceSize fOffset = 0; + VkDeviceSize fSize = 0; // this can be indeterminate iff Tex uses borrow semantics + uint32_t fFlags = 0; + GrVkBackendMemory fBackendMemory = 0; // handle to memory allocated via GrVkMemoryAllocator. + + enum Flag { + kNoncoherent_Flag = 0x1, // memory must be flushed to device after mapping + kMappable_Flag = 0x2, // memory is able to be mapped. + kLazilyAllocated_Flag = 0x4, // memory was created with lazy allocation + }; + + bool operator==(const GrVkAlloc& that) const { + return fMemory == that.fMemory && fOffset == that.fOffset && fSize == that.fSize && + fFlags == that.fFlags && fUsesSystemHeap == that.fUsesSystemHeap; + } + +private: + friend class GrVkHeap; // For access to usesSystemHeap + bool fUsesSystemHeap = false; +}; + +// This struct is used to pass in the necessary information to create a VkSamplerYcbcrConversion +// object for an VkExternalFormatANDROID. +struct GrVkYcbcrConversionInfo { + bool operator==(const GrVkYcbcrConversionInfo& that) const { + // Invalid objects are not required to have all other fields initialized or matching. 
+ if (!this->isValid() && !that.isValid()) { + return true; + } + return this->fFormat == that.fFormat && + this->fExternalFormat == that.fExternalFormat && + this->fYcbcrModel == that.fYcbcrModel && + this->fYcbcrRange == that.fYcbcrRange && + this->fXChromaOffset == that.fXChromaOffset && + this->fYChromaOffset == that.fYChromaOffset && + this->fChromaFilter == that.fChromaFilter && + this->fForceExplicitReconstruction == that.fForceExplicitReconstruction; + } + bool operator!=(const GrVkYcbcrConversionInfo& that) const { return !(*this == that); } + + bool isValid() const { return fYcbcrModel != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; } + + // Format of the source image. Must be set to VK_FORMAT_UNDEFINED for external images or + // a valid image format otherwise. + VkFormat fFormat = VK_FORMAT_UNDEFINED; + + // The external format. Must be non-zero for external images, zero otherwise. + // Should be compatible to be used in a VkExternalFormatANDROID struct. + uint64_t fExternalFormat = 0; + + VkSamplerYcbcrModelConversion fYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; + VkSamplerYcbcrRange fYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL; + VkChromaLocation fXChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN; + VkChromaLocation fYChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN; + VkFilter fChromaFilter = VK_FILTER_NEAREST; + VkBool32 fForceExplicitReconstruction = false; + + // For external images format features here should be those returned by a call to + // vkAndroidHardwareBufferFormatPropertiesANDROID + VkFormatFeatureFlags fFormatFeatures = 0; +}; + +/* + * When wrapping a GrBackendTexture or GrBackendRendenderTarget, the fCurrentQueueFamily should + * either be VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL, or VK_QUEUE_FAMILY_FOREIGN_EXT. If + * fSharingMode is VK_SHARING_MODE_EXCLUSIVE then fCurrentQueueFamily can also be the graphics + * queue index passed into Skia. 
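To illustrate the queue-family note above, a hedged sketch (not part of this diff) of wrapping an externally created VkImage: populate a GrVkImageInfo and construct a GrBackendTexture from it, where GrBackendTexture is assumed from include/gpu/GrBackendSurface.h outside this hunk. The format, usage flags, and dimensions are placeholder values chosen for illustration.

#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/vk/GrVkTypes.h"

// Placeholder helper: vkImage and alloc are owned and created by the client.
GrBackendTexture wrapVulkanImage(VkImage vkImage, const GrVkAlloc& alloc,
                                 int width, int height) {
    GrVkImageInfo info;
    info.fImage = vkImage;
    info.fAlloc = alloc;
    info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    info.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
    info.fImageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    info.fLevelCount = 1;
    info.fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;  // exclusive sharing, per the comment above
    info.fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return GrBackendTexture(width, height, info);
}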
+ */ +struct GrVkImageInfo { + VkImage fImage = VK_NULL_HANDLE; + GrVkAlloc fAlloc; + VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL; + VkImageLayout fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED; + VkFormat fFormat = VK_FORMAT_UNDEFINED; + VkImageUsageFlags fImageUsageFlags = 0; + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + uint32_t fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED; + GrProtected fProtected = GrProtected::kNo; + GrVkYcbcrConversionInfo fYcbcrConversionInfo; + VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE; +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + bool fPartOfSwapchainOrAndroidWindow = false; +#endif + +#if GR_TEST_UTILS + bool operator==(const GrVkImageInfo& that) const { + bool equal = fImage == that.fImage && fAlloc == that.fAlloc && + fImageTiling == that.fImageTiling && + fImageLayout == that.fImageLayout && + fFormat == that.fFormat && + fImageUsageFlags == that.fImageUsageFlags && + fSampleCount == that.fSampleCount && + fLevelCount == that.fLevelCount && + fCurrentQueueFamily == that.fCurrentQueueFamily && + fProtected == that.fProtected && + fYcbcrConversionInfo == that.fYcbcrConversionInfo && + fSharingMode == that.fSharingMode; +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + equal = equal && (fPartOfSwapchainOrAndroidWindow == that.fPartOfSwapchainOrAndroidWindow); +#endif + return equal; + } +#endif +}; + +using GrVkGetProc = std::function<PFN_vkVoidFunction( + const char*, // function name + VkInstance, // instance or VK_NULL_HANDLE + VkDevice // device or VK_NULL_HANDLE + )>; + +/** + * This object is wrapped in a GrBackendDrawableInfo and passed in as an argument to + * drawBackendGpu() calls on an SkDrawable. The drawable will use this info to inject direct + * Vulkan calls into our stream of GPU draws. + * + * The SkDrawable is given a secondary VkCommandBuffer in which to record draws. The GPU backend + * will then execute that command buffer within a render pass it is using for its own draws. The + * drawable is also given the attachment of the color index, a compatible VkRenderPass, and the + * VkFormat of the color attachment so that it can make VkPipeline objects for the draws. The + * SkDrawable must not alter the state of the VkRenderpass or sub pass. + * + * Additionally, the SkDrawable may fill in the passed in fDrawBounds with the bounds of the draws + * that it submits to the command buffer. This will be used by the GPU backend for setting the + * bounds in vkCmdBeginRenderPass. If fDrawBounds is not updated, we will assume that the entire + * attachment may have been written to. + * + * The SkDrawable is always allowed to create its own command buffers and submit them to the queue + * to render offscreen textures which will be sampled in draws added to the passed in + * VkCommandBuffer. If this is done the SkDrawable is in charge of adding the required memory + * barriers to the queue for the sampled images since the Skia backend will not do this. 
+ */ +struct GrVkDrawableInfo { + VkCommandBuffer fSecondaryCommandBuffer; + uint32_t fColorAttachmentIndex; + VkRenderPass fCompatibleRenderPass; + VkFormat fFormat; + VkRect2D* fDrawBounds; +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + bool fFromSwapchainOrAndroidWindow; +#endif +}; + +struct GrVkSurfaceInfo { + uint32_t fSampleCount = 1; + uint32_t fLevelCount = 0; + GrProtected fProtected = GrProtected::kNo; + + VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL; + VkFormat fFormat = VK_FORMAT_UNDEFINED; + VkImageUsageFlags fImageUsageFlags = 0; + GrVkYcbcrConversionInfo fYcbcrConversionInfo; + VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE; +}; + +#endif diff --git a/src/deps/skia/include/gpu/vk/GrVkVulkan.h b/src/deps/skia/include/gpu/vk/GrVkVulkan.h new file mode 100644 index 000000000..65cbf9b8b --- /dev/null +++ b/src/deps/skia/include/gpu/vk/GrVkVulkan.h @@ -0,0 +1,32 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrVkVulkan_DEFINED +#define GrVkVulkan_DEFINED + +#include "include/core/SkTypes.h" + +#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN) +#include "include/third_party/vulkan/vulkan/vulkan_core.h" +#else +// For google3 builds we don't set SKIA_IMPLEMENTATION so we need to make sure that the vulkan +// headers stay up to date for our needs +#include <vulkan/vulkan_core.h> +#endif + +#ifdef SK_BUILD_FOR_ANDROID +// This is needed to get android extensions for external memory +#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN) +#include "include/third_party/vulkan/vulkan/vulkan_android.h" +#else +// For google3 builds we don't set SKIA_IMPLEMENTATION so we need to make sure that the vulkan +// headers stay up to date for our needs +#include <vulkan/vulkan_android.h> +#endif +#endif + +#endif diff --git a/src/deps/skia/include/pathops/BUILD.bazel b/src/deps/skia/include/pathops/BUILD.bazel new file mode 100644 index 000000000..746a84434 --- /dev/null +++ b/src/deps/skia/include/pathops/BUILD.bazel @@ -0,0 +1,12 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkPathOps_hdr", + hdrs = ["SkPathOps.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkTypes_hdr", + "//include/private:SkTArray_hdr", + "//include/private:SkTDArray_hdr", + ], +) diff --git a/src/deps/skia/include/pathops/SkPathOps.h b/src/deps/skia/include/pathops/SkPathOps.h new file mode 100644 index 000000000..756533257 --- /dev/null +++ b/src/deps/skia/include/pathops/SkPathOps.h @@ -0,0 +1,113 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef SkPathOps_DEFINED +#define SkPathOps_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkTArray.h" +#include "include/private/SkTDArray.h" + +class SkPath; +struct SkRect; + + +// FIXME: move everything below into the SkPath class +/** + * The logical operations that can be performed when combining two paths. + */ +enum SkPathOp { + kDifference_SkPathOp, //!< subtract the op path from the first path + kIntersect_SkPathOp, //!< intersect the two paths + kUnion_SkPathOp, //!< union (inclusive-or) the two paths + kXOR_SkPathOp, //!< exclusive-or the two paths + kReverseDifference_SkPathOp, //!< subtract the first path from the op path +}; + +/** Set this path to the result of applying the Op to this path and the + specified path: this = (this op operand). 
+ The resulting path will be constructed from non-overlapping contours. + The curve order is reduced where possible so that cubics may be turned + into quadratics, and quadratics maybe turned into lines. + + Returns true if operation was able to produce a result; + otherwise, result is unmodified. + + @param one The first operand (for difference, the minuend) + @param two The second operand (for difference, the subtrahend) + @param op The operator to apply. + @param result The product of the operands. The result may be one of the + inputs. + @return True if the operation succeeded. + */ +bool SK_API Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result); + +/** Set this path to a set of non-overlapping contours that describe the + same area as the original path. + The curve order is reduced where possible so that cubics may + be turned into quadratics, and quadratics maybe turned into lines. + + Returns true if operation was able to produce a result; + otherwise, result is unmodified. + + @param path The path to simplify. + @param result The simplified path. The result may be the input. + @return True if simplification succeeded. + */ +bool SK_API Simplify(const SkPath& path, SkPath* result); + +/** Set the resulting rectangle to the tight bounds of the path. + + @param path The path measured. + @param result The tight bounds of the path. + @return True if the bounds could be computed. + */ +bool SK_API TightBounds(const SkPath& path, SkRect* result); + +/** Set the result with fill type winding to area equivalent to path. + Returns true if successful. Does not detect if path contains contours which + contain self-crossings or cross other contours; in these cases, may return + true even though result does not fill same area as path. + + Returns true if operation was able to produce a result; + otherwise, result is unmodified. The result may be the input. + + @param path The path typically with fill type set to even odd. + @param result The equivalent path with fill type set to winding. + @return True if winding path was set. + */ +bool SK_API AsWinding(const SkPath& path, SkPath* result); + +/** Perform a series of path operations, optimized for unioning many paths together. + */ +class SK_API SkOpBuilder { +public: + /** Add one or more paths and their operand. The builder is empty before the first + path is added, so the result of a single add is (emptyPath OP path). + + @param path The second operand. + @param _operator The operator to apply to the existing and supplied paths. + */ + void add(const SkPath& path, SkPathOp _operator); + + /** Computes the sum of all paths and operands, and resets the builder to its + initial state. + + @param result The product of the operands. + @return True if the operation succeeded. 
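A short usage sketch (not part of this diff) of the entry points documented above, exercising Op(), Simplify(), and SkOpBuilder exactly as declared in this header; the only additional assumption is SkPath from include/core/SkPath.h.

#include "include/core/SkPath.h"
#include "include/pathops/SkPathOps.h"

// Intersect two paths, then reduce the result to non-overlapping contours.
bool intersectAndSimplify(const SkPath& a, const SkPath& b, SkPath* out) {
    SkPath intersection;
    if (!Op(a, b, kIntersect_SkPathOp, &intersection)) {
        return false;                               // the operands could not be combined
    }
    return Simplify(intersection, out);
}

// Union many paths at once; SkOpBuilder is optimized for this case.
SkPath unionMany(const SkPath* paths, int count) {
    SkOpBuilder builder;
    for (int i = 0; i < count; ++i) {
        builder.add(paths[i], kUnion_SkPathOp);     // builder starts empty, so the first add is (empty OP path)
    }
    SkPath result;
    builder.resolve(&result);                       // computes the sum and resets the builder
    return result;
}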
+ */ + bool resolve(SkPath* result); + +private: + SkTArray<SkPath> fPathRefs; + SkTDArray<SkPathOp> fOps; + + static bool FixWinding(SkPath* path); + static void ReversePath(SkPath* path); + void reset(); +}; + +#endif diff --git a/src/deps/skia/include/ports/BUILD.bazel b/src/deps/skia/include/ports/BUILD.bazel new file mode 100644 index 000000000..4b9b65e07 --- /dev/null +++ b/src/deps/skia/include/ports/BUILD.bazel @@ -0,0 +1,159 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkCFObject_hdr", + hdrs = ["SkCFObject.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkFontConfigInterface_hdr", + hdrs = ["SkFontConfigInterface.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkFontStyle_hdr", + "//include/core:SkRefCnt_hdr", + "//include/core:SkStream_hdr", + "//include/core:SkTypeface_hdr", + ], +) + +generated_cc_atom( + name = "SkFontMgr_FontConfigInterface_hdr", + hdrs = ["SkFontMgr_FontConfigInterface.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFontMgr_android_hdr", + hdrs = ["SkFontMgr_android.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkRefCnt_hdr"], +) + +generated_cc_atom( + name = "SkFontMgr_directory_hdr", + hdrs = ["SkFontMgr_directory.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFontMgr_empty_hdr", + hdrs = ["SkFontMgr_empty.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFontMgr_fontconfig_hdr", + hdrs = ["SkFontMgr_fontconfig.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkRefCnt_hdr"], +) + +generated_cc_atom( + name = "SkFontMgr_fuchsia_hdr", + hdrs = ["SkFontMgr_fuchsia.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkRefCnt_hdr"], +) + +generated_cc_atom( + name = "SkFontMgr_indirect_hdr", + hdrs = ["SkFontMgr_indirect.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkRemotableFontMgr_hdr", + "//include/core:SkFontMgr_hdr", + "//include/core:SkRefCnt_hdr", + "//include/core:SkTypeface_hdr", + "//include/core:SkTypes_hdr", + "//include/private:SkMutex_hdr", + "//include/private:SkOnce_hdr", + "//include/private:SkTArray_hdr", + ], +) + +generated_cc_atom( + name = "SkFontMgr_mac_ct_hdr", + hdrs = ["SkFontMgr_mac_ct.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkImageGeneratorCG_hdr", + hdrs = ["SkImageGeneratorCG.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkData_hdr", + "//include/core:SkImageGenerator_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkImageGeneratorNDK_hdr", + hdrs = ["SkImageGeneratorNDK.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkData_hdr", + "//include/core:SkImageGenerator_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkImageGeneratorWIC_hdr", + hdrs = ["SkImageGeneratorWIC.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkData_hdr", + "//include/core:SkImageGenerator_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + 
name = "SkRemotableFontMgr_hdr", + hdrs = ["SkRemotableFontMgr.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkFontStyle_hdr", + "//include/core:SkRefCnt_hdr", + "//include/core:SkTypes_hdr", + "//include/private:SkTemplates_hdr", + ], +) + +generated_cc_atom( + name = "SkTypeface_mac_hdr", + hdrs = ["SkTypeface_mac.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypeface_hdr"], +) + +generated_cc_atom( + name = "SkTypeface_win_hdr", + hdrs = ["SkTypeface_win.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkTypeface_hdr", + "//include/core:SkTypes_hdr", + ], +) diff --git a/src/deps/skia/include/ports/SkCFObject.h b/src/deps/skia/include/ports/SkCFObject.h new file mode 100644 index 000000000..4dc70863f --- /dev/null +++ b/src/deps/skia/include/ports/SkCFObject.h @@ -0,0 +1,184 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCFObject_DEFINED +#define SkCFObject_DEFINED + +#ifdef __APPLE__ + +#include "include/core/SkTypes.h" + +#include <cstddef> // std::nullptr_t + +#import <CoreFoundation/CoreFoundation.h> + +/** + * Wrapper class for managing lifetime of CoreFoundation objects. It will call + * CFRetain and CFRelease appropriately on creation, assignment, and deletion. + * Based on sk_sp<>. + */ +template <typename T> static inline T SkCFSafeRetain(T obj) { + if (obj) { + CFRetain(obj); + } + return obj; +} + +template <typename T> static inline void SkCFSafeRelease(T obj) { + if (obj) { + CFRelease(obj); + } +} + +template <typename T> class sk_cfp { +public: + using element_type = T; + + constexpr sk_cfp() {} + constexpr sk_cfp(std::nullptr_t) {} + + /** + * Shares the underlying object by calling CFRetain(), so that both the argument and the newly + * created sk_cfp both have a reference to it. + */ + sk_cfp(const sk_cfp<T>& that) : fObject(SkCFSafeRetain(that.get())) {} + + /** + * Move the underlying object from the argument to the newly created sk_cfp. Afterwards only + * the new sk_cfp will have a reference to the object, and the argument will point to null. + * No call to CFRetain() or CFRelease() will be made. + */ + sk_cfp(sk_cfp<T>&& that) : fObject(that.release()) {} + + /** + * Adopt the bare object into the newly created sk_cfp. + * No call to CFRetain() or CFRelease() will be made. + */ + explicit sk_cfp(T obj) { + fObject = obj; + } + + /** + * Calls CFRelease() on the underlying object pointer. + */ + ~sk_cfp() { + SkCFSafeRelease(fObject); + SkDEBUGCODE(fObject = nil); + } + + sk_cfp<T>& operator=(std::nullptr_t) { this->reset(); return *this; } + + /** + * Shares the underlying object referenced by the argument by calling CFRetain() on it. If this + * sk_cfp previously had a reference to an object (i.e. not null) it will call CFRelease() + * on that object. + */ + sk_cfp<T>& operator=(const sk_cfp<T>& that) { + if (this != &that) { + this->reset(SkCFSafeRetain(that.get())); + } + return *this; + } + + /** + * Move the underlying object from the argument to the sk_cfp. If the sk_cfp + * previously held a reference to another object, CFRelease() will be called on that object. + * No call to CFRetain() will be made. 
+ */ + sk_cfp<T>& operator=(sk_cfp<T>&& that) { + this->reset(that.release()); + return *this; + } + + explicit operator bool() const { return this->get() != nil; } + + T get() const { return fObject; } + T operator*() const { + SkASSERT(fObject); + return fObject; + } + + /** + * Adopt the new object, and call CFRelease() on any previously held object (if not null). + * No call to CFRetain() will be made. + */ + void reset(T object = nil) { + // Need to unref after assigning, see + // http://wg21.cmeerw.net/lwg/issue998 + // http://wg21.cmeerw.net/lwg/issue2262 + T oldObject = fObject; + fObject = object; + SkCFSafeRelease(oldObject); + } + + /** + * Shares the new object by calling CFRetain() on it. If this sk_cfp previously had a + * reference to an object (i.e. not null) it will call CFRelease() on that object. + */ + void retain(T object) { + if (fObject != object) { + this->reset(SkCFSafeRetain(object)); + } + } + + /** + * Return the original object, and set the internal object to nullptr. + * The caller must assume ownership of the object, and manage its reference count directly. + * No call to CFRelease() will be made. + */ + T SK_WARN_UNUSED_RESULT release() { + T obj = fObject; + fObject = nil; + return obj; + } + +private: + T fObject = nil; +}; + +template <typename T> inline bool operator==(const sk_cfp<T>& a, + const sk_cfp<T>& b) { + return a.get() == b.get(); +} +template <typename T> inline bool operator==(const sk_cfp<T>& a, + std::nullptr_t) { + return !a; +} +template <typename T> inline bool operator==(std::nullptr_t, + const sk_cfp<T>& b) { + return !b; +} + +template <typename T> inline bool operator!=(const sk_cfp<T>& a, + const sk_cfp<T>& b) { + return a.get() != b.get(); +} +template <typename T> inline bool operator!=(const sk_cfp<T>& a, + std::nullptr_t) { + return static_cast<bool>(a); +} +template <typename T> inline bool operator!=(std::nullptr_t, + const sk_cfp<T>& b) { + return static_cast<bool>(b); +} + +/* + * Returns a sk_cfp wrapping the provided object AND calls retain on it (if not null). + * + * This is different than the semantics of the constructor for sk_cfp, which just wraps the + * object, effectively "adopting" it. + */ +template <typename T> sk_cfp<T> sk_ret_cfp(T obj) { + return sk_cfp<T>(SkCFSafeRetain(obj)); +} + +// For Flutter. +// TODO: migrate them away from this and remove +template <typename T> using sk_cf_obj = sk_cfp<T>; + +#endif // __APPLE__ +#endif // SkCFObject_DEFINED diff --git a/src/deps/skia/include/ports/SkFontConfigInterface.h b/src/deps/skia/include/ports/SkFontConfigInterface.h new file mode 100644 index 000000000..65fd61259 --- /dev/null +++ b/src/deps/skia/include/ports/SkFontConfigInterface.h @@ -0,0 +1,115 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontConfigInterface_DEFINED +#define SkFontConfigInterface_DEFINED + +#include "include/core/SkFontStyle.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkStream.h" +#include "include/core/SkTypeface.h" + +class SkFontMgr; + +/** + * \class SkFontConfigInterface + * + * A simple interface for remotable font management. + * The global instance can be found with RefGlobal(). + */ +class SK_API SkFontConfigInterface : public SkRefCnt { +public: + + /** + * Returns the global SkFontConfigInterface instance. If it is not + * nullptr, calls ref() on it. The caller must balance this with a call to + * unref(). 
The default SkFontConfigInterface is the result of calling + * GetSingletonDirectInterface. + */ + static sk_sp<SkFontConfigInterface> RefGlobal(); + + /** + * Replace the current global instance with the specified one. + */ + static void SetGlobal(sk_sp<SkFontConfigInterface> fc); + + /** + * This should be treated as private to the impl of SkFontConfigInterface. + * Callers should not change or expect any particular values. It is meant + * to be a union of possible storage types to aid the impl. + */ + struct FontIdentity { + FontIdentity() : fID(0), fTTCIndex(0) {} + + bool operator==(const FontIdentity& other) const { + return fID == other.fID && + fTTCIndex == other.fTTCIndex && + fString == other.fString; + } + bool operator!=(const FontIdentity& other) const { + return !(*this == other); + } + + uint32_t fID; + int32_t fTTCIndex; + SkString fString; + SkFontStyle fStyle; + + // If buffer is NULL, just return the number of bytes that would have + // been written. Will pad contents to a multiple of 4. + size_t writeToMemory(void* buffer = nullptr) const; + + // Recreate from a flattened buffer, returning the number of bytes read. + size_t readFromMemory(const void* buffer, size_t length); + }; + + /** + * Given a familyName and style, find the best match. + * + * If a match is found, return true and set its outFontIdentifier. + * If outFamilyName is not null, assign the found familyName to it + * (which may differ from the requested familyName). + * If outStyle is not null, assign the found style to it + * (which may differ from the requested style). + * + * If a match is not found, return false, and ignore all out parameters. + */ + virtual bool matchFamilyName(const char familyName[], + SkFontStyle requested, + FontIdentity* outFontIdentifier, + SkString* outFamilyName, + SkFontStyle* outStyle) = 0; + + /** + * Given a FontRef, open a stream to access its data, or return null + * if the FontRef's data is not available. The caller is responsible for + * deleting the stream when it is done accessing the data. + */ + virtual SkStreamAsset* openStream(const FontIdentity&) = 0; + + /** + * Return an SkTypeface for the given FontIdentity. + * + * The default implementation simply returns a new typeface built using data obtained from + * openStream(), but derived classes may implement more complex caching schemes. + */ + virtual sk_sp<SkTypeface> makeTypeface(const FontIdentity& identity) { + return SkTypeface::MakeFromStream(std::unique_ptr<SkStreamAsset>(this->openStream(identity)), + identity.fTTCIndex); + + } + + /** + * Return a singleton instance of a direct subclass that calls into + * libfontconfig. This does not affect the refcnt of the returned instance. + */ + static SkFontConfigInterface* GetSingletonDirectInterface(); + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/ports/SkFontMgr_FontConfigInterface.h b/src/deps/skia/include/ports/SkFontMgr_FontConfigInterface.h new file mode 100644 index 000000000..05771257d --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_FontConfigInterface.h @@ -0,0 +1,20 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_FontConfigInterface_DEFINED +#define SkFontMgr_FontConfigInterface_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" + +class SkFontMgr; +class SkFontConfigInterface; + +/** Creates a SkFontMgr which wraps a SkFontConfigInterface. 
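+ *
+ *  A sketch of the intended wiring (MyFCI is a hypothetical SkFontConfigInterface
+ *  subclass; any concrete implementation would do):
+ *
+ *    sk_sp<SkFontConfigInterface> fci(new MyFCI());
+ *    sk_sp<SkFontMgr> mgr = SkFontMgr_New_FCI(std::move(fci));
+ *    sk_sp<SkTypeface> face = mgr->legacyMakeTypeface(nullptr, SkFontStyle());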
*/ +SK_API sk_sp<SkFontMgr> SkFontMgr_New_FCI(sk_sp<SkFontConfigInterface> fci); + +#endif // #ifndef SkFontMgr_FontConfigInterface_DEFINED diff --git a/src/deps/skia/include/ports/SkFontMgr_android.h b/src/deps/skia/include/ports/SkFontMgr_android.h new file mode 100644 index 000000000..d68f3ba03 --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_android.h @@ -0,0 +1,45 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_android_DEFINED +#define SkFontMgr_android_DEFINED + +#include "include/core/SkRefCnt.h" + +class SkFontMgr; + +struct SkFontMgr_Android_CustomFonts { + /** When specifying custom fonts, indicates how to use system fonts. */ + enum SystemFontUse { + kOnlyCustom, /** Use only custom fonts. NDK compliant. */ + kPreferCustom, /** Use custom fonts before system fonts. */ + kPreferSystem /** Use system fonts before custom fonts. */ + }; + /** Whether or not to use system fonts. */ + SystemFontUse fSystemFontUse; + + /** Base path to resolve relative font file names. If a directory, should end with '/'. */ + const char* fBasePath; + + /** Optional custom configuration file to use. */ + const char* fFontsXml; + + /** Optional custom configuration file for fonts which provide fallback. + * In the new style (version > 21) fontsXml format is used, this should be NULL. + */ + const char* fFallbackFontsXml; + + /** Optional custom flag. If set to true the SkFontMgr will acquire all requisite + * system IO resources on initialization. + */ + bool fIsolated; +}; + +/** Create a font manager for Android. If 'custom' is NULL, use only system fonts. */ +SK_API sk_sp<SkFontMgr> SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom); + +#endif // SkFontMgr_android_DEFINED diff --git a/src/deps/skia/include/ports/SkFontMgr_directory.h b/src/deps/skia/include/ports/SkFontMgr_directory.h new file mode 100644 index 000000000..b1a60fb4d --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_directory.h @@ -0,0 +1,21 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_directory_DEFINED +#define SkFontMgr_directory_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" + +class SkFontMgr; + +/** Create a custom font manager which scans a given directory for font files. + * This font manager uses FreeType for rendering. + */ +SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Directory(const char* dir); + +#endif // SkFontMgr_directory_DEFINED diff --git a/src/deps/skia/include/ports/SkFontMgr_empty.h b/src/deps/skia/include/ports/SkFontMgr_empty.h new file mode 100644 index 000000000..e5756421d --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_empty.h @@ -0,0 +1,21 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_empty_DEFINED +#define SkFontMgr_empty_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" + +class SkFontMgr; + +/** Create a custom font manager that contains no built-in fonts. + * This font manager uses FreeType for rendering. 
+ */ +SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Empty(); + +#endif // SkFontMgr_empty_DEFINED diff --git a/src/deps/skia/include/ports/SkFontMgr_fontconfig.h b/src/deps/skia/include/ports/SkFontMgr_fontconfig.h new file mode 100644 index 000000000..4b2bb2d29 --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_fontconfig.h @@ -0,0 +1,22 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_fontconfig_DEFINED +#define SkFontMgr_fontconfig_DEFINED + +#include "include/core/SkRefCnt.h" +#include <fontconfig/fontconfig.h> + +class SkFontMgr; + +/** Create a font manager around a FontConfig instance. + * If 'fc' is NULL, will use a new default config. + * Takes ownership of 'fc' and will call FcConfigDestroy on it. + */ +SK_API sk_sp<SkFontMgr> SkFontMgr_New_FontConfig(FcConfig* fc); + +#endif // #ifndef SkFontMgr_fontconfig_DEFINED diff --git a/src/deps/skia/include/ports/SkFontMgr_fuchsia.h b/src/deps/skia/include/ports/SkFontMgr_fuchsia.h new file mode 100644 index 000000000..d20530af7 --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_fuchsia.h @@ -0,0 +1,19 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_fuchsia_DEFINED +#define SkFontMgr_fuchsia_DEFINED + +#include <fuchsia/fonts/cpp/fidl.h> + +#include "include/core/SkRefCnt.h" + +class SkFontMgr; + +SK_API sk_sp<SkFontMgr> SkFontMgr_New_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider); + +#endif // SkFontMgr_fuchsia_DEFINED diff --git a/src/deps/skia/include/ports/SkFontMgr_indirect.h b/src/deps/skia/include/ports/SkFontMgr_indirect.h new file mode 100644 index 000000000..73210bc9a --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_indirect.h @@ -0,0 +1,102 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_indirect_DEFINED +#define SkFontMgr_indirect_DEFINED + +#include "include/core/SkFontMgr.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypeface.h" +#include "include/core/SkTypes.h" +#include "include/ports/SkRemotableFontMgr.h" +#include "include/private/SkMutex.h" +#include "include/private/SkOnce.h" +#include "include/private/SkTArray.h" + +class SkData; +class SkFontStyle; +class SkStreamAsset; +class SkString; + +class SK_API SkFontMgr_Indirect : public SkFontMgr { +public: + // TODO: The SkFontMgr is only used for createFromStream/File/Data. + // In the future these calls should be broken out into their own interface + // with a name like SkFontRenderer. 
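+    //
+    // A minimal composition sketch (MakeLocalRenderer() and MakeRemoteProxy() are
+    // hypothetical helpers standing in for whatever supplies the two halves):
+    //
+    //   sk_sp<SkFontMgr>          renderer = MakeLocalRenderer();  // renders font data locally
+    //   sk_sp<SkRemotableFontMgr> proxy    = MakeRemoteProxy();    // matches/enumerates remotely
+    //   sk_sp<SkFontMgr> mgr(new SkFontMgr_Indirect(std::move(renderer), std::move(proxy)));
+    //   sk_sp<SkTypeface> face(mgr->matchFamilyStyle("Roboto", SkFontStyle::Bold()));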
+ SkFontMgr_Indirect(sk_sp<SkFontMgr> impl, sk_sp<SkRemotableFontMgr> proxy) + : fImpl(std::move(impl)), fProxy(std::move(proxy)) + { } + +protected: + int onCountFamilies() const override; + void onGetFamilyName(int index, SkString* familyName) const override; + SkFontStyleSet* onCreateStyleSet(int index) const override; + + SkFontStyleSet* onMatchFamily(const char familyName[]) const override; + + SkTypeface* onMatchFamilyStyle(const char familyName[], + const SkFontStyle& fontStyle) const override; + + SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], + const SkFontStyle&, + const char* bcp47[], + int bcp47Count, + SkUnichar character) const override; + + sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int ttcIndex) const override; + sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream, + const SkFontArguments& args) const override; + sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override; + sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override; + sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override; + +private: + SkTypeface* createTypefaceFromFontId(const SkFontIdentity& fontId) const; + + sk_sp<SkFontMgr> fImpl; + sk_sp<SkRemotableFontMgr> fProxy; + + struct DataEntry { + uint32_t fDataId; // key1 + uint32_t fTtcIndex; // key2 + SkTypeface* fTypeface; // value: weak ref to typeface + + DataEntry() = default; + + DataEntry(DataEntry&& that) { *this = std::move(that); } + DataEntry& operator=(DataEntry&& that) { + if (this != &that) { + fDataId = that.fDataId; + fTtcIndex = that.fTtcIndex; + fTypeface = that.fTypeface; + + SkDEBUGCODE(that.fDataId = SkFontIdentity::kInvalidDataId;) + SkDEBUGCODE(that.fTtcIndex = 0xbbadbeef;) + that.fTypeface = nullptr; + } + return *this; + } + + ~DataEntry() { + if (fTypeface) { + fTypeface->weak_unref(); + } + } + }; + /** + * This cache is essentially { dataId: { ttcIndex: typeface } } + * For data caching we want a mapping from data id to weak references to + * typefaces with that data id. By storing the index next to the typeface, + * this data cache also acts as a typeface cache. + */ + mutable SkTArray<DataEntry> fDataCache; + mutable SkMutex fDataCacheMutex; + + friend class SkStyleSet_Indirect; +}; + +#endif diff --git a/src/deps/skia/include/ports/SkFontMgr_mac_ct.h b/src/deps/skia/include/ports/SkFontMgr_mac_ct.h new file mode 100644 index 000000000..45cba65b5 --- /dev/null +++ b/src/deps/skia/include/ports/SkFontMgr_mac_ct.h @@ -0,0 +1,27 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFontMgr_mac_ct_DEFINED +#define SkFontMgr_mac_ct_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" + +#ifdef SK_BUILD_FOR_MAC +#import <ApplicationServices/ApplicationServices.h> +#endif + +#ifdef SK_BUILD_FOR_IOS +#include <CoreText/CoreText.h> +#endif + +class SkFontMgr; + +/** Create a font manager for CoreText. If the collection is nullptr the system default will be used. 
*/ +SK_API extern sk_sp<SkFontMgr> SkFontMgr_New_CoreText(CTFontCollectionRef); + +#endif // SkFontMgr_mac_ct_DEFINED diff --git a/src/deps/skia/include/ports/SkImageGeneratorCG.h b/src/deps/skia/include/ports/SkImageGeneratorCG.h new file mode 100644 index 000000000..93592cde4 --- /dev/null +++ b/src/deps/skia/include/ports/SkImageGeneratorCG.h @@ -0,0 +1,20 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "include/core/SkTypes.h" +#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS) + +#include "include/core/SkData.h" +#include "include/core/SkImageGenerator.h" + +#include <memory> + +namespace SkImageGeneratorCG { +SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedCG(sk_sp<SkData>); +} // namespace SkImageGeneratorCG + +#endif //defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS) diff --git a/src/deps/skia/include/ports/SkImageGeneratorNDK.h b/src/deps/skia/include/ports/SkImageGeneratorNDK.h new file mode 100644 index 000000000..739a586f0 --- /dev/null +++ b/src/deps/skia/include/ports/SkImageGeneratorNDK.h @@ -0,0 +1,40 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageGeneratorNDK_DEFINED +#define SkImageGeneratorNDK_DEFINED + +#include "include/core/SkTypes.h" +#ifdef SK_ENABLE_NDK_IMAGES + +#include "include/core/SkData.h" +#include "include/core/SkImageGenerator.h" + +#include <memory> + +namespace SkImageGeneratorNDK { +/** + * Create a generator that uses the Android NDK's APIs for decoding images. + * + * Only supported on devices where __ANDROID_API__ >= 30. + * + * As with SkCodec, the SkColorSpace passed to getPixels() determines the + * type of color space transformations to apply. A null SkColorSpace means to + * apply none. + * + * A note on scaling: Calling getPixels() on the resulting SkImageGenerator + * with dimensions that do not match getInfo() requests a scale. For WebP + * files, dimensions smaller than those of getInfo are supported. For Jpeg + * files, dimensions of 1/2, 1/4, and 1/8 are supported. TODO: Provide an + * API like SkCodecImageGenerator::getScaledDimensions() to report which + * dimensions are supported? + */ +SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedNDK(sk_sp<SkData>); +} + +#endif // SK_ENABLE_NDK_IMAGES +#endif // SkImageGeneratorNDK_DEFINED diff --git a/src/deps/skia/include/ports/SkImageGeneratorWIC.h b/src/deps/skia/include/ports/SkImageGeneratorWIC.h new file mode 100644 index 000000000..eb57a2095 --- /dev/null +++ b/src/deps/skia/include/ports/SkImageGeneratorWIC.h @@ -0,0 +1,35 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "include/core/SkTypes.h" + +#if defined(SK_BUILD_FOR_WIN) + +#include "include/core/SkData.h" +#include "include/core/SkImageGenerator.h" + +#include <memory> + +/* + * Any Windows program that uses COM must initialize the COM library by calling + * the CoInitializeEx function. In addition, each thread that uses a COM + * interface must make a separate call to this function. + * + * For every successful call to CoInitializeEx, the thread must call + * CoUninitialize before it exits. + * + * SkImageGeneratorWIC requires the COM library and leaves it to the client to + * initialize COM for their application. 
+ * + * For more information on initializing COM, please see: + * https://msdn.microsoft.com/en-us/library/windows/desktop/ff485844.aspx + */ +namespace SkImageGeneratorWIC { +SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedWIC(sk_sp<SkData>); +} + +#endif // SK_BUILD_FOR_WIN diff --git a/src/deps/skia/include/ports/SkRemotableFontMgr.h b/src/deps/skia/include/ports/SkRemotableFontMgr.h new file mode 100644 index 000000000..8017b77cd --- /dev/null +++ b/src/deps/skia/include/ports/SkRemotableFontMgr.h @@ -0,0 +1,139 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRemotableFontMgr_DEFINED +#define SkRemotableFontMgr_DEFINED + +#include "include/core/SkFontStyle.h" +#include "include/core/SkRefCnt.h" +#include "include/core/SkTypes.h" +#include "include/private/SkTemplates.h" + +class SkDataTable; +class SkStreamAsset; + +struct SK_API SkFontIdentity { + static const uint32_t kInvalidDataId = 0xFFFFFFFF; + + // Note that fDataId is a data identifier, not a font identifier. + // (fDataID, fTtcIndex) can be seen as a font identifier. + uint32_t fDataId; + uint32_t fTtcIndex; + + // On Linux/FontConfig there is also the ability to specify preferences for rendering + // antialias, embedded bitmaps, autohint, hinting, hintstyle, lcd rendering + // may all be set or set to no-preference + // (No-preference is resolved against globals set by the platform) + // Since they may be selected against, these are really 'extensions' to SkFontStyle. + // SkFontStyle should pick these up. + SkFontStyle fFontStyle; +}; + +class SK_API SkRemotableFontIdentitySet : public SkRefCnt { +public: + SkRemotableFontIdentitySet(int count, SkFontIdentity** data); + + int count() const { return fCount; } + const SkFontIdentity& at(int index) const { return fData[index]; } + + static SkRemotableFontIdentitySet* NewEmpty(); + +private: + SkRemotableFontIdentitySet() : fCount(0), fData() { } + + friend SkRemotableFontIdentitySet* sk_remotable_font_identity_set_new(); + + int fCount; + SkAutoTArray<SkFontIdentity> fData; + + using INHERITED = SkRefCnt; +}; + +class SK_API SkRemotableFontMgr : public SkRefCnt { +public: + /** + * Returns all of the fonts with the given familyIndex. + * Returns NULL if the index is out of bounds. + * Returns empty if there are no fonts at the given index. + * + * The caller must unref() the returned object. + */ + virtual SkRemotableFontIdentitySet* getIndex(int familyIndex) const = 0; + + /** + * Returns the closest match to the given style in the given index. + * If there are no available fonts at the given index, the return value's + * data id will be kInvalidDataId. + */ + virtual SkFontIdentity matchIndexStyle(int familyIndex, const SkFontStyle&) const = 0; + + /** + * Returns all the fonts on the system with the given name. + * If the given name is NULL, will return the default font family. + * Never returns NULL; will return an empty set if the name is not found. + * + * It is possible that this will return fonts not accessible from + * getIndex(int) or matchIndexStyle(int, SkFontStyle) due to + * hidden or auto-activated fonts. + * + * The matching may be done in a system dependent way. The name may be + * matched case-insensitive, there may be system aliases which resolve, + * and names outside the current locale may be considered. However, this + * should only return fonts which are somehow associated with the requested + * name. 
+ * + * The caller must unref() the returned object. + */ + virtual SkRemotableFontIdentitySet* matchName(const char familyName[]) const = 0; + + /** + * Returns the closest matching font to the specified name and style. + * If there are no available fonts which match the name, the return value's + * data id will be kInvalidDataId. + * If the given name is NULL, the match will be against any default fonts. + * + * It is possible that this will return a font identity not accessible from + * methods returning sets due to hidden or auto-activated fonts. + * + * The matching may be done in a system dependent way. The name may be + * matched case-insensitive, there may be system aliases which resolve, + * and names outside the current locale may be considered. However, this + * should only return a font which is somehow associated with the requested + * name. + * + * The caller must unref() the returned object. + */ + virtual SkFontIdentity matchNameStyle(const char familyName[], const SkFontStyle&) const = 0; + + /** + * Use the system fall-back to find a font for the given character. + * If no font can be found for the character, the return value's data id + * will be kInvalidDataId. + * If the name is NULL, the match will start against any default fonts. + * If the bpc47 is NULL, a default locale will be assumed. + * + * Note that bpc47 is a combination of ISO 639, 15924, and 3166-1 codes, + * so it is fine to just pass a ISO 639 here. + */ + virtual SkFontIdentity matchNameStyleCharacter(const char familyName[], const SkFontStyle&, + const char* bcp47[], int bcp47Count, + SkUnichar character) const=0; + + /** + * Returns the data for the given data id. + * Will return NULL if the data id is invalid. + * Note that this is a data id, not a font id. + * + * The caller must unref() the returned object. + */ + virtual SkStreamAsset* getData(int dataId) const = 0; + +private: + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/ports/SkTypeface_mac.h b/src/deps/skia/include/ports/SkTypeface_mac.h new file mode 100644 index 000000000..ec68e0549 --- /dev/null +++ b/src/deps/skia/include/ports/SkTypeface_mac.h @@ -0,0 +1,44 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTypeface_mac_DEFINED +#define SkTypeface_mac_DEFINED + +#include "include/core/SkTypeface.h" + +#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS) + +#include <CoreFoundation/CoreFoundation.h> + +#ifdef SK_BUILD_FOR_MAC +#import <ApplicationServices/ApplicationServices.h> +#endif + +#ifdef SK_BUILD_FOR_IOS +#include <CoreText/CoreText.h> +#endif + +/** + * Like the other Typeface make methods, this returns a new reference to the + * corresponding typeface for the specified CTFontRef. + */ +SK_API extern sk_sp<SkTypeface> SkMakeTypefaceFromCTFont(CTFontRef); + +/** + * Returns the platform-specific CTFontRef handle for a + * given SkTypeface. Note that the returned CTFontRef gets + * released when the source SkTypeface is destroyed. + * + * This method is deprecated. It may only be used by Blink Mac + * legacy code in special cases related to text-shaping + * with AAT fonts, clipboard handling and font fallback. 
+ * See https://code.google.com/p/skia/issues/detail?id=3408 + */ +SK_API extern CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face); + +#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS) +#endif // SkTypeface_mac_DEFINED diff --git a/src/deps/skia/include/ports/SkTypeface_win.h b/src/deps/skia/include/ports/SkTypeface_win.h new file mode 100644 index 000000000..f659adf0e --- /dev/null +++ b/src/deps/skia/include/ports/SkTypeface_win.h @@ -0,0 +1,79 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTypeface_win_DEFINED +#define SkTypeface_win_DEFINED + +#include "include/core/SkTypeface.h" +#include "include/core/SkTypes.h" + +#ifdef SK_BUILD_FOR_WIN + +#ifdef UNICODE +typedef struct tagLOGFONTW LOGFONTW; +typedef LOGFONTW LOGFONT; +#else +typedef struct tagLOGFONTA LOGFONTA; +typedef LOGFONTA LOGFONT; +#endif // UNICODE + +/** + * Like the other Typeface create methods, this returns a new reference to the + * corresponding typeface for the specified logfont. The caller is responsible + * for calling unref() when it is finished. + */ +SK_API SkTypeface* SkCreateTypefaceFromLOGFONT(const LOGFONT&); + +/** + * Copy the LOGFONT associated with this typeface into the lf parameter. Note + * that the lfHeight will need to be set afterwards, since the typeface does + * not track this (the paint does). + * typeface may be NULL, in which case we return the logfont for the default font. + */ +SK_API void SkLOGFONTFromTypeface(const SkTypeface* typeface, LOGFONT* lf); + +/** + * Set an optional callback to ensure that the data behind a LOGFONT is loaded. + * This will get called if Skia tries to access the data but hits a failure. + * Normally this is null, and is only required if the font data needs to be + * remotely (re)loaded. + */ +SK_API void SkTypeface_SetEnsureLOGFONTAccessibleProc(void (*)(const LOGFONT&)); + +// Experimental! +// +class SkFontMgr; +class SkRemotableFontMgr; +struct IDWriteFactory; +struct IDWriteFontCollection; +struct IDWriteFontFallback; + +SK_API sk_sp<SkFontMgr> SkFontMgr_New_GDI(); +SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory = NULL, + IDWriteFontCollection* collection = NULL); +SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory, + IDWriteFontCollection* collection, + IDWriteFontFallback* fallback); + +/** + * Creates an SkFontMgr which renders using DirectWrite and obtains its data + * from the SkRemotableFontMgr. + * + * If DirectWrite could not be initialized, will return NULL. + */ +SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWriteRenderer(sk_sp<SkRemotableFontMgr>); + +/** + * Creates an SkRemotableFontMgr backed by DirectWrite using the default + * system font collection in the current locale. + * + * If DirectWrite could not be initialized, will return NULL. 
+ */ +SK_API sk_sp<SkRemotableFontMgr> SkRemotableFontMgr_New_DirectWrite(); + +#endif // SK_BUILD_FOR_WIN +#endif // SkTypeface_win_DEFINED diff --git a/src/deps/skia/include/private/BUILD.bazel b/src/deps/skia/include/private/BUILD.bazel new file mode 100644 index 000000000..5d338ddf9 --- /dev/null +++ b/src/deps/skia/include/private/BUILD.bazel @@ -0,0 +1,572 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrContext_Base_hdr", + hdrs = ["GrContext_Base.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/gpu:GrBackendSurface_hdr", + "//include/gpu:GrContextOptions_hdr", + "//include/gpu:GrTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrD3DTypesMinimal_hdr", + hdrs = ["GrD3DTypesMinimal.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/gpu:GrTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrDawnTypesPriv_hdr", + hdrs = ["GrDawnTypesPriv.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/gpu/dawn:GrDawnTypes_hdr"], +) + +generated_cc_atom( + name = "GrGLTypesPriv_hdr", + hdrs = ["GrGLTypesPriv.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/gpu/gl:GrGLTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrImageContext_hdr", + hdrs = ["GrImageContext.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":GrContext_Base_hdr", + ":GrSingleOwner_hdr", + ], +) + +generated_cc_atom( + name = "GrMockTypesPriv_hdr", + hdrs = ["GrMockTypesPriv.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/gpu/mock:GrMockTypes_hdr"], +) + +generated_cc_atom( + name = "GrMtlTypesPriv_hdr", + hdrs = ["GrMtlTypesPriv.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/gpu:GrTypes_hdr", + "//include/gpu/mtl:GrMtlTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrSingleOwner_hdr", + hdrs = ["GrSingleOwner.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMutex_hdr", + ":SkThreadID_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrTypesPriv_hdr", + hdrs = ["GrTypesPriv.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkImageInfoPriv_hdr", + ":SkMacros_hdr", + "//include/core:SkImageInfo_hdr", + "//include/core:SkImage_hdr", + "//include/core:SkPath_hdr", + "//include/core:SkRefCnt_hdr", + "//include/gpu:GrTypes_hdr", + ], +) + +generated_cc_atom( + name = "GrVkTypesPriv_hdr", + hdrs = ["GrVkTypesPriv.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRefCnt_hdr", + "//include/gpu/vk:GrVkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkBitmaskEnum_hdr", + hdrs = ["SkBitmaskEnum.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkChecksum_hdr", + hdrs = ["SkChecksum.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkNoncopyable_hdr", + ":SkOpts_spi_hdr", + ":SkTLogic_hdr", + "//include/core:SkString_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkColorData_hdr", + hdrs = ["SkColorData.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkNx_hdr", + ":SkTo_hdr", + "//include/core:SkColorPriv_hdr", + "//include/core:SkColor_hdr", + ], +) + +generated_cc_atom( + name = "SkDeque_hdr", + hdrs = ["SkDeque.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkEncodedInfo_hdr", + hdrs = ["SkEncodedInfo.h"], + visibility = ["//:__subpackages__"], + deps = [ + 
"//include/core:SkData_hdr", + "//include/core:SkImageInfo_hdr", + "//include/third_party/skcms:skcms_hdr", + ], +) + +generated_cc_atom( + name = "SkFixed_hdr", + hdrs = ["SkFixed.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkSafe_math_hdr", + ":SkTPin_hdr", + ":SkTo_hdr", + "//include/core:SkScalar_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFloatBits_hdr", + hdrs = ["SkFloatBits.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkSafe_math_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkFloatingPoint_hdr", + hdrs = ["SkFloatingPoint.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkFloatBits_hdr", + ":SkSafe_math_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkHalf_hdr", + hdrs = ["SkHalf.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkNx_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkIDChangeListener_hdr", + hdrs = ["SkIDChangeListener.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMutex_hdr", + ":SkTDArray_hdr", + "//include/core:SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "SkImageInfoPriv_hdr", + hdrs = ["SkImageInfoPriv.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkColor_hdr", + "//include/core:SkImageInfo_hdr", + ], +) + +generated_cc_atom( + name = "SkMacros_hdr", + hdrs = ["SkMacros.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkMalloc_hdr", + hdrs = ["SkMalloc.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkMutex_hdr", + hdrs = ["SkMutex.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMacros_hdr", + ":SkSemaphore_hdr", + ":SkThreadAnnotations_hdr", + ":SkThreadID_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkNoncopyable_hdr", + hdrs = ["SkNoncopyable.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkNx_hdr", + hdrs = ["SkNx.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkNx_neon_hdr", + ":SkNx_sse_hdr", + ":SkSafe_math_hdr", + "//include/core:SkScalar_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkNx_neon_hdr", + hdrs = ["SkNx_neon.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkNx_sse_hdr", + hdrs = ["SkNx_sse.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkOnce_hdr", + hdrs = ["SkOnce.h"], + visibility = ["//:__subpackages__"], + deps = [":SkThreadAnnotations_hdr"], +) + +generated_cc_atom( + name = "SkOpts_spi_hdr", + hdrs = ["SkOpts_spi.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkPathRef_hdr", + hdrs = ["SkPathRef.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkIDChangeListener_hdr", + ":SkMutex_hdr", + ":SkTDArray_hdr", + ":SkTemplates_hdr", + ":SkTo_hdr", + "//include/core:SkMatrix_hdr", + "//include/core:SkPoint_hdr", + "//include/core:SkRRect_hdr", + "//include/core:SkRect_hdr", + "//include/core:SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "SkSLDefines_hdr", + hdrs = ["SkSLDefines.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkTArray_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkSLIRNode_hdr", + hdrs = ["SkSLIRNode.h"], + 
visibility = ["//:__subpackages__"], + deps = [ + ":SkSLString_hdr", + ":SkTArray_hdr", + "//src/sksl:SkSLLexer_hdr", + "//src/sksl:SkSLModifiersPool_hdr", + "//src/sksl:SkSLPool_hdr", + ], +) + +generated_cc_atom( + name = "SkSLLayout_hdr", + hdrs = ["SkSLLayout.h"], + visibility = ["//:__subpackages__"], + deps = [":SkSLString_hdr"], +) + +generated_cc_atom( + name = "SkSLModifiers_hdr", + hdrs = ["SkSLModifiers.h"], + visibility = ["//:__subpackages__"], + deps = [":SkSLLayout_hdr"], +) + +generated_cc_atom( + name = "SkSLProgramElement_hdr", + hdrs = ["SkSLProgramElement.h"], + visibility = ["//:__subpackages__"], + deps = [":SkSLIRNode_hdr"], +) + +generated_cc_atom( + name = "SkSLProgramKind_hdr", + hdrs = ["SkSLProgramKind.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkSLSampleUsage_hdr", + hdrs = ["SkSLSampleUsage.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkSLStatement_hdr", + hdrs = ["SkSLStatement.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkSLIRNode_hdr", + ":SkSLSymbol_hdr", + ], +) + +generated_cc_atom( + name = "SkSLString_hdr", + hdrs = ["SkSLString.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkSLDefines_hdr", + "//include/core:SkStringView_hdr", + "//include/core:SkString_hdr", + ], +) + +generated_cc_atom( + name = "SkSLSymbol_hdr", + hdrs = ["SkSLSymbol.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkSLIRNode_hdr", + ":SkSLProgramElement_hdr", + ], +) + +generated_cc_atom( + name = "SkSafe32_hdr", + hdrs = ["SkSafe32.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkSafe_math_hdr", + hdrs = ["SkSafe_math.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkSemaphore_hdr", + hdrs = ["SkSemaphore.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkOnce_hdr", + ":SkThreadAnnotations_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkShadowFlags_hdr", + hdrs = ["SkShadowFlags.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkSpinlock_hdr", + hdrs = ["SkSpinlock.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkThreadAnnotations_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkTArray_hdr", + hdrs = ["SkTArray.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMalloc_hdr", + ":SkSafe32_hdr", + ":SkTLogic_hdr", + ":SkTemplates_hdr", + ":SkTo_hdr", + "//include/core:SkMath_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkTDArray_hdr", + hdrs = ["SkTDArray.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMalloc_hdr", + ":SkTo_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkTFitsIn_hdr", + hdrs = ["SkTFitsIn.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkTHash_hdr", + hdrs = ["SkTHash.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkChecksum_hdr", + ":SkTemplates_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkTLogic_hdr", + hdrs = ["SkTLogic.h"], + visibility = ["//:__subpackages__"], + deps = [":SkTo_hdr"], +) + +generated_cc_atom( + name = "SkTOptional_hdr", + hdrs = ["SkTOptional.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkTPin_hdr", + hdrs = ["SkTPin.h"], + visibility = 
["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkTemplates_hdr", + hdrs = ["SkTemplates.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkMalloc_hdr", + ":SkTLogic_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkThreadAnnotations_hdr", + hdrs = ["SkThreadAnnotations.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkThreadID_hdr", + hdrs = ["SkThreadID.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkTo_hdr", + hdrs = ["SkTo.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkTFitsIn_hdr", + "//include/core:SkTypes_hdr", + ], +) + +generated_cc_atom( + name = "SkVx_hdr", + hdrs = ["SkVx.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "SkWeakRefCnt_hdr", + hdrs = ["SkWeakRefCnt.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkRefCnt_hdr"], +) + +generated_cc_atom( + name = "SkPaintParamsKey_hdr", + hdrs = ["SkPaintParamsKey.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkShaderCodeDictionary_hdr", + hdrs = ["SkShaderCodeDictionary.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkPaintParamsKey_hdr", + ":SkSpinlock_hdr", + ":SkUniquePaintParamsID_hdr", + "//src/core:SkArenaAlloc_hdr", + ], +) + +generated_cc_atom( + name = "SkUniquePaintParamsID_hdr", + hdrs = ["SkUniquePaintParamsID.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) diff --git a/src/deps/skia/include/private/GrContext_Base.h b/src/deps/skia/include/private/GrContext_Base.h new file mode 100644 index 000000000..19c367da4 --- /dev/null +++ b/src/deps/skia/include/private/GrContext_Base.h @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrContext_Base_DEFINED +#define GrContext_Base_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/gpu/GrBackendSurface.h" +#include "include/gpu/GrContextOptions.h" +#include "include/gpu/GrTypes.h" + +class GrBaseContextPriv; +class GrCaps; +class GrContextThreadSafeProxy; +class GrDirectContext; +class GrImageContext; +class GrRecordingContext; + +class GrContext_Base : public SkRefCnt { +public: + ~GrContext_Base() override; + + /* + * Safely downcast to a GrDirectContext. + */ + virtual GrDirectContext* asDirectContext() { return nullptr; } + + /* + * The 3D API backing this context + */ + SK_API GrBackendApi backend() const; + + /* + * Retrieve the default GrBackendFormat for a given SkColorType and renderability. + * It is guaranteed that this backend format will be the one used by the GrContext + * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods. + * + * The caller should check that the returned format is valid. + */ + SK_API GrBackendFormat defaultBackendFormat(SkColorType, GrRenderable) const; + + SK_API GrBackendFormat compressedBackendFormat(SkImage::CompressionType) const; + + // TODO: When the public version is gone, rename to refThreadSafeProxy and add raw ptr ver. + sk_sp<GrContextThreadSafeProxy> threadSafeProxy(); + + // Provides access to functions that aren't part of the public API. 
+ GrBaseContextPriv priv(); + const GrBaseContextPriv priv() const; // NOLINT(readability-const-return-type) + +protected: + friend class GrBaseContextPriv; // for hidden functions + + GrContext_Base(sk_sp<GrContextThreadSafeProxy>); + + virtual bool init(); + + /** + * An identifier for this context. The id is used by all compatible contexts. For example, + * if SkImages are created on one thread using an image creation context, then fed into a + * DDL Recorder on second thread (which has a recording context) and finally replayed on + * a third thread with a direct context, then all three contexts will report the same id. + * It is an error for an image to be used with contexts that report different ids. + */ + uint32_t contextID() const; + + bool matches(GrContext_Base* candidate) const { + return candidate && candidate->contextID() == this->contextID(); + } + + /* + * The options in effect for this context + */ + const GrContextOptions& options() const; + + const GrCaps* caps() const; + sk_sp<const GrCaps> refCaps() const; + + virtual GrImageContext* asImageContext() { return nullptr; } + virtual GrRecordingContext* asRecordingContext() { return nullptr; } + + sk_sp<GrContextThreadSafeProxy> fThreadSafeProxy; + +private: + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/private/GrD3DTypesMinimal.h b/src/deps/skia/include/private/GrD3DTypesMinimal.h new file mode 100644 index 000000000..049c07bff --- /dev/null +++ b/src/deps/skia/include/private/GrD3DTypesMinimal.h @@ -0,0 +1,74 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrD3DTypesMinimal_DEFINED +#define GrD3DTypesMinimal_DEFINED + +// Minimal definitions of Direct3D types, without including d3d12.h + +#include "include/core/SkRefCnt.h" + +#include <dxgiformat.h> + +#include "include/gpu/GrTypes.h" + +struct ID3D12Resource; +class GrD3DResourceState; +typedef int GrD3DResourceStateEnum; +struct GrD3DSurfaceInfo; +struct GrD3DTextureResourceInfo; +struct GrD3DTextureResourceSpec; +struct GrD3DFenceInfo; + +// This struct is to used to store the the actual information about the Direct3D backend image on +// GrBackendTexture and GrBackendRenderTarget. When a client calls getD3DTextureInfo on a +// GrBackendTexture/RenderTarget, we use the GrD3DBackendSurfaceInfo to create a snapshot +// GrD3DTextureResourceInfo object. Internally, this uses a ref count GrD3DResourceState object to +// track the current D3D12_RESOURCE_STATES which can be shared with an internal GrD3DTextureResource +// so that state updates can be seen by all users of the texture. +struct GrD3DBackendSurfaceInfo { + GrD3DBackendSurfaceInfo(const GrD3DTextureResourceInfo& info, GrD3DResourceState* state); + + void cleanup(); + + GrD3DBackendSurfaceInfo& operator=(const GrD3DBackendSurfaceInfo&) = delete; + + // Assigns the passed in GrD3DBackendSurfaceInfo to this object. if isValid is true we will also + // attempt to unref the old fLayout on this object. 
+ void assign(const GrD3DBackendSurfaceInfo&, bool isValid); + + void setResourceState(GrD3DResourceStateEnum state); + + sk_sp<GrD3DResourceState> getGrD3DResourceState() const; + + GrD3DTextureResourceInfo snapTextureResourceInfo() const; + + bool isProtected() const; +#if GR_TEST_UTILS + bool operator==(const GrD3DBackendSurfaceInfo& that) const; +#endif + +private: + GrD3DTextureResourceInfo* fTextureResourceInfo; + GrD3DResourceState* fResourceState; +}; + +struct GrD3DTextureResourceSpecHolder { +public: + GrD3DTextureResourceSpecHolder(const GrD3DSurfaceInfo&); + + void cleanup(); + + GrD3DSurfaceInfo getSurfaceInfo(uint32_t sampleCount, + uint32_t levelCount, + GrProtected isProtected) const; + +private: + GrD3DTextureResourceSpec* fSpec; +}; + +#endif diff --git a/src/deps/skia/include/private/GrDawnTypesPriv.h b/src/deps/skia/include/private/GrDawnTypesPriv.h new file mode 100644 index 000000000..5eacf2ea2 --- /dev/null +++ b/src/deps/skia/include/private/GrDawnTypesPriv.h @@ -0,0 +1,26 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrDawnTypesPriv_DEFINED +#define GrDawnTypesPriv_DEFINED + +#include "include/gpu/dawn/GrDawnTypes.h" + +struct GrDawnTextureSpec { + GrDawnTextureSpec() {} + GrDawnTextureSpec(const GrDawnSurfaceInfo& info) : fFormat(info.fFormat) {} + + wgpu::TextureFormat fFormat; +}; + +GrDawnSurfaceInfo GrDawnTextureSpecToSurfaceInfo(const GrDawnTextureSpec& dawnSpec, + uint32_t sampleCount, + uint32_t levelCount, + GrProtected isProtected); + +#endif + diff --git a/src/deps/skia/include/private/GrGLTypesPriv.h b/src/deps/skia/include/private/GrGLTypesPriv.h new file mode 100644 index 000000000..4abef05c7 --- /dev/null +++ b/src/deps/skia/include/private/GrGLTypesPriv.h @@ -0,0 +1,107 @@ +/* + * Copyright 2019 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "include/core/SkRefCnt.h" +#include "include/gpu/gl/GrGLTypes.h" + +#ifndef GrGLTypesPriv_DEFINED +#define GrGLTypesPriv_DEFINED + +static constexpr int kGrGLColorFormatCount = static_cast<int>(GrGLFormat::kLastColorFormat) + 1; + +class GrGLTextureParameters : public SkNVRefCnt<GrGLTextureParameters> { +public: + // We currently consider texture parameters invalid on all textures + // GrContext::resetContext(). We use this type to track whether instances of + // GrGLTextureParameters were updated before or after the most recent resetContext(). At 10 + // resets / frame and 60fps a 64bit timestamp will overflow in about a billion years. + // TODO: Require clients to use GrBackendTexture::glTextureParametersModified() to invalidate + // texture parameters and get rid of timestamp checking. + using ResetTimestamp = uint64_t; + + // This initializes the params to have an expired timestamp. They'll be considered invalid the + // first time the texture is used unless set() is called. + GrGLTextureParameters() = default; + + // This is texture parameter state that is overridden when a non-zero sampler object is bound. + struct SamplerOverriddenState { + SamplerOverriddenState(); + void invalidate(); + + GrGLenum fMinFilter; + GrGLenum fMagFilter; + GrGLenum fWrapS; + GrGLenum fWrapT; + GrGLfloat fMinLOD; + GrGLfloat fMaxLOD; + // We always want the border color to be transparent black, so no need to store 4 floats. 
+ // Just track if it's been invalidated and no longer the default + bool fBorderColorInvalid; + }; + + // Texture parameter state that is not overridden by a bound sampler object. + struct NonsamplerState { + NonsamplerState(); + void invalidate(); + + GrGLint fBaseMipMapLevel; + GrGLint fMaxMipmapLevel; + bool fSwizzleIsRGBA; + }; + + void invalidate(); + + ResetTimestamp resetTimestamp() const { return fResetTimestamp; } + const SamplerOverriddenState& samplerOverriddenState() const { return fSamplerOverriddenState; } + const NonsamplerState& nonsamplerState() const { return fNonsamplerState; } + + // SamplerOverriddenState is optional because we don't track it when we're using sampler + // objects. + void set(const SamplerOverriddenState* samplerState, + const NonsamplerState& nonsamplerState, + ResetTimestamp currTimestamp); + +private: + static constexpr ResetTimestamp kExpiredTimestamp = 0; + + SamplerOverriddenState fSamplerOverriddenState; + NonsamplerState fNonsamplerState; + ResetTimestamp fResetTimestamp = kExpiredTimestamp; +}; + +class GrGLBackendTextureInfo { +public: + GrGLBackendTextureInfo(const GrGLTextureInfo& info, GrGLTextureParameters* params) + : fInfo(info), fParams(params) {} + GrGLBackendTextureInfo(const GrGLBackendTextureInfo&) = delete; + GrGLBackendTextureInfo& operator=(const GrGLBackendTextureInfo&) = delete; + const GrGLTextureInfo& info() const { return fInfo; } + GrGLTextureParameters* parameters() const { return fParams; } + sk_sp<GrGLTextureParameters> refParameters() const { return sk_ref_sp(fParams); } + + void cleanup(); + void assign(const GrGLBackendTextureInfo&, bool thisIsValid); + +private: + GrGLTextureInfo fInfo; + GrGLTextureParameters* fParams; +}; + +struct GrGLTextureSpec { + GrGLTextureSpec() : fTarget(0), fFormat(0) {} + GrGLTextureSpec(const GrGLSurfaceInfo& info) : fTarget(info.fTarget), fFormat(info.fFormat) {} + + GrGLenum fTarget; + GrGLenum fFormat; +}; + +GrGLSurfaceInfo GrGLTextureSpecToSurfaceInfo(const GrGLTextureSpec& glSpec, + uint32_t sampleCount, + uint32_t levelCount, + GrProtected isProtected); + +#endif diff --git a/src/deps/skia/include/private/GrImageContext.h b/src/deps/skia/include/private/GrImageContext.h new file mode 100644 index 000000000..8a9f558f3 --- /dev/null +++ b/src/deps/skia/include/private/GrImageContext.h @@ -0,0 +1,55 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrImageContext_DEFINED +#define GrImageContext_DEFINED + +#include "include/private/GrContext_Base.h" +#include "include/private/GrSingleOwner.h" + +class GrImageContextPriv; + +// This is now just a view on a ThreadSafeProxy, that SkImages can attempt to +// downcast to a GrDirectContext as a backdoor to some operations. Once we remove the backdoors, +// this goes away and SkImages just hold ThreadSafeProxies. +class GrImageContext : public GrContext_Base { +public: + ~GrImageContext() override; + + // Provides access to functions that aren't part of the public API. 
+ GrImageContextPriv priv(); + const GrImageContextPriv priv() const; // NOLINT(readability-const-return-type) + +protected: + friend class GrImageContextPriv; // for hidden functions + + GrImageContext(sk_sp<GrContextThreadSafeProxy>); + + SK_API virtual void abandonContext(); + SK_API virtual bool abandoned(); + + /** This is only useful for debug purposes */ + GrSingleOwner* singleOwner() const { return &fSingleOwner; } + + GrImageContext* asImageContext() override { return this; } + +private: + // When making promise images, we currently need a placeholder GrImageContext instance to give + // to the SkImage that has no real power, just a wrapper around the ThreadSafeProxy. + // TODO: De-power SkImage to ThreadSafeProxy or at least figure out a way to share one instance. + static sk_sp<GrImageContext> MakeForPromiseImage(sk_sp<GrContextThreadSafeProxy>); + + // In debug builds we guard against improper thread handling + // This guard is passed to the GrDrawingManager and, from there to all the + // GrSurfaceDrawContexts. It is also passed to the GrResourceProvider and SkGpuDevice. + // TODO: Move this down to GrRecordingContext. + mutable GrSingleOwner fSingleOwner; + + using INHERITED = GrContext_Base; +}; + +#endif diff --git a/src/deps/skia/include/private/GrMockTypesPriv.h b/src/deps/skia/include/private/GrMockTypesPriv.h new file mode 100644 index 000000000..fc72c7fd9 --- /dev/null +++ b/src/deps/skia/include/private/GrMockTypesPriv.h @@ -0,0 +1,31 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrMockTypesPriv_DEFINED +#define GrMockTypesPriv_DEFINED + +#include "include/gpu/mock/GrMockTypes.h" + +struct GrMockTextureSpec { + GrMockTextureSpec() + : fColorType(GrColorType::kUnknown) + , fCompressionType(SkImage::CompressionType::kNone) {} + GrMockTextureSpec(const GrMockSurfaceInfo& info) + : fColorType(info.fColorType) + , fCompressionType(info.fCompressionType) {} + + GrColorType fColorType = GrColorType::kUnknown; + SkImage::CompressionType fCompressionType = SkImage::CompressionType::kNone; +}; + +GrMockSurfaceInfo GrMockTextureSpecToSurfaceInfo(const GrMockTextureSpec& mockSpec, + uint32_t sampleCount, + uint32_t levelCount, + GrProtected isProtected); + +#endif + diff --git a/src/deps/skia/include/private/GrMtlTypesPriv.h b/src/deps/skia/include/private/GrMtlTypesPriv.h new file mode 100644 index 000000000..550d01760 --- /dev/null +++ b/src/deps/skia/include/private/GrMtlTypesPriv.h @@ -0,0 +1,75 @@ +/* + * Copyright 2021 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef GrMtlTypesPriv_DEFINED +#define GrMtlTypesPriv_DEFINED + +#include "include/gpu/GrTypes.h" +#include "include/gpu/mtl/GrMtlTypes.h" + +/////////////////////////////////////////////////////////////////////////////// + +#ifdef __APPLE__ + +#include <TargetConditionals.h> + +#if defined(SK_BUILD_FOR_MAC) +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 110000 +#define GR_METAL_SDK_VERSION 230 +#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500 +#define GR_METAL_SDK_VERSION 220 +#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 101400 +#define GR_METAL_SDK_VERSION 210 +#else +#error Must use at least 10.14 SDK to build Metal backend for MacOS +#endif +#else +#if __IPHONE_OS_VERSION_MAX_ALLOWED >= 140000 || __TV_OS_VERSION_MAX_ALLOWED >= 140000 +#define GR_METAL_SDK_VERSION 230 +#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000 || __TV_OS_VERSION_MAX_ALLOWED >= 130000 +#define GR_METAL_SDK_VERSION 220 +#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 120000 || __TV_OS_VERSION_MAX_ALLOWED >= 120000 +#define GR_METAL_SDK_VERSION 210 +#else +#error Must use at least 12.00 SDK to build Metal backend for iOS +#endif +#endif + +#if __has_feature(objc_arc) && __has_attribute(objc_externally_retained) +#define GR_NORETAIN __attribute__((objc_externally_retained)) +#define GR_NORETAIN_BEGIN \ + _Pragma("clang attribute push (__attribute__((objc_externally_retained)), apply_to=any(function,objc_method))") +#define GR_NORETAIN_END _Pragma("clang attribute pop") +#else +#define GR_NORETAIN +#define GR_NORETAIN_BEGIN +#define GR_NORETAIN_END +#endif + +struct GrMtlTextureSpec { + GrMtlTextureSpec() + : fFormat(0) + , fUsage(0) + , fStorageMode(0) {} + GrMtlTextureSpec(const GrMtlSurfaceInfo& info) + : fFormat(info.fFormat) + , fUsage(info.fUsage) + , fStorageMode(info.fStorageMode) {} + + GrMTLPixelFormat fFormat; + GrMTLTextureUsage fUsage; + GrMTLStorageMode fStorageMode; +}; + +GrMtlSurfaceInfo GrMtlTextureSpecToSurfaceInfo(const GrMtlTextureSpec& mtlSpec, + uint32_t sampleCount, + uint32_t levelCount, + GrProtected isProtected); + +#endif // __APPLE__ + +#endif // GrMtlTypesPriv_DEFINED diff --git a/src/deps/skia/include/private/GrSingleOwner.h b/src/deps/skia/include/private/GrSingleOwner.h new file mode 100644 index 000000000..f612bb5fc --- /dev/null +++ b/src/deps/skia/include/private/GrSingleOwner.h @@ -0,0 +1,65 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrSingleOwner_DEFINED +#define GrSingleOwner_DEFINED + +#include "include/core/SkTypes.h" + +#ifdef SK_DEBUG +#include "include/private/SkMutex.h" +#include "include/private/SkThreadID.h" + +#define GR_ASSERT_SINGLE_OWNER(obj) \ + GrSingleOwner::AutoEnforce debug_SingleOwner(obj, __FILE__, __LINE__); + +// This is a debug tool to verify an object is only being used from one thread at a time. 
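+//
+// A usage sketch (Widget and doWork() are hypothetical; the macro expands to an
+// AutoEnforce guard in debug builds and to nothing in release builds, see below):
+//
+//   class Widget {
+//   public:
+//       void doWork() {
+//           GR_ASSERT_SINGLE_OWNER(&fSingleOwner)
+//           // ... state that must only be touched by one thread at a time ...
+//       }
+//   private:
+//       GrSingleOwner fSingleOwner;
+//   };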
+class GrSingleOwner { +public: + GrSingleOwner() : fOwner(kIllegalThreadID), fReentranceCount(0) {} + + struct AutoEnforce { + AutoEnforce(GrSingleOwner* so, const char* file, int line) + : fFile(file), fLine(line), fSO(so) { + fSO->enter(file, line); + } + ~AutoEnforce() { fSO->exit(fFile, fLine); } + + const char* fFile; + int fLine; + GrSingleOwner* fSO; + }; + +private: + void enter(const char* file, int line) { + SkAutoMutexExclusive lock(fMutex); + SkThreadID self = SkGetThreadID(); + SkASSERTF(fOwner == self || fOwner == kIllegalThreadID, "%s:%d Single owner failure.", + file, line); + fReentranceCount++; + fOwner = self; + } + + void exit(const char* file, int line) { + SkAutoMutexExclusive lock(fMutex); + SkASSERTF(fOwner == SkGetThreadID(), "%s:%d Single owner failure.", file, line); + fReentranceCount--; + if (fReentranceCount == 0) { + fOwner = kIllegalThreadID; + } + } + + SkMutex fMutex; + SkThreadID fOwner SK_GUARDED_BY(fMutex); + int fReentranceCount SK_GUARDED_BY(fMutex); +}; +#else +#define GR_ASSERT_SINGLE_OWNER(obj) +class GrSingleOwner {}; // Provide a no-op implementation so we can pass pointers to constructors +#endif + +#endif diff --git a/src/deps/skia/include/private/GrTypesPriv.h b/src/deps/skia/include/private/GrTypesPriv.h new file mode 100644 index 000000000..cba4b4c79 --- /dev/null +++ b/src/deps/skia/include/private/GrTypesPriv.h @@ -0,0 +1,1354 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrTypesPriv_DEFINED +#define GrTypesPriv_DEFINED + +#include <chrono> +#include "include/core/SkImage.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkPath.h" +#include "include/core/SkRefCnt.h" +#include "include/gpu/GrTypes.h" +#include "include/private/SkImageInfoPriv.h" +#include "include/private/SkMacros.h" + +class GrBackendFormat; +class GrCaps; +class GrSurfaceProxy; + +// The old libstdc++ uses the draft name "monotonic_clock" rather than "steady_clock". This might +// not actually be monotonic, depending on how libstdc++ was built. However, this is only currently +// used for idle resource purging so it shouldn't cause a correctness problem. +#if defined(__GLIBCXX__) && (__GLIBCXX__ < 20130000) +using GrStdSteadyClock = std::chrono::monotonic_clock; +#else +using GrStdSteadyClock = std::chrono::steady_clock; +#endif + +/** + * divide, rounding up + */ + +static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; } + +/** + * Geometric primitives used for drawing. + */ +enum class GrPrimitiveType : uint8_t { + kTriangles, + kTriangleStrip, + kPoints, + kLines, // 1 pix wide only + kLineStrip, // 1 pix wide only + kPatches, + kPath +}; +static constexpr int kNumGrPrimitiveTypes = (int)GrPrimitiveType::kPath + 1; + +static constexpr bool GrIsPrimTypeLines(GrPrimitiveType type) { + return GrPrimitiveType::kLines == type || GrPrimitiveType::kLineStrip == type; +} + +enum class GrPrimitiveRestart : bool { + kNo = false, + kYes = true +}; + +/** + * Should a created surface be texturable? + */ +enum class GrTexturable : bool { + kNo = false, + kYes = true +}; + +// A DDL recorder has its own proxy provider and proxy cache. This enum indicates if +// a given proxy provider is one of these special ones. +enum class GrDDLProvider : bool { + kNo = false, + kYes = true +}; + +/** + * Formats for masks, used by the font cache. Important that these are 0-based. 
+ */ +enum GrMaskFormat { + kA8_GrMaskFormat, //!< 1-byte per pixel + kA565_GrMaskFormat, //!< 2-bytes per pixel, RGB represent 3-channel LCD coverage + kARGB_GrMaskFormat, //!< 4-bytes per pixel, color format + + kLast_GrMaskFormat = kARGB_GrMaskFormat +}; +static const int kMaskFormatCount = kLast_GrMaskFormat + 1; + +/** + * Return the number of bytes-per-pixel for the specified mask format. + */ +inline constexpr int GrMaskFormatBytesPerPixel(GrMaskFormat format) { + SkASSERT(format < kMaskFormatCount); + // kA8 (0) -> 1 + // kA565 (1) -> 2 + // kARGB (2) -> 4 + static_assert(kA8_GrMaskFormat == 0, "enum_order_dependency"); + static_assert(kA565_GrMaskFormat == 1, "enum_order_dependency"); + static_assert(kARGB_GrMaskFormat == 2, "enum_order_dependency"); + + return SkTo<int>(1u << format); +} + +/** Ownership rules for external GPU resources imported into Skia. */ +enum GrWrapOwnership { + /** Skia will assume the client will keep the resource alive and Skia will not free it. */ + kBorrow_GrWrapOwnership, + + /** Skia will assume ownership of the resource and free it. */ + kAdopt_GrWrapOwnership, +}; + +enum class GrWrapCacheable : bool { + /** + * The wrapped resource will be removed from the cache as soon as it becomes purgeable. It may + * still be assigned and found by a unique key, but the presence of the key will not be used to + * keep the resource alive when it has no references. + */ + kNo = false, + /** + * The wrapped resource is allowed to remain in the GrResourceCache when it has no references + * but has a unique key. Such resources should only be given unique keys when it is known that + * the key will eventually be removed from the resource or invalidated via the message bus. + */ + kYes = true +}; + +enum class GrBudgetedType : uint8_t { + /** The resource is budgeted and is subject to purging under budget pressure. */ + kBudgeted, + /** + * The resource is unbudgeted and is purged as soon as it has no refs regardless of whether + * it has a unique or scratch key. + */ + kUnbudgetedUncacheable, + /** + * The resource is unbudgeted and is allowed to remain in the cache with no refs if it + * has a unique key. Scratch keys are ignored. + */ + kUnbudgetedCacheable, +}; + +enum class GrScissorTest : bool { + kDisabled = false, + kEnabled = true +}; + +/* + * Used to say whether texture is backed by memory. + */ +enum class GrMemoryless : bool { + /** + * The texture will be allocated normally and will affect memory budgets. + */ + kNo = false, + /** + * The texture will be not use GPU memory and will not affect memory budgets. + */ + kYes = true +}; + +struct GrMipLevel { + const void* fPixels = nullptr; + size_t fRowBytes = 0; + // This may be used to keep fPixels from being freed while a GrMipLevel exists. + sk_sp<SkData> fOptionalStorage; +}; + +enum class GrSemaphoreWrapType { + kWillSignal, + kWillWait, +}; + +/** + * This enum is used to specify the load operation to be used when an OpsTask/GrOpsRenderPass + * begins execution. + */ +enum class GrLoadOp { + kLoad, + kClear, + kDiscard, +}; + +/** + * This enum is used to specify the store operation to be used when an OpsTask/GrOpsRenderPass + * ends execution. + */ +enum class GrStoreOp { + kStore, + kDiscard, +}; + +/** + * Used to control antialiasing in draw calls. 
+ */ +enum class GrAA : bool { + kNo = false, + kYes = true +}; + +enum class GrFillRule : bool { + kNonzero, + kEvenOdd +}; + +inline GrFillRule GrFillRuleForPathFillType(SkPathFillType fillType) { + switch (fillType) { + case SkPathFillType::kWinding: + case SkPathFillType::kInverseWinding: + return GrFillRule::kNonzero; + case SkPathFillType::kEvenOdd: + case SkPathFillType::kInverseEvenOdd: + return GrFillRule::kEvenOdd; + } + SkUNREACHABLE; +} + +inline GrFillRule GrFillRuleForSkPath(const SkPath& path) { + return GrFillRuleForPathFillType(path.getFillType()); +} + +/** This enum indicates the type of antialiasing to be performed. */ +enum class GrAAType : unsigned { + /** No antialiasing */ + kNone, + /** Use fragment shader code to blend with a fractional pixel coverage. */ + kCoverage, + /** Use normal MSAA. */ + kMSAA, + + kLast = kMSAA +}; +static const int kGrAATypeCount = static_cast<int>(GrAAType::kLast) + 1; + +static constexpr bool GrAATypeIsHW(GrAAType type) { + switch (type) { + case GrAAType::kNone: + return false; + case GrAAType::kCoverage: + return false; + case GrAAType::kMSAA: + return true; + } + SkUNREACHABLE; +} + +/** + * Some pixel configs are inherently clamped to [0,1], some are allowed to go outside that range, + * and some are FP but manually clamped in the XP. + */ +enum class GrClampType { + kAuto, // Normalized, fixed-point configs + kManual, // Clamped FP configs + kNone, // Normal (unclamped) FP configs +}; + +/** + * A number of rectangle/quadrilateral drawing APIs can control anti-aliasing on a per edge basis. + * These masks specify which edges are AA'ed. The intent for this is to support tiling with seamless + * boundaries, where the inner edges are non-AA and the outer edges are AA. Regular draws (where AA + * is specified by GrAA) is almost equivalent to kNone or kAll, with the exception of how MSAA is + * handled. + * + * When tiling and there is MSAA, mixed edge rectangles are processed with MSAA, so in order for the + * tiled edges to remain seamless, inner tiles with kNone must also be processed with MSAA. In + * regular drawing, however, kNone should disable MSAA (if it's supported) to match the expected + * appearance. + * + * Therefore, APIs that use per-edge AA flags also take a GrAA value so that they can differentiate + * between the regular and tiling use case behaviors. Tiling operations should always pass + * GrAA::kYes while regular options should pass GrAA based on the SkPaint's anti-alias state. + * + * These values are identical to SkCanvas::QuadAAFlags. + */ +enum class GrQuadAAFlags { + kLeft = 0b0001, + kTop = 0b0010, + kRight = 0b0100, + kBottom = 0b1000, + + kNone = 0b0000, + kAll = 0b1111, +}; + +GR_MAKE_BITFIELD_CLASS_OPS(GrQuadAAFlags) + +static inline GrQuadAAFlags SkToGrQuadAAFlags(unsigned flags) { + return static_cast<GrQuadAAFlags>(flags); +} + +/** + * Types of shader-language-specific boxed variables we can create. 
+ */ +enum GrSLType { + kVoid_GrSLType, + kBool_GrSLType, + kBool2_GrSLType, + kBool3_GrSLType, + kBool4_GrSLType, + kShort_GrSLType, + kShort2_GrSLType, + kShort3_GrSLType, + kShort4_GrSLType, + kUShort_GrSLType, + kUShort2_GrSLType, + kUShort3_GrSLType, + kUShort4_GrSLType, + kFloat_GrSLType, + kFloat2_GrSLType, + kFloat3_GrSLType, + kFloat4_GrSLType, + kFloat2x2_GrSLType, + kFloat3x3_GrSLType, + kFloat4x4_GrSLType, + kHalf_GrSLType, + kHalf2_GrSLType, + kHalf3_GrSLType, + kHalf4_GrSLType, + kHalf2x2_GrSLType, + kHalf3x3_GrSLType, + kHalf4x4_GrSLType, + kInt_GrSLType, + kInt2_GrSLType, + kInt3_GrSLType, + kInt4_GrSLType, + kUInt_GrSLType, + kUInt2_GrSLType, + kUInt3_GrSLType, + kUInt4_GrSLType, + kTexture2DSampler_GrSLType, + kTextureExternalSampler_GrSLType, + kTexture2DRectSampler_GrSLType, + kTexture2D_GrSLType, + kSampler_GrSLType, + kInput_GrSLType, + + kLast_GrSLType = kInput_GrSLType +}; +static const int kGrSLTypeCount = kLast_GrSLType + 1; + +/** + * The type of texture. Backends other than GL currently only use the 2D value but the type must + * still be known at the API-neutral layer as it used to determine whether MIP maps, renderability, + * and sampling parameters are legal for proxies that will be instantiated with wrapped textures. + */ +enum class GrTextureType { + kNone, + k2D, + /* Rectangle uses unnormalized texture coordinates. */ + kRectangle, + kExternal +}; + +enum GrShaderType { + kVertex_GrShaderType, + kFragment_GrShaderType, + + kLastkFragment_GrShaderType = kFragment_GrShaderType +}; +static const int kGrShaderTypeCount = kLastkFragment_GrShaderType + 1; + +enum GrShaderFlags { + kNone_GrShaderFlags = 0, + kVertex_GrShaderFlag = 1 << 0, + kTessControl_GrShaderFlag = 1 << 1, + kTessEvaluation_GrShaderFlag = 1 << 2, + kFragment_GrShaderFlag = 1 << 3 +}; +SK_MAKE_BITFIELD_OPS(GrShaderFlags) + +/** Is the shading language type float (including vectors/matrices)? */ +static constexpr bool GrSLTypeIsFloatType(GrSLType type) { + switch (type) { + case kFloat_GrSLType: + case kFloat2_GrSLType: + case kFloat3_GrSLType: + case kFloat4_GrSLType: + case kFloat2x2_GrSLType: + case kFloat3x3_GrSLType: + case kFloat4x4_GrSLType: + case kHalf_GrSLType: + case kHalf2_GrSLType: + case kHalf3_GrSLType: + case kHalf4_GrSLType: + case kHalf2x2_GrSLType: + case kHalf3x3_GrSLType: + case kHalf4x4_GrSLType: + return true; + + case kVoid_GrSLType: + case kTexture2DSampler_GrSLType: + case kTextureExternalSampler_GrSLType: + case kTexture2DRectSampler_GrSLType: + case kBool_GrSLType: + case kBool2_GrSLType: + case kBool3_GrSLType: + case kBool4_GrSLType: + case kShort_GrSLType: + case kShort2_GrSLType: + case kShort3_GrSLType: + case kShort4_GrSLType: + case kUShort_GrSLType: + case kUShort2_GrSLType: + case kUShort3_GrSLType: + case kUShort4_GrSLType: + case kInt_GrSLType: + case kInt2_GrSLType: + case kInt3_GrSLType: + case kInt4_GrSLType: + case kUInt_GrSLType: + case kUInt2_GrSLType: + case kUInt3_GrSLType: + case kUInt4_GrSLType: + case kTexture2D_GrSLType: + case kSampler_GrSLType: + case kInput_GrSLType: + return false; + } + SkUNREACHABLE; +} + +/** Is the shading language type integral (including vectors)? 
*/ +static constexpr bool GrSLTypeIsIntegralType(GrSLType type) { + switch (type) { + case kShort_GrSLType: + case kShort2_GrSLType: + case kShort3_GrSLType: + case kShort4_GrSLType: + case kUShort_GrSLType: + case kUShort2_GrSLType: + case kUShort3_GrSLType: + case kUShort4_GrSLType: + case kInt_GrSLType: + case kInt2_GrSLType: + case kInt3_GrSLType: + case kInt4_GrSLType: + case kUInt_GrSLType: + case kUInt2_GrSLType: + case kUInt3_GrSLType: + case kUInt4_GrSLType: + return true; + + case kFloat_GrSLType: + case kFloat2_GrSLType: + case kFloat3_GrSLType: + case kFloat4_GrSLType: + case kFloat2x2_GrSLType: + case kFloat3x3_GrSLType: + case kFloat4x4_GrSLType: + case kHalf_GrSLType: + case kHalf2_GrSLType: + case kHalf3_GrSLType: + case kHalf4_GrSLType: + case kHalf2x2_GrSLType: + case kHalf3x3_GrSLType: + case kHalf4x4_GrSLType: + case kVoid_GrSLType: + case kTexture2DSampler_GrSLType: + case kTextureExternalSampler_GrSLType: + case kTexture2DRectSampler_GrSLType: + case kBool_GrSLType: + case kBool2_GrSLType: + case kBool3_GrSLType: + case kBool4_GrSLType: + case kTexture2D_GrSLType: + case kSampler_GrSLType: + case kInput_GrSLType: + return false; + } + SkUNREACHABLE; +} + +/** + * Is the shading language type supported as a uniform (ie, does it have a corresponding set + * function on GrGLSLProgramDataManager)? + */ +static constexpr bool GrSLTypeCanBeUniformValue(GrSLType type) { + return GrSLTypeIsFloatType(type) || GrSLTypeIsIntegralType(type); +} + +/** If the type represents a single value or vector return the vector length, else -1. */ +static constexpr int GrSLTypeVecLength(GrSLType type) { + switch (type) { + case kFloat_GrSLType: + case kHalf_GrSLType: + case kBool_GrSLType: + case kShort_GrSLType: + case kUShort_GrSLType: + case kInt_GrSLType: + case kUInt_GrSLType: + return 1; + + case kFloat2_GrSLType: + case kHalf2_GrSLType: + case kBool2_GrSLType: + case kShort2_GrSLType: + case kUShort2_GrSLType: + case kInt2_GrSLType: + case kUInt2_GrSLType: + return 2; + + case kFloat3_GrSLType: + case kHalf3_GrSLType: + case kBool3_GrSLType: + case kShort3_GrSLType: + case kUShort3_GrSLType: + case kInt3_GrSLType: + case kUInt3_GrSLType: + return 3; + + case kFloat4_GrSLType: + case kHalf4_GrSLType: + case kBool4_GrSLType: + case kShort4_GrSLType: + case kUShort4_GrSLType: + case kInt4_GrSLType: + case kUInt4_GrSLType: + return 4; + + case kFloat2x2_GrSLType: + case kFloat3x3_GrSLType: + case kFloat4x4_GrSLType: + case kHalf2x2_GrSLType: + case kHalf3x3_GrSLType: + case kHalf4x4_GrSLType: + case kVoid_GrSLType: + case kTexture2DSampler_GrSLType: + case kTextureExternalSampler_GrSLType: + case kTexture2DRectSampler_GrSLType: + case kTexture2D_GrSLType: + case kSampler_GrSLType: + case kInput_GrSLType: + return -1; + } + SkUNREACHABLE; +} + +static inline GrSLType GrSLCombinedSamplerTypeForTextureType(GrTextureType type) { + switch (type) { + case GrTextureType::k2D: + return kTexture2DSampler_GrSLType; + case GrTextureType::kRectangle: + return kTexture2DRectSampler_GrSLType; + case GrTextureType::kExternal: + return kTextureExternalSampler_GrSLType; + default: + SK_ABORT("Unexpected texture type"); + } +} + +/** Rectangle and external textures only support the clamp wrap mode and do not support + * MIP maps. 
+ */ +static inline bool GrTextureTypeHasRestrictedSampling(GrTextureType type) { + switch (type) { + case GrTextureType::k2D: + return false; + case GrTextureType::kRectangle: + return true; + case GrTextureType::kExternal: + return true; + default: + SK_ABORT("Unexpected texture type"); + } +} + +static constexpr bool GrSLTypeIsCombinedSamplerType(GrSLType type) { + switch (type) { + case kTexture2DSampler_GrSLType: + case kTextureExternalSampler_GrSLType: + case kTexture2DRectSampler_GrSLType: + return true; + + case kVoid_GrSLType: + case kFloat_GrSLType: + case kFloat2_GrSLType: + case kFloat3_GrSLType: + case kFloat4_GrSLType: + case kFloat2x2_GrSLType: + case kFloat3x3_GrSLType: + case kFloat4x4_GrSLType: + case kHalf_GrSLType: + case kHalf2_GrSLType: + case kHalf3_GrSLType: + case kHalf4_GrSLType: + case kHalf2x2_GrSLType: + case kHalf3x3_GrSLType: + case kHalf4x4_GrSLType: + case kInt_GrSLType: + case kInt2_GrSLType: + case kInt3_GrSLType: + case kInt4_GrSLType: + case kUInt_GrSLType: + case kUInt2_GrSLType: + case kUInt3_GrSLType: + case kUInt4_GrSLType: + case kBool_GrSLType: + case kBool2_GrSLType: + case kBool3_GrSLType: + case kBool4_GrSLType: + case kShort_GrSLType: + case kShort2_GrSLType: + case kShort3_GrSLType: + case kShort4_GrSLType: + case kUShort_GrSLType: + case kUShort2_GrSLType: + case kUShort3_GrSLType: + case kUShort4_GrSLType: + case kTexture2D_GrSLType: + case kSampler_GrSLType: + case kInput_GrSLType: + return false; + } + SkUNREACHABLE; +} + +////////////////////////////////////////////////////////////////////////////// + +/** + * Types used to describe format of vertices in arrays. + */ +enum GrVertexAttribType { + kFloat_GrVertexAttribType = 0, + kFloat2_GrVertexAttribType, + kFloat3_GrVertexAttribType, + kFloat4_GrVertexAttribType, + kHalf_GrVertexAttribType, + kHalf2_GrVertexAttribType, + kHalf4_GrVertexAttribType, + + kInt2_GrVertexAttribType, // vector of 2 32-bit ints + kInt3_GrVertexAttribType, // vector of 3 32-bit ints + kInt4_GrVertexAttribType, // vector of 4 32-bit ints + + + kByte_GrVertexAttribType, // signed byte + kByte2_GrVertexAttribType, // vector of 2 8-bit signed bytes + kByte4_GrVertexAttribType, // vector of 4 8-bit signed bytes + kUByte_GrVertexAttribType, // unsigned byte + kUByte2_GrVertexAttribType, // vector of 2 8-bit unsigned bytes + kUByte4_GrVertexAttribType, // vector of 4 8-bit unsigned bytes + + kUByte_norm_GrVertexAttribType, // unsigned byte, e.g. coverage, 0 -> 0.0f, 255 -> 1.0f. + kUByte4_norm_GrVertexAttribType, // vector of 4 unsigned bytes, e.g. colors, 0 -> 0.0f, + // 255 -> 1.0f. + + kShort2_GrVertexAttribType, // vector of 2 16-bit shorts. + kShort4_GrVertexAttribType, // vector of 4 16-bit shorts. + + kUShort2_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0, 65535 -> 65535. + kUShort2_norm_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f. + + kInt_GrVertexAttribType, + kUInt_GrVertexAttribType, + + kUShort_norm_GrVertexAttribType, + + kUShort4_norm_GrVertexAttribType, // vector of 4 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f. + + kLast_GrVertexAttribType = kUShort4_norm_GrVertexAttribType +}; +static const int kGrVertexAttribTypeCount = kLast_GrVertexAttribType + 1; + +////////////////////////////////////////////////////////////////////////////// + +/** + * We have coverage effects that clip rendering to the edge of some geometric primitive. + * This enum specifies how that clipping is performed. 
Not all factories that take a + * GrClipEdgeType will succeed with all values and it is up to the caller to verify success. + */ +enum class GrClipEdgeType { + kFillBW, + kFillAA, + kInverseFillBW, + kInverseFillAA, + + kLast = kInverseFillAA +}; +static const int kGrClipEdgeTypeCnt = (int) GrClipEdgeType::kLast + 1; + +static constexpr bool GrClipEdgeTypeIsFill(const GrClipEdgeType edgeType) { + return (GrClipEdgeType::kFillAA == edgeType || GrClipEdgeType::kFillBW == edgeType); +} + +static constexpr bool GrClipEdgeTypeIsInverseFill(const GrClipEdgeType edgeType) { + return (GrClipEdgeType::kInverseFillAA == edgeType || + GrClipEdgeType::kInverseFillBW == edgeType); +} + +static constexpr bool GrClipEdgeTypeIsAA(const GrClipEdgeType edgeType) { + return (GrClipEdgeType::kFillBW != edgeType && + GrClipEdgeType::kInverseFillBW != edgeType); +} + +static inline GrClipEdgeType GrInvertClipEdgeType(const GrClipEdgeType edgeType) { + switch (edgeType) { + case GrClipEdgeType::kFillBW: + return GrClipEdgeType::kInverseFillBW; + case GrClipEdgeType::kFillAA: + return GrClipEdgeType::kInverseFillAA; + case GrClipEdgeType::kInverseFillBW: + return GrClipEdgeType::kFillBW; + case GrClipEdgeType::kInverseFillAA: + return GrClipEdgeType::kFillAA; + } + SkUNREACHABLE; +} + +/** + * Indicates the type of pending IO operations that can be recorded for gpu resources. + */ +enum GrIOType { + kRead_GrIOType, + kWrite_GrIOType, + kRW_GrIOType +}; + +/** + * Indicates the type of data that a GPU buffer will be used for. + */ +enum class GrGpuBufferType { + kVertex, + kIndex, + kDrawIndirect, + kXferCpuToGpu, + kXferGpuToCpu, + kUniform, +}; +static const int kGrGpuBufferTypeCount = static_cast<int>(GrGpuBufferType::kUniform) + 1; + +/** + * Provides a performance hint regarding the frequency at which a data store will be accessed. + */ +enum GrAccessPattern { + /** Data store will be respecified repeatedly and used many times. */ + kDynamic_GrAccessPattern, + /** Data store will be specified once and used many times. (Thus disqualified from caching.) */ + kStatic_GrAccessPattern, + /** Data store will be specified once and used at most a few times. (Also can't be cached.) */ + kStream_GrAccessPattern, + + kLast_GrAccessPattern = kStream_GrAccessPattern +}; + +// Flags shared between the GrSurface & GrSurfaceProxy class hierarchies +enum class GrInternalSurfaceFlags { + kNone = 0, + + // Texture-level + + // Means the pixels in the texture are read-only. Cannot also be a GrRenderTarget[Proxy]. + kReadOnly = 1 << 0, + + // RT-level + + // This flag is for use with GL only. It tells us that the internal render target wraps FBO 0. + kGLRTFBOIDIs0 = 1 << 1, + + // This means the render target is multisampled, and internally holds a non-msaa texture for + // resolving into. The render target resolves itself by blitting into this internal texture. + // (asTexture() might or might not return the internal texture, but if it does, we always + // resolve the render target before accessing this texture's data.) + kRequiresManualMSAAResolve = 1 << 2, + + // This means the pixels in the render target are write-only. This is used for Dawn and Metal + // swap chain targets which can be rendered to, but not read or copied. + kFramebufferOnly = 1 << 3, + + // This is a Vulkan only flag. If set the surface can be used as an input attachment in a + // shader. This is used for doing in shader blending where we want to sample from the same + // image we are drawing to. 
+ kVkRTSupportsInputAttachment = 1 << 4, +}; + +GR_MAKE_BITFIELD_CLASS_OPS(GrInternalSurfaceFlags) + +// 'GR_MAKE_BITFIELD_CLASS_OPS' defines the & operator on GrInternalSurfaceFlags to return bool. +// We want to find the bitwise & with these masks, so we declare them as ints. +constexpr static int kGrInternalTextureFlagsMask = static_cast<int>( + GrInternalSurfaceFlags::kReadOnly); + +// We don't include kVkRTSupportsInputAttachment in this mask since we check it manually. We don't +// require that both the surface and proxy have matching values for this flag. Instead we require +// if the proxy has it set then the surface must also have it set. All other flags listed here must +// match on the proxy and surface. +// TODO: Add back kFramebufferOnly flag here once we update SkSurfaceCharacterization to take it +// as a flag. skbug.com/10672 +constexpr static int kGrInternalRenderTargetFlagsMask = static_cast<int>( + GrInternalSurfaceFlags::kGLRTFBOIDIs0 | + GrInternalSurfaceFlags::kRequiresManualMSAAResolve/* | + GrInternalSurfaceFlags::kFramebufferOnly*/); + +constexpr static int kGrInternalTextureRenderTargetFlagsMask = + kGrInternalTextureFlagsMask | kGrInternalRenderTargetFlagsMask; + +#ifdef SK_DEBUG +// Takes a pointer to a GrCaps, and will suppress prints if required +#define GrCapsDebugf(caps, ...) if (!(caps)->suppressPrints()) SkDebugf(__VA_ARGS__) +#else +#define GrCapsDebugf(caps, ...) do {} while (0) +#endif + +/** + * Specifies if the holder owns the backend, OpenGL or Vulkan, object. + */ +enum class GrBackendObjectOwnership : bool { + /** Holder does not destroy the backend object. */ + kBorrowed = false, + /** Holder destroys the backend object. */ + kOwned = true +}; + +/* + * Object for CPU-GPU synchronization + */ +typedef uint64_t GrFence; + +/** + * Used to include or exclude specific GPU path renderers for testing purposes. + */ +enum class GpuPathRenderers { + kNone = 0, // Always use software masks and/or DefaultPathRenderer. + kDashLine = 1 << 0, + kAtlas = 1 << 1, + kTessellation = 1 << 2, + kCoverageCounting = 1 << 3, + kAAHairline = 1 << 4, + kAAConvex = 1 << 5, + kAALinearizing = 1 << 6, + kSmall = 1 << 7, + kTriangulating = 1 << 8, + kDefault = ((1 << 9) - 1) // All path renderers. +}; + +/** + * Used to describe the current state of Mips on a GrTexture + */ +enum class GrMipmapStatus { + kNotAllocated, // Mips have not been allocated + kDirty, // Mips are allocated but the full mip tree does not have valid data + kValid, // All levels fully allocated and have valid data in them +}; + +GR_MAKE_BITFIELD_CLASS_OPS(GpuPathRenderers) + +/** + * Like SkColorType this describes a layout of pixel data in CPU memory. It specifies the channels, + * their type, and width. This exists so that the GPU backend can have private types that have no + * analog in the public facing SkColorType enum and omit types not implemented in the GPU backend. + * It does not refer to a texture format and the mapping to texture formats may be many-to-many. + * It does not specify the sRGB encoding of the stored values. The components are listed in order of + * where they appear in memory. In other words the first component listed is in the low bits and + * the last component in the high bits. + */ +enum class GrColorType { + kUnknown, + kAlpha_8, + kBGR_565, + kABGR_4444, // This name differs from SkColorType. kARGB_4444_SkColorType is misnamed. 
+ kRGBA_8888, + kRGBA_8888_SRGB, + kRGB_888x, + kRG_88, + kBGRA_8888, + kRGBA_1010102, + kBGRA_1010102, + kGray_8, + kGrayAlpha_88, + kAlpha_F16, + kRGBA_F16, + kRGBA_F16_Clamped, + kRGBA_F32, + + kAlpha_16, + kRG_1616, + kRG_F16, + kRGBA_16161616, + + // Unusual types that come up after reading back in cases where we are reassigning the meaning + // of a texture format's channels to use for a particular color format but have to read back the + // data to a full RGBA quadruple. (e.g. using a R8 texture format as A8 color type but the API + // only supports reading to RGBA8.) None of these have SkColorType equivalents. + kAlpha_8xxx, + kAlpha_F32xxx, + kGray_8xxx, + + // Types used to initialize backend textures. + kRGB_888, + kR_8, + kR_16, + kR_F16, + kGray_F16, + kBGRA_4444, + kARGB_4444, + + kLast = kARGB_4444 +}; + +static const int kGrColorTypeCnt = static_cast<int>(GrColorType::kLast) + 1; + +static constexpr SkColorType GrColorTypeToSkColorType(GrColorType ct) { + switch (ct) { + case GrColorType::kUnknown: return kUnknown_SkColorType; + case GrColorType::kAlpha_8: return kAlpha_8_SkColorType; + case GrColorType::kBGR_565: return kRGB_565_SkColorType; + case GrColorType::kABGR_4444: return kARGB_4444_SkColorType; + case GrColorType::kRGBA_8888: return kRGBA_8888_SkColorType; + case GrColorType::kRGBA_8888_SRGB: return kSRGBA_8888_SkColorType; + case GrColorType::kRGB_888x: return kRGB_888x_SkColorType; + case GrColorType::kRG_88: return kR8G8_unorm_SkColorType; + case GrColorType::kBGRA_8888: return kBGRA_8888_SkColorType; + case GrColorType::kRGBA_1010102: return kRGBA_1010102_SkColorType; + case GrColorType::kBGRA_1010102: return kBGRA_1010102_SkColorType; + case GrColorType::kGray_8: return kGray_8_SkColorType; + case GrColorType::kGrayAlpha_88: return kUnknown_SkColorType; + case GrColorType::kAlpha_F16: return kA16_float_SkColorType; + case GrColorType::kRGBA_F16: return kRGBA_F16_SkColorType; + case GrColorType::kRGBA_F16_Clamped: return kRGBA_F16Norm_SkColorType; + case GrColorType::kRGBA_F32: return kRGBA_F32_SkColorType; + case GrColorType::kAlpha_8xxx: return kUnknown_SkColorType; + case GrColorType::kAlpha_F32xxx: return kUnknown_SkColorType; + case GrColorType::kGray_8xxx: return kUnknown_SkColorType; + case GrColorType::kAlpha_16: return kA16_unorm_SkColorType; + case GrColorType::kRG_1616: return kR16G16_unorm_SkColorType; + case GrColorType::kRGBA_16161616: return kR16G16B16A16_unorm_SkColorType; + case GrColorType::kRG_F16: return kR16G16_float_SkColorType; + case GrColorType::kRGB_888: return kUnknown_SkColorType; + case GrColorType::kR_8: return kUnknown_SkColorType; + case GrColorType::kR_16: return kUnknown_SkColorType; + case GrColorType::kR_F16: return kUnknown_SkColorType; + case GrColorType::kGray_F16: return kUnknown_SkColorType; + case GrColorType::kARGB_4444: return kUnknown_SkColorType; + case GrColorType::kBGRA_4444: return kUnknown_SkColorType; + } + SkUNREACHABLE; +} + +static constexpr GrColorType SkColorTypeToGrColorType(SkColorType ct) { + switch (ct) { + case kUnknown_SkColorType: return GrColorType::kUnknown; + case kAlpha_8_SkColorType: return GrColorType::kAlpha_8; + case kRGB_565_SkColorType: return GrColorType::kBGR_565; + case kARGB_4444_SkColorType: return GrColorType::kABGR_4444; + case kRGBA_8888_SkColorType: return GrColorType::kRGBA_8888; + case kSRGBA_8888_SkColorType: return GrColorType::kRGBA_8888_SRGB; + case kRGB_888x_SkColorType: return GrColorType::kRGB_888x; + case kBGRA_8888_SkColorType: return GrColorType::kBGRA_8888; + case 
kGray_8_SkColorType: return GrColorType::kGray_8; + case kRGBA_F16Norm_SkColorType: return GrColorType::kRGBA_F16_Clamped; + case kRGBA_F16_SkColorType: return GrColorType::kRGBA_F16; + case kRGBA_1010102_SkColorType: return GrColorType::kRGBA_1010102; + case kRGB_101010x_SkColorType: return GrColorType::kUnknown; + case kBGRA_1010102_SkColorType: return GrColorType::kBGRA_1010102; + case kBGR_101010x_SkColorType: return GrColorType::kUnknown; + case kRGBA_F32_SkColorType: return GrColorType::kRGBA_F32; + case kR8G8_unorm_SkColorType: return GrColorType::kRG_88; + case kA16_unorm_SkColorType: return GrColorType::kAlpha_16; + case kR16G16_unorm_SkColorType: return GrColorType::kRG_1616; + case kA16_float_SkColorType: return GrColorType::kAlpha_F16; + case kR16G16_float_SkColorType: return GrColorType::kRG_F16; + case kR16G16B16A16_unorm_SkColorType: return GrColorType::kRGBA_16161616; + } + SkUNREACHABLE; +} + +static constexpr uint32_t GrColorTypeChannelFlags(GrColorType ct) { + switch (ct) { + case GrColorType::kUnknown: return 0; + case GrColorType::kAlpha_8: return kAlpha_SkColorChannelFlag; + case GrColorType::kBGR_565: return kRGB_SkColorChannelFlags; + case GrColorType::kABGR_4444: return kRGBA_SkColorChannelFlags; + case GrColorType::kRGBA_8888: return kRGBA_SkColorChannelFlags; + case GrColorType::kRGBA_8888_SRGB: return kRGBA_SkColorChannelFlags; + case GrColorType::kRGB_888x: return kRGB_SkColorChannelFlags; + case GrColorType::kRG_88: return kRG_SkColorChannelFlags; + case GrColorType::kBGRA_8888: return kRGBA_SkColorChannelFlags; + case GrColorType::kRGBA_1010102: return kRGBA_SkColorChannelFlags; + case GrColorType::kBGRA_1010102: return kRGBA_SkColorChannelFlags; + case GrColorType::kGray_8: return kGray_SkColorChannelFlag; + case GrColorType::kGrayAlpha_88: return kGrayAlpha_SkColorChannelFlags; + case GrColorType::kAlpha_F16: return kAlpha_SkColorChannelFlag; + case GrColorType::kRGBA_F16: return kRGBA_SkColorChannelFlags; + case GrColorType::kRGBA_F16_Clamped: return kRGBA_SkColorChannelFlags; + case GrColorType::kRGBA_F32: return kRGBA_SkColorChannelFlags; + case GrColorType::kAlpha_8xxx: return kAlpha_SkColorChannelFlag; + case GrColorType::kAlpha_F32xxx: return kAlpha_SkColorChannelFlag; + case GrColorType::kGray_8xxx: return kGray_SkColorChannelFlag; + case GrColorType::kAlpha_16: return kAlpha_SkColorChannelFlag; + case GrColorType::kRG_1616: return kRG_SkColorChannelFlags; + case GrColorType::kRGBA_16161616: return kRGBA_SkColorChannelFlags; + case GrColorType::kRG_F16: return kRG_SkColorChannelFlags; + case GrColorType::kRGB_888: return kRGB_SkColorChannelFlags; + case GrColorType::kR_8: return kRed_SkColorChannelFlag; + case GrColorType::kR_16: return kRed_SkColorChannelFlag; + case GrColorType::kR_F16: return kRed_SkColorChannelFlag; + case GrColorType::kGray_F16: return kGray_SkColorChannelFlag; + case GrColorType::kARGB_4444: return kRGBA_SkColorChannelFlags; + case GrColorType::kBGRA_4444: return kRGBA_SkColorChannelFlags; + } + SkUNREACHABLE; +} + +/** + * Describes the encoding of channel data in a GrColorType. + */ +enum class GrColorTypeEncoding { + kUnorm, + kSRGBUnorm, + // kSnorm, + kFloat, + // kSint + // kUint +}; + +/** + * Describes a GrColorType by how many bits are used for each color component and how they are + * encoded. Currently all the non-zero channels share a single GrColorTypeEncoding. This could be + * expanded to store separate encodings and to indicate which bits belong to which components. 
+ */ +class GrColorFormatDesc { +public: + static constexpr GrColorFormatDesc MakeRGBA(int rgba, GrColorTypeEncoding e) { + return {rgba, rgba, rgba, rgba, 0, e}; + } + + static constexpr GrColorFormatDesc MakeRGBA(int rgb, int a, GrColorTypeEncoding e) { + return {rgb, rgb, rgb, a, 0, e}; + } + + static constexpr GrColorFormatDesc MakeRGB(int rgb, GrColorTypeEncoding e) { + return {rgb, rgb, rgb, 0, 0, e}; + } + + static constexpr GrColorFormatDesc MakeRGB(int r, int g, int b, GrColorTypeEncoding e) { + return {r, g, b, 0, 0, e}; + } + + static constexpr GrColorFormatDesc MakeAlpha(int a, GrColorTypeEncoding e) { + return {0, 0, 0, a, 0, e}; + } + + static constexpr GrColorFormatDesc MakeR(int r, GrColorTypeEncoding e) { + return {r, 0, 0, 0, 0, e}; + } + + static constexpr GrColorFormatDesc MakeRG(int rg, GrColorTypeEncoding e) { + return {rg, rg, 0, 0, 0, e}; + } + + static constexpr GrColorFormatDesc MakeGray(int grayBits, GrColorTypeEncoding e) { + return {0, 0, 0, 0, grayBits, e}; + } + + static constexpr GrColorFormatDesc MakeGrayAlpha(int grayAlpha, GrColorTypeEncoding e) { + return {0, 0, 0, 0, grayAlpha, e}; + } + + static constexpr GrColorFormatDesc MakeInvalid() { return {}; } + + constexpr int r() const { return fRBits; } + constexpr int g() const { return fGBits; } + constexpr int b() const { return fBBits; } + constexpr int a() const { return fABits; } + constexpr int operator[](int c) const { + switch (c) { + case 0: return this->r(); + case 1: return this->g(); + case 2: return this->b(); + case 3: return this->a(); + } + SkUNREACHABLE; + } + + constexpr int gray() const { return fGrayBits; } + + constexpr GrColorTypeEncoding encoding() const { return fEncoding; } + +private: + int fRBits = 0; + int fGBits = 0; + int fBBits = 0; + int fABits = 0; + int fGrayBits = 0; + GrColorTypeEncoding fEncoding = GrColorTypeEncoding::kUnorm; + + constexpr GrColorFormatDesc() = default; + + constexpr GrColorFormatDesc(int r, int g, int b, int a, int gray, GrColorTypeEncoding encoding) + : fRBits(r), fGBits(g), fBBits(b), fABits(a), fGrayBits(gray), fEncoding(encoding) { + SkASSERT(r >= 0 && g >= 0 && b >= 0 && a >= 0 && gray >= 0); + SkASSERT(!gray || (!r && !g && !b)); + SkASSERT(r || g || b || a || gray); + } +}; + +static constexpr GrColorFormatDesc GrGetColorTypeDesc(GrColorType ct) { + switch (ct) { + case GrColorType::kUnknown: + return GrColorFormatDesc::MakeInvalid(); + case GrColorType::kAlpha_8: + return GrColorFormatDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kBGR_565: + return GrColorFormatDesc::MakeRGB(5, 6, 5, GrColorTypeEncoding::kUnorm); + case GrColorType::kABGR_4444: + return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm); + case GrColorType::kRGBA_8888: + return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kRGBA_8888_SRGB: + return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kSRGBUnorm); + case GrColorType::kRGB_888x: + return GrColorFormatDesc::MakeRGB(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kRG_88: + return GrColorFormatDesc::MakeRG(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kBGRA_8888: + return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kRGBA_1010102: + return GrColorFormatDesc::MakeRGBA(10, 2, GrColorTypeEncoding::kUnorm); + case GrColorType::kBGRA_1010102: + return GrColorFormatDesc::MakeRGBA(10, 2, GrColorTypeEncoding::kUnorm); + case GrColorType::kGray_8: + return GrColorFormatDesc::MakeGray(8, 
GrColorTypeEncoding::kUnorm); + case GrColorType::kGrayAlpha_88: + return GrColorFormatDesc::MakeGrayAlpha(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kAlpha_F16: + return GrColorFormatDesc::MakeAlpha(16, GrColorTypeEncoding::kFloat); + case GrColorType::kRGBA_F16: + return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat); + case GrColorType::kRGBA_F16_Clamped: + return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat); + case GrColorType::kRGBA_F32: + return GrColorFormatDesc::MakeRGBA(32, GrColorTypeEncoding::kFloat); + case GrColorType::kAlpha_8xxx: + return GrColorFormatDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kAlpha_F32xxx: + return GrColorFormatDesc::MakeAlpha(32, GrColorTypeEncoding::kFloat); + case GrColorType::kGray_8xxx: + return GrColorFormatDesc::MakeGray(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kAlpha_16: + return GrColorFormatDesc::MakeAlpha(16, GrColorTypeEncoding::kUnorm); + case GrColorType::kRG_1616: + return GrColorFormatDesc::MakeRG(16, GrColorTypeEncoding::kUnorm); + case GrColorType::kRGBA_16161616: + return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kUnorm); + case GrColorType::kRG_F16: + return GrColorFormatDesc::MakeRG(16, GrColorTypeEncoding::kFloat); + case GrColorType::kRGB_888: + return GrColorFormatDesc::MakeRGB(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kR_8: + return GrColorFormatDesc::MakeR(8, GrColorTypeEncoding::kUnorm); + case GrColorType::kR_16: + return GrColorFormatDesc::MakeR(16, GrColorTypeEncoding::kUnorm); + case GrColorType::kR_F16: + return GrColorFormatDesc::MakeR(16, GrColorTypeEncoding::kFloat); + case GrColorType::kGray_F16: + return GrColorFormatDesc::MakeGray(16, GrColorTypeEncoding::kFloat); + case GrColorType::kARGB_4444: + return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm); + case GrColorType::kBGRA_4444: + return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm); + } + SkUNREACHABLE; +} + +static constexpr GrClampType GrColorTypeClampType(GrColorType colorType) { + if (GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kUnorm || + GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kSRGBUnorm) { + return GrClampType::kAuto; + } + return GrColorType::kRGBA_F16_Clamped == colorType ? GrClampType::kManual : GrClampType::kNone; +} + +// Consider a color type "wider" than n if it has more than n bits for any its representable +// channels. 
+static constexpr bool GrColorTypeIsWiderThan(GrColorType colorType, int n) { + SkASSERT(n > 0); + auto desc = GrGetColorTypeDesc(colorType); + return (desc.r() && desc.r() > n )|| + (desc.g() && desc.g() > n) || + (desc.b() && desc.b() > n) || + (desc.a() && desc.a() > n) || + (desc.gray() && desc.gray() > n); +} + +static constexpr bool GrColorTypeIsAlphaOnly(GrColorType ct) { + return GrColorTypeChannelFlags(ct) == kAlpha_SkColorChannelFlag; +} + +static constexpr bool GrColorTypeHasAlpha(GrColorType ct) { + return GrColorTypeChannelFlags(ct) & kAlpha_SkColorChannelFlag; +} + +static constexpr size_t GrColorTypeBytesPerPixel(GrColorType ct) { + switch (ct) { + case GrColorType::kUnknown: return 0; + case GrColorType::kAlpha_8: return 1; + case GrColorType::kBGR_565: return 2; + case GrColorType::kABGR_4444: return 2; + case GrColorType::kRGBA_8888: return 4; + case GrColorType::kRGBA_8888_SRGB: return 4; + case GrColorType::kRGB_888x: return 4; + case GrColorType::kRG_88: return 2; + case GrColorType::kBGRA_8888: return 4; + case GrColorType::kRGBA_1010102: return 4; + case GrColorType::kBGRA_1010102: return 4; + case GrColorType::kGray_8: return 1; + case GrColorType::kGrayAlpha_88: return 2; + case GrColorType::kAlpha_F16: return 2; + case GrColorType::kRGBA_F16: return 8; + case GrColorType::kRGBA_F16_Clamped: return 8; + case GrColorType::kRGBA_F32: return 16; + case GrColorType::kAlpha_8xxx: return 4; + case GrColorType::kAlpha_F32xxx: return 16; + case GrColorType::kGray_8xxx: return 4; + case GrColorType::kAlpha_16: return 2; + case GrColorType::kRG_1616: return 4; + case GrColorType::kRGBA_16161616: return 8; + case GrColorType::kRG_F16: return 4; + case GrColorType::kRGB_888: return 3; + case GrColorType::kR_8: return 1; + case GrColorType::kR_16: return 2; + case GrColorType::kR_F16: return 2; + case GrColorType::kGray_F16: return 2; + case GrColorType::kARGB_4444: return 2; + case GrColorType::kBGRA_4444: return 2; + } + SkUNREACHABLE; +} + +// In general we try to not mix CompressionType and ColorType, but currently SkImage still requires +// an SkColorType even for CompressedTypes so we need some conversion. +static constexpr SkColorType GrCompressionTypeToSkColorType(SkImage::CompressionType compression) { + switch (compression) { + case SkImage::CompressionType::kNone: return kUnknown_SkColorType; + case SkImage::CompressionType::kETC2_RGB8_UNORM: return kRGB_888x_SkColorType; + case SkImage::CompressionType::kBC1_RGB8_UNORM: return kRGB_888x_SkColorType; + case SkImage::CompressionType::kBC1_RGBA8_UNORM: return kRGBA_8888_SkColorType; + } + + SkUNREACHABLE; +} + +static constexpr GrColorType GrMaskFormatToColorType(GrMaskFormat format) { + switch (format) { + case kA8_GrMaskFormat: + return GrColorType::kAlpha_8; + case kA565_GrMaskFormat: + return GrColorType::kBGR_565; + case kARGB_GrMaskFormat: + return GrColorType::kRGBA_8888; + } + SkUNREACHABLE; +} + +/** + * Ref-counted object that calls a callback from its destructor. 
+ */ +class GrRefCntedCallback : public SkNVRefCnt<GrRefCntedCallback> { +public: + using Context = void*; + using Callback = void (*)(Context); + + static sk_sp<GrRefCntedCallback> Make(Callback proc, Context ctx) { + if (!proc) { + return nullptr; + } + return sk_sp<GrRefCntedCallback>(new GrRefCntedCallback(proc, ctx)); + } + + ~GrRefCntedCallback() { fReleaseProc(fReleaseCtx); } + + Context context() const { return fReleaseCtx; } + +private: + GrRefCntedCallback(Callback proc, Context ctx) : fReleaseProc(proc), fReleaseCtx(ctx) {} + GrRefCntedCallback(const GrRefCntedCallback&) = delete; + GrRefCntedCallback(GrRefCntedCallback&&) = delete; + GrRefCntedCallback& operator=(const GrRefCntedCallback&) = delete; + GrRefCntedCallback& operator=(GrRefCntedCallback&&) = delete; + + Callback fReleaseProc; + Context fReleaseCtx; +}; + +enum class GrDstSampleFlags { + kNone = 0, + kRequiresTextureBarrier = 1 << 0, + kAsInputAttachment = 1 << 1, +}; +GR_MAKE_BITFIELD_CLASS_OPS(GrDstSampleFlags) + +using GrVisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipmapped)>; + +#if defined(SK_DEBUG) || GR_TEST_UTILS || defined(SK_ENABLE_DUMP_GPU) +static constexpr const char* GrBackendApiToStr(GrBackendApi api) { + switch (api) { + case GrBackendApi::kOpenGL: return "OpenGL"; + case GrBackendApi::kVulkan: return "Vulkan"; + case GrBackendApi::kMetal: return "Metal"; + case GrBackendApi::kDirect3D: return "Direct3D"; + case GrBackendApi::kDawn: return "Dawn"; + case GrBackendApi::kMock: return "Mock"; + } + SkUNREACHABLE; +} + +static constexpr const char* GrColorTypeToStr(GrColorType ct) { + switch (ct) { + case GrColorType::kUnknown: return "kUnknown"; + case GrColorType::kAlpha_8: return "kAlpha_8"; + case GrColorType::kBGR_565: return "kRGB_565"; + case GrColorType::kABGR_4444: return "kABGR_4444"; + case GrColorType::kRGBA_8888: return "kRGBA_8888"; + case GrColorType::kRGBA_8888_SRGB: return "kRGBA_8888_SRGB"; + case GrColorType::kRGB_888x: return "kRGB_888x"; + case GrColorType::kRG_88: return "kRG_88"; + case GrColorType::kBGRA_8888: return "kBGRA_8888"; + case GrColorType::kRGBA_1010102: return "kRGBA_1010102"; + case GrColorType::kBGRA_1010102: return "kBGRA_1010102"; + case GrColorType::kGray_8: return "kGray_8"; + case GrColorType::kGrayAlpha_88: return "kGrayAlpha_88"; + case GrColorType::kAlpha_F16: return "kAlpha_F16"; + case GrColorType::kRGBA_F16: return "kRGBA_F16"; + case GrColorType::kRGBA_F16_Clamped: return "kRGBA_F16_Clamped"; + case GrColorType::kRGBA_F32: return "kRGBA_F32"; + case GrColorType::kAlpha_8xxx: return "kAlpha_8xxx"; + case GrColorType::kAlpha_F32xxx: return "kAlpha_F32xxx"; + case GrColorType::kGray_8xxx: return "kGray_8xxx"; + case GrColorType::kAlpha_16: return "kAlpha_16"; + case GrColorType::kRG_1616: return "kRG_1616"; + case GrColorType::kRGBA_16161616: return "kRGBA_16161616"; + case GrColorType::kRG_F16: return "kRG_F16"; + case GrColorType::kRGB_888: return "kRGB_888"; + case GrColorType::kR_8: return "kR_8"; + case GrColorType::kR_16: return "kR_16"; + case GrColorType::kR_F16: return "kR_F16"; + case GrColorType::kGray_F16: return "kGray_F16"; + case GrColorType::kARGB_4444: return "kARGB_4444"; + case GrColorType::kBGRA_4444: return "kBGRA_4444"; + } + SkUNREACHABLE; +} + +static constexpr const char* GrCompressionTypeToStr(SkImage::CompressionType compression) { + switch (compression) { + case SkImage::CompressionType::kNone: return "kNone"; + case SkImage::CompressionType::kETC2_RGB8_UNORM: return "kETC2_RGB8_UNORM"; + case 
SkImage::CompressionType::kBC1_RGB8_UNORM: return "kBC1_RGB8_UNORM";
+        case SkImage::CompressionType::kBC1_RGBA8_UNORM: return "kBC1_RGBA8_UNORM";
+    }
+    SkUNREACHABLE;
+}
+#endif
+
+#endif
diff --git a/src/deps/skia/include/private/GrVkTypesPriv.h b/src/deps/skia/include/private/GrVkTypesPriv.h
new file mode 100644
index 000000000..cec98c404
--- /dev/null
+++ b/src/deps/skia/include/private/GrVkTypesPriv.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypesPriv_DEFINED
+#define GrVkTypesPriv_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+class GrBackendSurfaceMutableStateImpl;
+
+// This struct is used to store the actual information about the Vulkan backend image on the
+// GrBackendTexture and GrBackendRenderTarget. When a client calls getVkImageInfo on a
+// GrBackendTexture/RenderTarget, we use the GrVkBackendSurfaceInfo to create a snapshot
+// GrVkImageInfo object. Internally, this uses a ref-counted GrVkImageLayout object to track the
+// current VkImageLayout, which can be shared with an internal GrVkImage so that layout updates
+// can be seen by all users of the image.
+struct GrVkBackendSurfaceInfo {
+    GrVkBackendSurfaceInfo(GrVkImageInfo info) : fImageInfo(info) {}
+
+    void cleanup();
+
+    GrVkBackendSurfaceInfo& operator=(const GrVkBackendSurfaceInfo&) = delete;
+
+    // Assigns the passed in GrVkBackendSurfaceInfo to this object. If isValid is true we will
+    // also attempt to unref the old fLayout on this object.
+    void assign(const GrVkBackendSurfaceInfo&, bool isValid);
+
+    GrVkImageInfo snapImageInfo(const GrBackendSurfaceMutableStateImpl*) const;
+
+    bool isProtected() const { return fImageInfo.fProtected == GrProtected::kYes; }
+#if GR_TEST_UTILS
+    bool operator==(const GrVkBackendSurfaceInfo& that) const;
+#endif
+
+private:
+    GrVkImageInfo fImageInfo;
+};
+
+class GrVkSharedImageInfo {
+public:
+    GrVkSharedImageInfo(VkImageLayout layout, uint32_t queueFamilyIndex)
+            : fLayout(layout)
+            , fQueueFamilyIndex(queueFamilyIndex) {}
+
+    GrVkSharedImageInfo& operator=(const GrVkSharedImageInfo& that) {
+        fLayout = that.getImageLayout();
+        fQueueFamilyIndex = that.getQueueFamilyIndex();
+        return *this;
+    }
+
+    void setImageLayout(VkImageLayout layout) {
+        // Defaulting to use std::memory_order_seq_cst
+        fLayout.store(layout);
+    }
+
+    VkImageLayout getImageLayout() const {
+        // Defaulting to use std::memory_order_seq_cst
+        return fLayout.load();
+    }
+
+    void setQueueFamilyIndex(uint32_t queueFamilyIndex) {
+        // Defaulting to use std::memory_order_seq_cst
+        fQueueFamilyIndex.store(queueFamilyIndex);
+    }
+
+    uint32_t getQueueFamilyIndex() const {
+        // Defaulting to use std::memory_order_seq_cst
+        return fQueueFamilyIndex.load();
+    }
+
+private:
+    std::atomic<VkImageLayout> fLayout;
+    std::atomic<uint32_t> fQueueFamilyIndex;
+};
+
+struct GrVkImageSpec {
+    GrVkImageSpec()
+            : fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+            , fFormat(VK_FORMAT_UNDEFINED)
+            , fImageUsageFlags(0)
+            , fSharingMode(VK_SHARING_MODE_EXCLUSIVE) {}
+
+    GrVkImageSpec(const GrVkSurfaceInfo& info)
+            : fImageTiling(info.fImageTiling)
+            , fFormat(info.fFormat)
+            , fImageUsageFlags(info.fImageUsageFlags)
+            , fYcbcrConversionInfo(info.fYcbcrConversionInfo)
+            , fSharingMode(info.fSharingMode) {}
+
+    VkImageTiling fImageTiling;
+    VkFormat fFormat;
+    VkImageUsageFlags fImageUsageFlags;
+    GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+    VkSharingMode fSharingMode;
+};
+
+GrVkSurfaceInfo GrVkImageSpecToSurfaceInfo(const GrVkImageSpec& vkSpec,
+                                           uint32_t sampleCount,
+                                           uint32_t levelCount,
+                                           GrProtected isProtected);
+
+#endif
diff --git a/src/deps/skia/include/private/OWNERS b/src/deps/skia/include/private/OWNERS
new file mode 100644
index 000000000..7cf12a2a7
--- /dev/null
+++ b/src/deps/skia/include/private/OWNERS
@@ -0,0 +1,4 @@
+# include/ has a restricted set of reviewers (to limit changes to public API)
+# Files in this directory follow the same rules as the rest of Skia, though:
+
+file:../../OWNERS
diff --git a/src/deps/skia/include/private/SkBitmaskEnum.h b/src/deps/skia/include/private/SkBitmaskEnum.h
new file mode 100644
index 000000000..b25045359
--- /dev/null
+++ b/src/deps/skia/include/private/SkBitmaskEnum.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEnumOperators_DEFINED
+#define SkEnumOperators_DEFINED
+
+#include <type_traits>
+
+namespace sknonstd {
+template <typename T> struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, bool> constexpr Any(E e) {
+    return static_cast<std::underlying_type_t<E>>(e) != 0;
+}
+} // namespace sknonstd
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator|(E l, E r) {
+    using U = std::underlying_type_t<E>;
+    return static_cast<E>(static_cast<U>(l) | static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator|=(E& l, E r) {
+    return l = l | r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator&(E l, E r) {
+    using U = std::underlying_type_t<E>;
+    return static_cast<E>(static_cast<U>(l) & static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator&=(E& l, E r) {
+    return l = l & r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator^(E l, E r) {
+    using U = std::underlying_type_t<E>;
+    return static_cast<E>(static_cast<U>(l) ^ static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator^=(E& l, E r) {
+    return l = l ^ r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator~(E e) {
+    return static_cast<E>(~static_cast<std::underlying_type_t<E>>(e));
+}
+
+#endif // SkEnumOperators_DEFINED
diff --git a/src/deps/skia/include/private/SkChecksum.h b/src/deps/skia/include/private/SkChecksum.h
new file mode 100644
index 000000000..6339239d6
--- /dev/null
+++ b/src/deps/skia/include/private/SkChecksum.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_DEFINED
+#define SkChecksum_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkNoncopyable.h"
+#include "include/private/SkOpts_spi.h"
+#include "include/private/SkTLogic.h"
+
+class SkChecksum : SkNoncopyable {
+public:
+    /**
+     * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you
+     * suspect its low bits aren't well mixed.
+     *
+     * This is the Murmur3 finalizer.
+     */
+    static uint32_t Mix(uint32_t hash) {
+        hash ^= hash >> 16;
+        hash *= 0x85ebca6b;
+        hash ^= hash >> 13;
+        hash *= 0xc2b2ae35;
+        hash ^= hash >> 16;
+        return hash;
+    }
+
+    /**
+     * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you
+     * suspect its low bits aren't well mixed.
+     *
+     * This version is two lines cheaper than Mix, but seems to be sufficient for the font cache.
+     */
+    static uint32_t CheapMix(uint32_t hash) {
+        hash ^= hash >> 16;
+        hash *= 0x85ebca6b;
+        hash ^= hash >> 16;
+        return hash;
+    }
+};
+
+// SkGoodHash should usually be your first choice in hashing data.
+// It should be both reasonably fast and high quality.
+struct SkGoodHash {
+    template <typename K>
+    std::enable_if_t<sizeof(K) == 4, uint32_t> operator()(const K& k) const {
+        return SkChecksum::Mix(*(const uint32_t*)&k);
+    }
+
+    template <typename K>
+    std::enable_if_t<sizeof(K) != 4, uint32_t> operator()(const K& k) const {
+        return SkOpts::hash_fn(&k, sizeof(K), 0);
+    }
+
+    uint32_t operator()(const SkString& k) const {
+        return SkOpts::hash_fn(k.c_str(), k.size(), 0);
+    }
+};
+
+#endif
diff --git a/src/deps/skia/include/private/SkColorData.h b/src/deps/skia/include/private/SkColorData.h
new file mode 100644
index 000000000..a59e7b044
--- /dev/null
+++ b/src/deps/skia/include/private/SkColorData.h
@@ -0,0 +1,441 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorData_DEFINED
+#define SkColorData_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/private/SkNx.h"
+#include "include/private/SkTo.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 16bit pixel to a 32bit pixel
+
+#define SK_R16_BITS 5
+#define SK_G16_BITS 6
+#define SK_B16_BITS 5
+
+#define SK_R16_SHIFT (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT (SK_B16_BITS)
+#define SK_B16_SHIFT 0
+
+#define SK_R16_MASK ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color) (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color) (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color) (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
+static inline unsigned SkR16ToR32(unsigned r) {
+    return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+    return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+    return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
+
+#define SkPacked16ToR32(c) SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c) SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c) SkB16ToB32(SkGetPackedB16(c))
+
+//////////////////////////////////////////////////////////////////////////////
+
+#define SkASSERT_IS_BYTE(x) SkASSERT(0 == ((x) & ~0xFFu))
+
+// Reverse the bytes corresponding to RED and BLUE in a packed pixel. Note that the
+// pair of them are in the same 2 slots in both RGBA and BGRA, thus there is
+// no need to pass in the colortype to this function.
+static inline uint32_t SkSwizzle_RB(uint32_t c) { + static const uint32_t kRBMask = (0xFF << SK_R32_SHIFT) | (0xFF << SK_B32_SHIFT); + + unsigned c0 = (c >> SK_R32_SHIFT) & 0xFF; + unsigned c1 = (c >> SK_B32_SHIFT) & 0xFF; + return (c & ~kRBMask) | (c0 << SK_B32_SHIFT) | (c1 << SK_R32_SHIFT); +} + +static inline uint32_t SkPackARGB_as_RGBA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) { + SkASSERT_IS_BYTE(a); + SkASSERT_IS_BYTE(r); + SkASSERT_IS_BYTE(g); + SkASSERT_IS_BYTE(b); + return (a << SK_RGBA_A32_SHIFT) | (r << SK_RGBA_R32_SHIFT) | + (g << SK_RGBA_G32_SHIFT) | (b << SK_RGBA_B32_SHIFT); +} + +static inline uint32_t SkPackARGB_as_BGRA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) { + SkASSERT_IS_BYTE(a); + SkASSERT_IS_BYTE(r); + SkASSERT_IS_BYTE(g); + SkASSERT_IS_BYTE(b); + return (a << SK_BGRA_A32_SHIFT) | (r << SK_BGRA_R32_SHIFT) | + (g << SK_BGRA_G32_SHIFT) | (b << SK_BGRA_B32_SHIFT); +} + +static inline SkPMColor SkSwizzle_RGBA_to_PMColor(uint32_t c) { +#ifdef SK_PMCOLOR_IS_RGBA + return c; +#else + return SkSwizzle_RB(c); +#endif +} + +static inline SkPMColor SkSwizzle_BGRA_to_PMColor(uint32_t c) { +#ifdef SK_PMCOLOR_IS_BGRA + return c; +#else + return SkSwizzle_RB(c); +#endif +} + +////////////////////////////////////////////////////////////////////////////// + +///@{ +/** See ITU-R Recommendation BT.709 at http://www.itu.int/rec/R-REC-BT.709/ .*/ +#define SK_ITU_BT709_LUM_COEFF_R (0.2126f) +#define SK_ITU_BT709_LUM_COEFF_G (0.7152f) +#define SK_ITU_BT709_LUM_COEFF_B (0.0722f) +///@} + +///@{ +/** A float value which specifies this channel's contribution to luminance. */ +#define SK_LUM_COEFF_R SK_ITU_BT709_LUM_COEFF_R +#define SK_LUM_COEFF_G SK_ITU_BT709_LUM_COEFF_G +#define SK_LUM_COEFF_B SK_ITU_BT709_LUM_COEFF_B +///@} + +/** Computes the luminance from the given r, g, and b in accordance with + SK_LUM_COEFF_X. For correct results, r, g, and b should be in linear space. +*/ +static inline U8CPU SkComputeLuminance(U8CPU r, U8CPU g, U8CPU b) { + //The following is + //r * SK_LUM_COEFF_R + g * SK_LUM_COEFF_G + b * SK_LUM_COEFF_B + //with SK_LUM_COEFF_X in 1.8 fixed point (rounding adjusted to sum to 256). + return (r * 54 + g * 183 + b * 19) >> 8; +} + +/** Calculates 256 - (value * alpha256) / 255 in range [0,256], + * for [0,255] value and [0,256] alpha256. + */ +static inline U16CPU SkAlphaMulInv256(U16CPU value, U16CPU alpha256) { + unsigned prod = 0xFFFF - value * alpha256; + return (prod + (prod >> 8)) >> 8; +} + +// The caller may want negative values, so keep all params signed (int) +// so we don't accidentally slip into unsigned math and lose the sign +// extension when we shift (in SkAlphaMul) +static inline int SkAlphaBlend(int src, int dst, int scale256) { + SkASSERT((unsigned)scale256 <= 256); + return dst + SkAlphaMul(src - dst, scale256); +} + +static inline uint16_t SkPackRGB16(unsigned r, unsigned g, unsigned b) { + SkASSERT(r <= SK_R16_MASK); + SkASSERT(g <= SK_G16_MASK); + SkASSERT(b <= SK_B16_MASK); + + return SkToU16((r << SK_R16_SHIFT) | (g << SK_G16_SHIFT) | (b << SK_B16_SHIFT)); +} + +#define SK_R16_MASK_IN_PLACE (SK_R16_MASK << SK_R16_SHIFT) +#define SK_G16_MASK_IN_PLACE (SK_G16_MASK << SK_G16_SHIFT) +#define SK_B16_MASK_IN_PLACE (SK_B16_MASK << SK_B16_SHIFT) + +/////////////////////////////////////////////////////////////////////////////// + +/** + * Abstract 4-byte interpolation, implemented on top of SkPMColor + * utility functions. 
Third parameter controls blending of the first two: + * (src, dst, 0) returns dst + * (src, dst, 0xFF) returns src + * scale is [0..256], unlike SkFourByteInterp which takes [0..255] + */ +static inline SkPMColor SkFourByteInterp256(SkPMColor src, SkPMColor dst, int scale) { + unsigned a = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedA32(src), SkGetPackedA32(dst), scale)); + unsigned r = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedR32(src), SkGetPackedR32(dst), scale)); + unsigned g = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedG32(src), SkGetPackedG32(dst), scale)); + unsigned b = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedB32(src), SkGetPackedB32(dst), scale)); + + return SkPackARGB32(a, r, g, b); +} + +/** + * Abstract 4-byte interpolation, implemented on top of SkPMColor + * utility functions. Third parameter controls blending of the first two: + * (src, dst, 0) returns dst + * (src, dst, 0xFF) returns src + */ +static inline SkPMColor SkFourByteInterp(SkPMColor src, SkPMColor dst, U8CPU srcWeight) { + int scale = (int)SkAlpha255To256(srcWeight); + return SkFourByteInterp256(src, dst, scale); +} + +/** + * 0xAARRGGBB -> 0x00AA00GG, 0x00RR00BB + */ +static inline void SkSplay(uint32_t color, uint32_t* ag, uint32_t* rb) { + const uint32_t mask = 0x00FF00FF; + *ag = (color >> 8) & mask; + *rb = color & mask; +} + +/** + * 0xAARRGGBB -> 0x00AA00GG00RR00BB + * (note, ARGB -> AGRB) + */ +static inline uint64_t SkSplay(uint32_t color) { + const uint32_t mask = 0x00FF00FF; + uint64_t agrb = (color >> 8) & mask; // 0x0000000000AA00GG + agrb <<= 32; // 0x00AA00GG00000000 + agrb |= color & mask; // 0x00AA00GG00RR00BB + return agrb; +} + +/** + * 0xAAxxGGxx, 0xRRxxBBxx-> 0xAARRGGBB + */ +static inline uint32_t SkUnsplay(uint32_t ag, uint32_t rb) { + const uint32_t mask = 0xFF00FF00; + return (ag & mask) | ((rb & mask) >> 8); +} + +/** + * 0xAAxxGGxxRRxxBBxx -> 0xAARRGGBB + * (note, AGRB -> ARGB) + */ +static inline uint32_t SkUnsplay(uint64_t agrb) { + const uint32_t mask = 0xFF00FF00; + return SkPMColor( + ((agrb & mask) >> 8) | // 0x00RR00BB + ((agrb >> 32) & mask)); // 0xAARRGGBB +} + +static inline SkPMColor SkFastFourByteInterp256_32(SkPMColor src, SkPMColor dst, unsigned scale) { + SkASSERT(scale <= 256); + + // Two 8-bit blends per two 32-bit registers, with space to make sure the math doesn't collide. + uint32_t src_ag, src_rb, dst_ag, dst_rb; + SkSplay(src, &src_ag, &src_rb); + SkSplay(dst, &dst_ag, &dst_rb); + + const uint32_t ret_ag = src_ag * scale + (256 - scale) * dst_ag; + const uint32_t ret_rb = src_rb * scale + (256 - scale) * dst_rb; + + return SkUnsplay(ret_ag, ret_rb); +} + +static inline SkPMColor SkFastFourByteInterp256_64(SkPMColor src, SkPMColor dst, unsigned scale) { + SkASSERT(scale <= 256); + // Four 8-bit blends in one 64-bit register, with space to make sure the math doesn't collide. + return SkUnsplay(SkSplay(src) * scale + (256-scale) * SkSplay(dst)); +} + +// TODO(mtklein): Replace slow versions with fast versions, using scale + (scale>>7) everywhere. + +/** + * Same as SkFourByteInterp256, but faster. + */ +static inline SkPMColor SkFastFourByteInterp256(SkPMColor src, SkPMColor dst, unsigned scale) { + // On a 64-bit machine, _64 is about 10% faster than _32, but ~40% slower on a 32-bit machine. + if (sizeof(void*) == 4) { + return SkFastFourByteInterp256_32(src, dst, scale); + } else { + return SkFastFourByteInterp256_64(src, dst, scale); + } +} + +/** + * Nearly the same as SkFourByteInterp, but faster and a touch more accurate, due to better + * srcWeight scaling to [0, 256]. 
+ */ +static inline SkPMColor SkFastFourByteInterp(SkPMColor src, SkPMColor dst, U8CPU srcWeight) { + SkASSERT(srcWeight <= 255); + // scale = srcWeight + (srcWeight >> 7) is more accurate than + // scale = srcWeight + 1, but 7% slower + return SkFastFourByteInterp256(src, dst, srcWeight + (srcWeight >> 7)); +} + +/** + * Interpolates between colors src and dst using [0,256] scale. + */ +static inline SkPMColor SkPMLerp(SkPMColor src, SkPMColor dst, unsigned scale) { + return SkFastFourByteInterp256(src, dst, scale); +} + +static inline SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa) { + SkASSERT((unsigned)aa <= 255); + + unsigned src_scale = SkAlpha255To256(aa); + unsigned dst_scale = SkAlphaMulInv256(SkGetPackedA32(src), src_scale); + + const uint32_t mask = 0xFF00FF; + + uint32_t src_rb = (src & mask) * src_scale; + uint32_t src_ag = ((src >> 8) & mask) * src_scale; + + uint32_t dst_rb = (dst & mask) * dst_scale; + uint32_t dst_ag = ((dst >> 8) & mask) * dst_scale; + + return (((src_rb + dst_rb) >> 8) & mask) | ((src_ag + dst_ag) & ~mask); +} + +//////////////////////////////////////////////////////////////////////////////////////////// +// Convert a 32bit pixel to a 16bit pixel (no dither) + +#define SkR32ToR16_MACRO(r) ((unsigned)(r) >> (SK_R32_BITS - SK_R16_BITS)) +#define SkG32ToG16_MACRO(g) ((unsigned)(g) >> (SK_G32_BITS - SK_G16_BITS)) +#define SkB32ToB16_MACRO(b) ((unsigned)(b) >> (SK_B32_BITS - SK_B16_BITS)) + +#ifdef SK_DEBUG + static inline unsigned SkR32ToR16(unsigned r) { + SkR32Assert(r); + return SkR32ToR16_MACRO(r); + } + static inline unsigned SkG32ToG16(unsigned g) { + SkG32Assert(g); + return SkG32ToG16_MACRO(g); + } + static inline unsigned SkB32ToB16(unsigned b) { + SkB32Assert(b); + return SkB32ToB16_MACRO(b); + } +#else + #define SkR32ToR16(r) SkR32ToR16_MACRO(r) + #define SkG32ToG16(g) SkG32ToG16_MACRO(g) + #define SkB32ToB16(b) SkB32ToB16_MACRO(b) +#endif + +static inline U16CPU SkPixel32ToPixel16(SkPMColor c) { + unsigned r = ((c >> (SK_R32_SHIFT + (8 - SK_R16_BITS))) & SK_R16_MASK) << SK_R16_SHIFT; + unsigned g = ((c >> (SK_G32_SHIFT + (8 - SK_G16_BITS))) & SK_G16_MASK) << SK_G16_SHIFT; + unsigned b = ((c >> (SK_B32_SHIFT + (8 - SK_B16_BITS))) & SK_B16_MASK) << SK_B16_SHIFT; + return r | g | b; +} + +static inline U16CPU SkPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) { + return (SkR32ToR16(r) << SK_R16_SHIFT) | + (SkG32ToG16(g) << SK_G16_SHIFT) | + (SkB32ToB16(b) << SK_B16_SHIFT); +} + +///////////////////////////////////////////////////////////////////////////////////////// + +/* SrcOver the 32bit src color with the 16bit dst, returning a 16bit value + (with dirt in the high 16bits, so caller beware). 
+*/ +static inline U16CPU SkSrcOver32To16(SkPMColor src, uint16_t dst) { + unsigned sr = SkGetPackedR32(src); + unsigned sg = SkGetPackedG32(src); + unsigned sb = SkGetPackedB32(src); + + unsigned dr = SkGetPackedR16(dst); + unsigned dg = SkGetPackedG16(dst); + unsigned db = SkGetPackedB16(dst); + + unsigned isa = 255 - SkGetPackedA32(src); + + dr = (sr + SkMul16ShiftRound(dr, isa, SK_R16_BITS)) >> (8 - SK_R16_BITS); + dg = (sg + SkMul16ShiftRound(dg, isa, SK_G16_BITS)) >> (8 - SK_G16_BITS); + db = (sb + SkMul16ShiftRound(db, isa, SK_B16_BITS)) >> (8 - SK_B16_BITS); + + return SkPackRGB16(dr, dg, db); +} + +static inline SkColor SkPixel16ToColor(U16CPU src) { + SkASSERT(src == SkToU16(src)); + + unsigned r = SkPacked16ToR32(src); + unsigned g = SkPacked16ToG32(src); + unsigned b = SkPacked16ToB32(src); + + SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src)); + SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src)); + SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src)); + + return SkColorSetRGB(r, g, b); +} + +/////////////////////////////////////////////////////////////////////////////// + +typedef uint16_t SkPMColor16; + +// Put in OpenGL order (r g b a) +#define SK_A4444_SHIFT 0 +#define SK_R4444_SHIFT 12 +#define SK_G4444_SHIFT 8 +#define SK_B4444_SHIFT 4 + +static inline U8CPU SkReplicateNibble(unsigned nib) { + SkASSERT(nib <= 0xF); + return (nib << 4) | nib; +} + +#define SkGetPackedA4444(c) (((unsigned)(c) >> SK_A4444_SHIFT) & 0xF) +#define SkGetPackedR4444(c) (((unsigned)(c) >> SK_R4444_SHIFT) & 0xF) +#define SkGetPackedG4444(c) (((unsigned)(c) >> SK_G4444_SHIFT) & 0xF) +#define SkGetPackedB4444(c) (((unsigned)(c) >> SK_B4444_SHIFT) & 0xF) + +#define SkPacked4444ToA32(c) SkReplicateNibble(SkGetPackedA4444(c)) + +static inline SkPMColor SkPixel4444ToPixel32(U16CPU c) { + uint32_t d = (SkGetPackedA4444(c) << SK_A32_SHIFT) | + (SkGetPackedR4444(c) << SK_R32_SHIFT) | + (SkGetPackedG4444(c) << SK_G32_SHIFT) | + (SkGetPackedB4444(c) << SK_B32_SHIFT); + return d | (d << 4); +} + +static inline Sk4f swizzle_rb(const Sk4f& x) { + return SkNx_shuffle<2, 1, 0, 3>(x); +} + +static inline Sk4f swizzle_rb_if_bgra(const Sk4f& x) { +#ifdef SK_PMCOLOR_IS_BGRA + return swizzle_rb(x); +#else + return x; +#endif +} + +static inline Sk4f Sk4f_fromL32(uint32_t px) { + return SkNx_cast<float>(Sk4b::Load(&px)) * (1 / 255.0f); +} + +static inline uint32_t Sk4f_toL32(const Sk4f& px) { + Sk4f v = px; + +#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 + // SkNx_cast<uint8_t, int32_t>() pins, and we don't anticipate giant floats +#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON) + // SkNx_cast<uint8_t, int32_t>() pins, and so does Sk4f_round(). +#else + // No guarantee of a pin. 
+ v = Sk4f::Max(0, Sk4f::Min(v, 1)); +#endif + + uint32_t l32; + SkNx_cast<uint8_t>(Sk4f_round(v * 255.0f)).store(&l32); + return l32; +} + +using SkPMColor4f = SkRGBA4f<kPremul_SkAlphaType>; + +constexpr SkPMColor4f SK_PMColor4fTRANSPARENT = { 0, 0, 0, 0 }; +constexpr SkPMColor4f SK_PMColor4fBLACK = { 0, 0, 0, 1 }; +constexpr SkPMColor4f SK_PMColor4fWHITE = { 1, 1, 1, 1 }; +constexpr SkPMColor4f SK_PMColor4fILLEGAL = { SK_FloatNegativeInfinity, + SK_FloatNegativeInfinity, + SK_FloatNegativeInfinity, + SK_FloatNegativeInfinity }; + +#endif diff --git a/src/deps/skia/include/private/SkDeque.h b/src/deps/skia/include/private/SkDeque.h new file mode 100644 index 000000000..8adc39c1c --- /dev/null +++ b/src/deps/skia/include/private/SkDeque.h @@ -0,0 +1,141 @@ + +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + + +#ifndef SkDeque_DEFINED +#define SkDeque_DEFINED + +#include "include/core/SkTypes.h" + +/* + * The deque class works by blindly creating memory space of a specified element + * size. It manages the memory as a doubly linked list of blocks each of which + * can contain multiple elements. Pushes and pops add/remove blocks from the + * beginning/end of the list as necessary while each block tracks the used + * portion of its memory. + * One behavior to be aware of is that the pops do not immediately remove an + * empty block from the beginning/end of the list (Presumably so push/pop pairs + * on the block boundaries don't cause thrashing). This can result in the first/ + * last element not residing in the first/last block. + */ +class SK_API SkDeque { +public: + /** + * elemSize specifies the size of each individual element in the deque + * allocCount specifies how many elements are to be allocated as a block + */ + explicit SkDeque(size_t elemSize, int allocCount = 1); + SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount = 1); + ~SkDeque(); + + bool empty() const { return 0 == fCount; } + int count() const { return fCount; } + size_t elemSize() const { return fElemSize; } + + const void* front() const { return fFront; } + const void* back() const { return fBack; } + + void* front() { + return (void*)((const SkDeque*)this)->front(); + } + + void* back() { + return (void*)((const SkDeque*)this)->back(); + } + + /** + * push_front and push_back return a pointer to the memory space + * for the new element + */ + void* push_front(); + void* push_back(); + + void pop_front(); + void pop_back(); + +private: + struct Block; + +public: + class Iter { + public: + enum IterStart { + kFront_IterStart, + kBack_IterStart, + }; + + /** + * Creates an uninitialized iterator. 
Must be reset() + */ + Iter(); + + Iter(const SkDeque& d, IterStart startLoc); + void* next(); + void* prev(); + + void reset(const SkDeque& d, IterStart startLoc); + + private: + SkDeque::Block* fCurBlock; + char* fPos; + size_t fElemSize; + }; + + // Inherit privately from Iter to prevent access to reverse iteration + class F2BIter : private Iter { + public: + F2BIter() {} + + /** + * Wrap Iter's 2 parameter ctor to force initialization to the + * beginning of the deque + */ + F2BIter(const SkDeque& d) : INHERITED(d, kFront_IterStart) {} + + using Iter::next; + + /** + * Wrap Iter::reset to force initialization to the beginning of the + * deque + */ + void reset(const SkDeque& d) { + this->INHERITED::reset(d, kFront_IterStart); + } + + private: + using INHERITED = Iter; + }; + +private: + // allow unit test to call numBlocksAllocated + friend class DequeUnitTestHelper; + + void* fFront; + void* fBack; + + Block* fFrontBlock; + Block* fBackBlock; + size_t fElemSize; + void* fInitialStorage; + int fCount; // number of elements in the deque + int fAllocCount; // number of elements to allocate per block + + Block* allocateBlock(int allocCount); + void freeBlock(Block* block); + + /** + * This returns the number of chunk blocks allocated by the deque. It + * can be used to gauge the effectiveness of the selected allocCount. + */ + int numBlocksAllocated() const; + + SkDeque(const SkDeque&) = delete; + SkDeque& operator=(const SkDeque&) = delete; +}; + +#endif diff --git a/src/deps/skia/include/private/SkEncodedInfo.h b/src/deps/skia/include/private/SkEncodedInfo.h new file mode 100644 index 000000000..92400d956 --- /dev/null +++ b/src/deps/skia/include/private/SkEncodedInfo.h @@ -0,0 +1,249 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkEncodedInfo_DEFINED +#define SkEncodedInfo_DEFINED + +#include <memory> + +#include "include/core/SkData.h" +#include "include/core/SkImageInfo.h" +#include "include/third_party/skcms/skcms.h" + +struct SkEncodedInfo { +public: + class ICCProfile { + public: + static std::unique_ptr<ICCProfile> Make(sk_sp<SkData>); + static std::unique_ptr<ICCProfile> Make(const skcms_ICCProfile&); + + const skcms_ICCProfile* profile() const { return &fProfile; } + private: + ICCProfile(const skcms_ICCProfile&, sk_sp<SkData> = nullptr); + + skcms_ICCProfile fProfile; + sk_sp<SkData> fData; + }; + + enum Alpha { + kOpaque_Alpha, + kUnpremul_Alpha, + + // Each pixel is either fully opaque or fully transparent. + // There is no difference between requesting kPremul or kUnpremul. + kBinary_Alpha, + }; + + /* + * We strive to make the number of components per pixel obvious through + * our naming conventions. + * Ex: kRGB has 3 components. kRGBA has 4 components. + * + * This sometimes results in redundant Alpha and Color information. + * Ex: kRGB images must also be kOpaque. + */ + enum Color { + // PNG, WBMP + kGray_Color, + + // PNG + kGrayAlpha_Color, + + // PNG with Skia-specific sBIT + // Like kGrayAlpha, except this expects to be treated as + // kAlpha_8_SkColorType, which ignores the gray component. If + // decoded to full color (e.g. kN32), the gray component is respected + // (so it can share code with kGrayAlpha). + kXAlpha_Color, + + // PNG + // 565 images may be encoded to PNG by specifying the number of + // significant bits for each channel. This is a strange 565 + // representation because the image is still encoded with 8 bits per + // component. 
+ k565_Color, + + // PNG, GIF, BMP + kPalette_Color, + + // PNG, RAW + kRGB_Color, + kRGBA_Color, + + // BMP + kBGR_Color, + kBGRX_Color, + kBGRA_Color, + + // JPEG, WEBP + kYUV_Color, + + // WEBP + kYUVA_Color, + + // JPEG + // Photoshop actually writes inverted CMYK data into JPEGs, where zero + // represents 100% ink coverage. For this reason, we treat CMYK JPEGs + // as having inverted CMYK. libjpeg-turbo warns that this may break + // other applications, but the CMYK JPEGs we see on the web expect to + // be treated as inverted CMYK. + kInvertedCMYK_Color, + kYCCK_Color, + }; + + static SkEncodedInfo Make(int width, int height, Color color, Alpha alpha, + int bitsPerComponent) { + return Make(width, height, color, alpha, bitsPerComponent, nullptr); + } + + static SkEncodedInfo Make(int width, int height, Color color, Alpha alpha, + int bitsPerComponent, std::unique_ptr<ICCProfile> profile) { + SkASSERT(1 == bitsPerComponent || + 2 == bitsPerComponent || + 4 == bitsPerComponent || + 8 == bitsPerComponent || + 16 == bitsPerComponent); + + switch (color) { + case kGray_Color: + SkASSERT(kOpaque_Alpha == alpha); + break; + case kGrayAlpha_Color: + SkASSERT(kOpaque_Alpha != alpha); + break; + case kPalette_Color: + SkASSERT(16 != bitsPerComponent); + break; + case kRGB_Color: + case kBGR_Color: + case kBGRX_Color: + SkASSERT(kOpaque_Alpha == alpha); + SkASSERT(bitsPerComponent >= 8); + break; + case kYUV_Color: + case kInvertedCMYK_Color: + case kYCCK_Color: + SkASSERT(kOpaque_Alpha == alpha); + SkASSERT(8 == bitsPerComponent); + break; + case kRGBA_Color: + SkASSERT(bitsPerComponent >= 8); + break; + case kBGRA_Color: + case kYUVA_Color: + SkASSERT(8 == bitsPerComponent); + break; + case kXAlpha_Color: + SkASSERT(kUnpremul_Alpha == alpha); + SkASSERT(8 == bitsPerComponent); + break; + case k565_Color: + SkASSERT(kOpaque_Alpha == alpha); + SkASSERT(8 == bitsPerComponent); + break; + default: + SkASSERT(false); + break; + } + + return SkEncodedInfo(width, height, color, alpha, bitsPerComponent, std::move(profile)); + } + + /* + * Returns a recommended SkImageInfo. + * + * TODO: Leave this up to the client. + */ + SkImageInfo makeImageInfo() const { + auto ct = kGray_Color == fColor ? kGray_8_SkColorType : + kXAlpha_Color == fColor ? kAlpha_8_SkColorType : + k565_Color == fColor ? kRGB_565_SkColorType : + kN32_SkColorType ; + auto alpha = kOpaque_Alpha == fAlpha ? kOpaque_SkAlphaType + : kUnpremul_SkAlphaType; + sk_sp<SkColorSpace> cs = fProfile ? 
SkColorSpace::Make(*fProfile->profile()) + : nullptr; + if (!cs) { + cs = SkColorSpace::MakeSRGB(); + } + return SkImageInfo::Make(fWidth, fHeight, ct, alpha, std::move(cs)); + } + + int width() const { return fWidth; } + int height() const { return fHeight; } + Color color() const { return fColor; } + Alpha alpha() const { return fAlpha; } + bool opaque() const { return fAlpha == kOpaque_Alpha; } + const skcms_ICCProfile* profile() const { + if (!fProfile) return nullptr; + return fProfile->profile(); + } + + uint8_t bitsPerComponent() const { return fBitsPerComponent; } + + uint8_t bitsPerPixel() const { + switch (fColor) { + case kGray_Color: + return fBitsPerComponent; + case kXAlpha_Color: + case kGrayAlpha_Color: + return 2 * fBitsPerComponent; + case kPalette_Color: + return fBitsPerComponent; + case kRGB_Color: + case kBGR_Color: + case kYUV_Color: + case k565_Color: + return 3 * fBitsPerComponent; + case kRGBA_Color: + case kBGRA_Color: + case kBGRX_Color: + case kYUVA_Color: + case kInvertedCMYK_Color: + case kYCCK_Color: + return 4 * fBitsPerComponent; + default: + SkASSERT(false); + return 0; + } + } + + SkEncodedInfo(const SkEncodedInfo& orig) = delete; + SkEncodedInfo& operator=(const SkEncodedInfo&) = delete; + + SkEncodedInfo(SkEncodedInfo&& orig) = default; + SkEncodedInfo& operator=(SkEncodedInfo&&) = default; + + // Explicit copy method, to avoid accidental copying. + SkEncodedInfo copy() const { + auto copy = SkEncodedInfo::Make(fWidth, fHeight, fColor, fAlpha, fBitsPerComponent); + if (fProfile) { + copy.fProfile = std::make_unique<ICCProfile>(*fProfile); + } + return copy; + } + +private: + SkEncodedInfo(int width, int height, Color color, Alpha alpha, + uint8_t bitsPerComponent, std::unique_ptr<ICCProfile> profile) + : fWidth(width) + , fHeight(height) + , fColor(color) + , fAlpha(alpha) + , fBitsPerComponent(bitsPerComponent) + , fProfile(std::move(profile)) + {} + + int fWidth; + int fHeight; + Color fColor; + Alpha fAlpha; + uint8_t fBitsPerComponent; + std::unique_ptr<ICCProfile> fProfile; +}; + +#endif diff --git a/src/deps/skia/include/private/SkFixed.h b/src/deps/skia/include/private/SkFixed.h new file mode 100644 index 000000000..e34c19f2a --- /dev/null +++ b/src/deps/skia/include/private/SkFixed.h @@ -0,0 +1,141 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFixed_DEFINED +#define SkFixed_DEFINED + +#include "include/core/SkScalar.h" +#include "include/core/SkTypes.h" +#include "include/private/SkSafe_math.h" +#include "include/private/SkTPin.h" +#include "include/private/SkTo.h" + +/** \file SkFixed.h + + Types and macros for 16.16 fixed point +*/ + +/** 32 bit signed integer used to represent fractions values with 16 bits to the right of the decimal point +*/ +typedef int32_t SkFixed; +#define SK_Fixed1 (1 << 16) +#define SK_FixedHalf (1 << 15) +#define SK_FixedQuarter (1 << 14) +#define SK_FixedMax (0x7FFFFFFF) +#define SK_FixedMin (-SK_FixedMax) +#define SK_FixedPI (0x3243F) +#define SK_FixedSqrt2 (92682) +#define SK_FixedTanPIOver8 (0x6A0A) +#define SK_FixedRoot2Over2 (0xB505) + +// NOTE: SkFixedToFloat is exact. SkFloatToFixed seems to lack a rounding step. For all fixed-point +// values, this version is as accurate as possible for (fixed -> float -> fixed). 
Rounding reduces +// accuracy if the intermediate floats are in the range that only holds integers (adding 0.5f to an +// odd integer then snaps to nearest even). Using double for the rounding math gives maximum +// accuracy for (float -> fixed -> float), but that's usually overkill. +#define SkFixedToFloat(x) ((x) * 1.52587890625e-5f) +#define SkFloatToFixed(x) sk_float_saturate2int((x) * SK_Fixed1) + +#ifdef SK_DEBUG + static inline SkFixed SkFloatToFixed_Check(float x) { + int64_t n64 = (int64_t)(x * SK_Fixed1); + SkFixed n32 = (SkFixed)n64; + SkASSERT(n64 == n32); + return n32; + } +#else + #define SkFloatToFixed_Check(x) SkFloatToFixed(x) +#endif + +#define SkFixedToDouble(x) ((x) * 1.52587890625e-5) +#define SkDoubleToFixed(x) ((SkFixed)((x) * SK_Fixed1)) + +/** Converts an integer to a SkFixed, asserting that the result does not overflow + a 32 bit signed integer +*/ +#ifdef SK_DEBUG + inline SkFixed SkIntToFixed(int n) + { + SkASSERT(n >= -32768 && n <= 32767); + // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before + // shifting. + return (SkFixed)( (unsigned)n << 16 ); + } +#else + // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before + // shifting. Then we force the cast to SkFixed to ensure that the answer is signed (like the + // debug version). + #define SkIntToFixed(n) (SkFixed)((unsigned)(n) << 16) +#endif + +#define SkFixedRoundToInt(x) (((x) + SK_FixedHalf) >> 16) +#define SkFixedCeilToInt(x) (((x) + SK_Fixed1 - 1) >> 16) +#define SkFixedFloorToInt(x) ((x) >> 16) + +static inline SkFixed SkFixedRoundToFixed(SkFixed x) { + return (SkFixed)( (uint32_t)(x + SK_FixedHalf) & 0xFFFF0000 ); +} +static inline SkFixed SkFixedCeilToFixed(SkFixed x) { + return (SkFixed)( (uint32_t)(x + SK_Fixed1 - 1) & 0xFFFF0000 ); +} +static inline SkFixed SkFixedFloorToFixed(SkFixed x) { + return (SkFixed)( (uint32_t)x & 0xFFFF0000 ); +} + +#define SkFixedAbs(x) SkAbs32(x) +#define SkFixedAve(a, b) (((a) + (b)) >> 1) + +// The divide may exceed 32 bits. Clamp to a signed 32 bit result. +#define SkFixedDiv(numer, denom) \ + SkToS32(SkTPin<int64_t>((SkLeftShift((int64_t)(numer), 16) / (denom)), SK_MinS32, SK_MaxS32)) + +static inline SkFixed SkFixedMul(SkFixed a, SkFixed b) { + return (SkFixed)((int64_t)a * b >> 16); +} + +/////////////////////////////////////////////////////////////////////////////// +// Platform-specific alternatives to our portable versions. + +// The VCVT float-to-fixed instruction is part of the VFPv3 instruction set. +#if defined(__ARM_VFPV3__) + /* This does not handle NaN or other obscurities, but is faster than + than (int)(x*65536). When built on Android with -Os, needs forcing + to inline or we lose the speed benefit. 
+ */ + SK_ALWAYS_INLINE SkFixed SkFloatToFixed_arm(float x) + { + int32_t y; + asm("vcvt.s32.f32 %0, %0, #16": "+w"(x)); + memcpy(&y, &x, sizeof(y)); + return y; + } + #undef SkFloatToFixed + #define SkFloatToFixed(x) SkFloatToFixed_arm(x) +#endif + +/////////////////////////////////////////////////////////////////////////////// + +#define SkFixedToScalar(x) SkFixedToFloat(x) +#define SkScalarToFixed(x) SkFloatToFixed(x) + +/////////////////////////////////////////////////////////////////////////////// + +typedef int64_t SkFixed3232; // 32.32 + +#define SkFixed3232Max SK_MaxS64 +#define SkFixed3232Min (-SkFixed3232Max) + +#define SkIntToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 32)) +#define SkFixed3232ToInt(x) ((int)((x) >> 32)) +#define SkFixedToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 16)) +#define SkFixed3232ToFixed(x) ((SkFixed)((x) >> 16)) +#define SkFloatToFixed3232(x) sk_float_saturate2int64((x) * (65536.0f * 65536.0f)) +#define SkFixed3232ToFloat(x) (x * (1 / (65536.0f * 65536.0f))) + +#define SkScalarToFixed3232(x) SkFloatToFixed3232(x) + +#endif diff --git a/src/deps/skia/include/private/SkFloatBits.h b/src/deps/skia/include/private/SkFloatBits.h new file mode 100644 index 000000000..89eea4b9e --- /dev/null +++ b/src/deps/skia/include/private/SkFloatBits.h @@ -0,0 +1,91 @@ +/* + * Copyright 2008 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFloatBits_DEFINED +#define SkFloatBits_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkSafe_math.h" + +#include <float.h> + +/** Convert a sign-bit int (i.e. float interpreted as int) into a 2s compliement + int. This also converts -0 (0x80000000) to 0. Doing this to a float allows + it to be compared using normal C operators (<, <=, etc.) +*/ +static inline int32_t SkSignBitTo2sCompliment(int32_t x) { + if (x < 0) { + x &= 0x7FFFFFFF; + x = -x; + } + return x; +} + +/** Convert a 2s compliment int to a sign-bit (i.e. int interpreted as float). + This undoes the result of SkSignBitTo2sCompliment(). + */ +static inline int32_t Sk2sComplimentToSignBit(int32_t x) { + int sign = x >> 31; + // make x positive + x = (x ^ sign) - sign; + // set the sign bit as needed + x |= SkLeftShift(sign, 31); + return x; +} + +union SkFloatIntUnion { + float fFloat; + int32_t fSignBitInt; +}; + +// Helper to see a float as its bit pattern (w/o aliasing warnings) +static inline int32_t SkFloat2Bits(float x) { + SkFloatIntUnion data; + data.fFloat = x; + return data.fSignBitInt; +} + +// Helper to see a bit pattern as a float (w/o aliasing warnings) +static inline float SkBits2Float(int32_t floatAsBits) { + SkFloatIntUnion data; + data.fSignBitInt = floatAsBits; + return data.fFloat; +} + +constexpr int32_t gFloatBits_exponent_mask = 0x7F800000; +constexpr int32_t gFloatBits_matissa_mask = 0x007FFFFF; + +static inline bool SkFloatBits_IsFinite(int32_t bits) { + return (bits & gFloatBits_exponent_mask) != gFloatBits_exponent_mask; +} + +static inline bool SkFloatBits_IsInf(int32_t bits) { + return ((bits & gFloatBits_exponent_mask) == gFloatBits_exponent_mask) && + (bits & gFloatBits_matissa_mask) == 0; +} + +/** Return the float as a 2s compliment int. Just to be used to compare floats + to each other or against positive float-bit-constants (like 0). This does + not return the int equivalent of the float, just something cheaper for + compares-only. 
+ */ +static inline int32_t SkFloatAs2sCompliment(float x) { + return SkSignBitTo2sCompliment(SkFloat2Bits(x)); +} + +/** Return the 2s compliment int as a float. This undos the result of + SkFloatAs2sCompliment + */ +static inline float Sk2sComplimentAsFloat(int32_t x) { + return SkBits2Float(Sk2sComplimentToSignBit(x)); +} + +// Scalar wrappers for float-bit routines + +#define SkScalarAs2sCompliment(x) SkFloatAs2sCompliment(x) + +#endif diff --git a/src/deps/skia/include/private/SkFloatingPoint.h b/src/deps/skia/include/private/SkFloatingPoint.h new file mode 100644 index 000000000..fbabd0ebc --- /dev/null +++ b/src/deps/skia/include/private/SkFloatingPoint.h @@ -0,0 +1,272 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFloatingPoint_DEFINED +#define SkFloatingPoint_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkFloatBits.h" +#include "include/private/SkSafe_math.h" +#include <float.h> +#include <math.h> +#include <cmath> +#include <cstring> +#include <limits> + + +#if defined(SK_LEGACY_FLOAT_RSQRT) +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1 + #include <xmmintrin.h> +#elif defined(SK_ARM_HAS_NEON) + #include <arm_neon.h> +#endif +#endif + +constexpr float SK_FloatSqrt2 = 1.41421356f; +constexpr float SK_FloatPI = 3.14159265f; +constexpr double SK_DoublePI = 3.14159265358979323846264338327950288; + +// C++98 cmath std::pow seems to be the earliest portable way to get float pow. +// However, on Linux including cmath undefines isfinite. +// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14608 +static inline float sk_float_pow(float base, float exp) { + return powf(base, exp); +} + +#define sk_float_sqrt(x) sqrtf(x) +#define sk_float_sin(x) sinf(x) +#define sk_float_cos(x) cosf(x) +#define sk_float_tan(x) tanf(x) +#define sk_float_floor(x) floorf(x) +#define sk_float_ceil(x) ceilf(x) +#define sk_float_trunc(x) truncf(x) +#ifdef SK_BUILD_FOR_MAC +# define sk_float_acos(x) static_cast<float>(acos(x)) +# define sk_float_asin(x) static_cast<float>(asin(x)) +#else +# define sk_float_acos(x) acosf(x) +# define sk_float_asin(x) asinf(x) +#endif +#define sk_float_atan2(y,x) atan2f(y,x) +#define sk_float_abs(x) fabsf(x) +#define sk_float_copysign(x, y) copysignf(x, y) +#define sk_float_mod(x,y) fmodf(x,y) +#define sk_float_exp(x) expf(x) +#define sk_float_log(x) logf(x) + +constexpr float sk_float_degrees_to_radians(float degrees) { + return degrees * (SK_FloatPI / 180); +} + +constexpr float sk_float_radians_to_degrees(float radians) { + return radians * (180 / SK_FloatPI); +} + +#define sk_float_round(x) sk_float_floor((x) + 0.5f) + +// can't find log2f on android, but maybe that just a tool bug? 
+#ifdef SK_BUILD_FOR_ANDROID + static inline float sk_float_log2(float x) { + const double inv_ln_2 = 1.44269504088896; + return (float)(log(x) * inv_ln_2); + } +#else + #define sk_float_log2(x) log2f(x) +#endif + +static inline bool sk_float_isfinite(float x) { + return SkFloatBits_IsFinite(SkFloat2Bits(x)); +} + +static inline bool sk_floats_are_finite(float a, float b) { + return sk_float_isfinite(a) && sk_float_isfinite(b); +} + +static inline bool sk_floats_are_finite(const float array[], int count) { + float prod = 0; + for (int i = 0; i < count; ++i) { + prod *= array[i]; + } + // At this point, prod will either be NaN or 0 + return prod == 0; // if prod is NaN, this check will return false +} + +static inline bool sk_float_isinf(float x) { + return SkFloatBits_IsInf(SkFloat2Bits(x)); +} + +static inline bool sk_float_isnan(float x) { + return !(x == x); +} + +#define sk_double_isnan(a) sk_float_isnan(a) + +#define SK_MaxS32FitsInFloat 2147483520 +#define SK_MinS32FitsInFloat -SK_MaxS32FitsInFloat + +#define SK_MaxS64FitsInFloat (SK_MaxS64 >> (63-24) << (63-24)) // 0x7fffff8000000000 +#define SK_MinS64FitsInFloat -SK_MaxS64FitsInFloat + +/** + * Return the closest int for the given float. Returns SK_MaxS32FitsInFloat for NaN. + */ +static inline int sk_float_saturate2int(float x) { + x = x < SK_MaxS32FitsInFloat ? x : SK_MaxS32FitsInFloat; + x = x > SK_MinS32FitsInFloat ? x : SK_MinS32FitsInFloat; + return (int)x; +} + +/** + * Return the closest int for the given double. Returns SK_MaxS32 for NaN. + */ +static inline int sk_double_saturate2int(double x) { + x = x < SK_MaxS32 ? x : SK_MaxS32; + x = x > SK_MinS32 ? x : SK_MinS32; + return (int)x; +} + +/** + * Return the closest int64_t for the given float. Returns SK_MaxS64FitsInFloat for NaN. + */ +static inline int64_t sk_float_saturate2int64(float x) { + x = x < SK_MaxS64FitsInFloat ? x : SK_MaxS64FitsInFloat; + x = x > SK_MinS64FitsInFloat ? x : SK_MinS64FitsInFloat; + return (int64_t)x; +} + +#define sk_float_floor2int(x) sk_float_saturate2int(sk_float_floor(x)) +#define sk_float_round2int(x) sk_float_saturate2int(sk_float_floor((x) + 0.5f)) +#define sk_float_ceil2int(x) sk_float_saturate2int(sk_float_ceil(x)) + +#define sk_float_floor2int_no_saturate(x) (int)sk_float_floor(x) +#define sk_float_round2int_no_saturate(x) (int)sk_float_floor((x) + 0.5f) +#define sk_float_ceil2int_no_saturate(x) (int)sk_float_ceil(x) + +#define sk_double_floor(x) floor(x) +#define sk_double_round(x) floor((x) + 0.5) +#define sk_double_ceil(x) ceil(x) +#define sk_double_floor2int(x) (int)floor(x) +#define sk_double_round2int(x) (int)floor((x) + 0.5) +#define sk_double_ceil2int(x) (int)ceil(x) + +// Cast double to float, ignoring any warning about too-large finite values being cast to float. +// Clang thinks this is undefined, but it's actually implementation defined to return either +// the largest float or infinity (one of the two bracketing representable floats). Good enough! 
+SK_ATTRIBUTE(no_sanitize("float-cast-overflow")) +static inline float sk_double_to_float(double x) { + return static_cast<float>(x); +} + +#define SK_FloatNaN std::numeric_limits<float>::quiet_NaN() +#define SK_FloatInfinity (+std::numeric_limits<float>::infinity()) +#define SK_FloatNegativeInfinity (-std::numeric_limits<float>::infinity()) + +#define SK_DoubleNaN std::numeric_limits<double>::quiet_NaN() + +// Returns false if any of the floats are outside of [0...1] +// Returns true if count is 0 +bool sk_floats_are_unit(const float array[], size_t count); + +#if defined(SK_LEGACY_FLOAT_RSQRT) +static inline float sk_float_rsqrt_portable(float x) { + // Get initial estimate. + int i; + memcpy(&i, &x, 4); + i = 0x5F1FFFF9 - (i>>1); + float estimate; + memcpy(&estimate, &i, 4); + + // One step of Newton's method to refine. + const float estimate_sq = estimate*estimate; + estimate *= 0.703952253f*(2.38924456f-x*estimate_sq); + return estimate; +} + +// Fast, approximate inverse square root. +// Compare to name-brand "1.0f / sk_float_sqrt(x)". Should be around 10x faster on SSE, 2x on NEON. +static inline float sk_float_rsqrt(float x) { +// We want all this inlined, so we'll inline SIMD and just take the hit when we don't know we've got +// it at compile time. This is going to be too fast to productively hide behind a function pointer. +// +// We do one step of Newton's method to refine the estimates in the NEON and portable paths. No +// refinement is faster, but very innacurate. Two steps is more accurate, but slower than 1/sqrt. +// +// Optimized constants in the portable path courtesy of http://rrrola.wz.cz/inv_sqrt.html +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1 + return _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x))); +#elif defined(SK_ARM_HAS_NEON) + // Get initial estimate. + const float32x2_t xx = vdup_n_f32(x); // Clever readers will note we're doing everything 2x. + float32x2_t estimate = vrsqrte_f32(xx); + + // One step of Newton's method to refine. + const float32x2_t estimate_sq = vmul_f32(estimate, estimate); + estimate = vmul_f32(estimate, vrsqrts_f32(xx, estimate_sq)); + return vget_lane_f32(estimate, 0); // 1 will work fine too; the answer's in both places. +#else + return sk_float_rsqrt_portable(x); +#endif +} +#else + +static inline float sk_float_rsqrt_portable(float x) { return 1.0f / sk_float_sqrt(x); } +static inline float sk_float_rsqrt (float x) { return 1.0f / sk_float_sqrt(x); } + +#endif + +// Returns the log2 of the provided value, were that value to be rounded up to the next power of 2. +// Returns 0 if value <= 0: +// Never returns a negative number, even if value is NaN. +// +// sk_float_nextlog2((-inf..1]) -> 0 +// sk_float_nextlog2((1..2]) -> 1 +// sk_float_nextlog2((2..4]) -> 2 +// sk_float_nextlog2((4..8]) -> 3 +// ... +static inline int sk_float_nextlog2(float x) { + uint32_t bits = (uint32_t)SkFloat2Bits(x); + bits += (1u << 23) - 1u; // Increment the exponent for non-powers-of-2. + int exp = ((int32_t)bits >> 23) - 127; + return exp & ~(exp >> 31); // Return 0 for negative or denormalized floats, and exponents < 0. +} + +// This is the number of significant digits we can print in a string such that when we read that +// string back we get the floating point number we expect. 
The minimum value C requires is 6, but +// most compilers support 9 +#ifdef FLT_DECIMAL_DIG +#define SK_FLT_DECIMAL_DIG FLT_DECIMAL_DIG +#else +#define SK_FLT_DECIMAL_DIG 9 +#endif + +// IEEE defines how float divide behaves for non-finite values and zero-denoms, but C does not +// so we have a helper that suppresses the possible undefined-behavior warnings. + +SK_ATTRIBUTE(no_sanitize("float-divide-by-zero")) +static inline float sk_ieee_float_divide(float numer, float denom) { + return numer / denom; +} + +SK_ATTRIBUTE(no_sanitize("float-divide-by-zero")) +static inline double sk_ieee_double_divide(double numer, double denom) { + return numer / denom; +} + +// While we clean up divide by zero, we'll replace places that do divide by zero with this TODO. +static inline float sk_ieee_float_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(float n, float d) { + return sk_ieee_float_divide(n,d); +} + +static inline float sk_fmaf(float f, float m, float a) { +#if defined(FP_FAST_FMA) + return std::fmaf(f,m,a); +#else + return f*m+a; +#endif +} + +#endif diff --git a/src/deps/skia/include/private/SkHalf.h b/src/deps/skia/include/private/SkHalf.h new file mode 100644 index 000000000..d95189131 --- /dev/null +++ b/src/deps/skia/include/private/SkHalf.h @@ -0,0 +1,85 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkHalf_DEFINED +#define SkHalf_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkNx.h" + +// 16-bit floating point value +// format is 1 bit sign, 5 bits exponent, 10 bits mantissa +// only used for storage +typedef uint16_t SkHalf; + +static constexpr uint16_t SK_HalfMin = 0x0400; // 2^-14 (minimum positive normal value) +static constexpr uint16_t SK_HalfMax = 0x7bff; // 65504 +static constexpr uint16_t SK_HalfEpsilon = 0x1400; // 2^-10 +static constexpr uint16_t SK_Half1 = 0x3C00; // 1 + +// convert between half and single precision floating point +float SkHalfToFloat(SkHalf h); +SkHalf SkFloatToHalf(float f); + +// Convert between half and single precision floating point, +// assuming inputs and outputs are both finite, and may +// flush values which would be denormal half floats to zero. +static inline Sk4f SkHalfToFloat_finite_ftz(uint64_t); +static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f&); + +// ~~~~~~~~~~~ impl ~~~~~~~~~~~~~~ // + +// Like the serial versions in SkHalf.cpp, these are based on +// https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/ + +// GCC 4.9 lacks the intrinsics to use ARMv8 f16<->f32 instructions, so we use inline assembly. + +static inline Sk4f SkHalfToFloat_finite_ftz(uint64_t rgba) { + Sk4h hs = Sk4h::Load(&rgba); +#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64) + float32x4_t fs; + asm ("fcvtl %[fs].4s, %[hs].4h \n" // vcvt_f32_f16(...) + : [fs] "=w" (fs) // =w: write-only NEON register + : [hs] "w" (hs.fVec)); // w: read-only NEON register + return fs; +#else + Sk4i bits = SkNx_cast<int>(hs), // Expand to 32 bit. + sign = bits & 0x00008000, // Save the sign bit for later... + positive = bits ^ sign, // ...but strip it off for now. + is_norm = 0x03ff < positive; // Exponent > 0? + + // For normal half floats, extend the mantissa by 13 zero bits, + // then adjust the exponent from 15 bias to 127 bias. 
+ Sk4i norm = (positive << 13) + ((127 - 15) << 23); + + Sk4i merged = (sign << 16) | (norm & is_norm); + return Sk4f::Load(&merged); +#endif +} + +static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f& fs) { +#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64) + float32x4_t vec = fs.fVec; + asm ("fcvtn %[vec].4h, %[vec].4s \n" // vcvt_f16_f32(vec) + : [vec] "+w" (vec)); // +w: read-write NEON register + return vreinterpret_u16_f32(vget_low_f32(vec)); +#else + Sk4i bits = Sk4i::Load(&fs), + sign = bits & 0x80000000, // Save the sign bit for later... + positive = bits ^ sign, // ...but strip it off for now. + will_be_norm = 0x387fdfff < positive; // greater than largest denorm half? + + // For normal half floats, adjust the exponent from 127 bias to 15 bias, + // then drop the bottom 13 mantissa bits. + Sk4i norm = (positive - ((127 - 15) << 23)) >> 13; + + Sk4i merged = (sign >> 16) | (will_be_norm & norm); + return SkNx_cast<uint16_t>(merged); +#endif +} + +#endif diff --git a/src/deps/skia/include/private/SkIDChangeListener.h b/src/deps/skia/include/private/SkIDChangeListener.h new file mode 100644 index 000000000..f7a5900e0 --- /dev/null +++ b/src/deps/skia/include/private/SkIDChangeListener.h @@ -0,0 +1,75 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkIDChangeListener_DEFINED +#define SkIDChangeListener_DEFINED + +#include "include/core/SkRefCnt.h" +#include "include/private/SkMutex.h" +#include "include/private/SkTDArray.h" + +#include <atomic> + +/** + * Used to be notified when a gen/unique ID is invalidated, typically to preemptively purge + * associated items from a cache that are no longer reachable. The listener can + * be marked for deregistration if the cached item is remove before the listener is + * triggered. This prevents unbounded listener growth when cache items are routinely + * removed before the gen ID/unique ID is invalidated. + */ +class SkIDChangeListener : public SkRefCnt { +public: + SkIDChangeListener(); + + ~SkIDChangeListener() override; + + virtual void changed() = 0; + + /** + * Mark the listener is no longer needed. It should be removed and changed() should not be + * called. + */ + void markShouldDeregister() { fShouldDeregister.store(true, std::memory_order_relaxed); } + + /** Indicates whether markShouldDeregister was called. */ + bool shouldDeregister() { return fShouldDeregister.load(std::memory_order_acquire); } + + /** Manages a list of SkIDChangeListeners. */ + class List { + public: + List(); + + ~List(); + + /** + * Add a new listener to the list. It must not already be deregistered. Also clears out + * previously deregistered listeners. + */ + void add(sk_sp<SkIDChangeListener> listener) SK_EXCLUDES(fMutex); + + /** + * The number of registered listeners (including deregisterd listeners that are yet-to-be + * removed. + */ + int count() const SK_EXCLUDES(fMutex); + + /** Calls changed() on all listeners that haven't been deregistered and resets the list. */ + void changed() SK_EXCLUDES(fMutex); + + /** Resets without calling changed() on the listeners. 
*/ + void reset() SK_EXCLUDES(fMutex); + + private: + mutable SkMutex fMutex; + SkTDArray<SkIDChangeListener*> fListeners SK_GUARDED_BY(fMutex); // pointers are reffed + }; + +private: + std::atomic<bool> fShouldDeregister; +}; + +#endif diff --git a/src/deps/skia/include/private/SkImageInfoPriv.h b/src/deps/skia/include/private/SkImageInfoPriv.h new file mode 100644 index 000000000..5e4abb82c --- /dev/null +++ b/src/deps/skia/include/private/SkImageInfoPriv.h @@ -0,0 +1,193 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkImageInfoPriv_DEFINED +#define SkImageInfoPriv_DEFINED + +#include "include/core/SkColor.h" +#include "include/core/SkImageInfo.h" + +static inline uint32_t SkColorTypeChannelFlags(SkColorType ct) { + switch (ct) { + case kUnknown_SkColorType: return 0; + case kAlpha_8_SkColorType: return kAlpha_SkColorChannelFlag; + case kRGB_565_SkColorType: return kRGB_SkColorChannelFlags; + case kARGB_4444_SkColorType: return kRGBA_SkColorChannelFlags; + case kRGBA_8888_SkColorType: return kRGBA_SkColorChannelFlags; + case kRGB_888x_SkColorType: return kRGB_SkColorChannelFlags; + case kBGRA_8888_SkColorType: return kRGBA_SkColorChannelFlags; + case kRGBA_1010102_SkColorType: return kRGBA_SkColorChannelFlags; + case kRGB_101010x_SkColorType: return kRGB_SkColorChannelFlags; + case kBGRA_1010102_SkColorType: return kRGBA_SkColorChannelFlags; + case kBGR_101010x_SkColorType: return kRGB_SkColorChannelFlags; + case kGray_8_SkColorType: return kGray_SkColorChannelFlag; + case kRGBA_F16Norm_SkColorType: return kRGBA_SkColorChannelFlags; + case kRGBA_F16_SkColorType: return kRGBA_SkColorChannelFlags; + case kRGBA_F32_SkColorType: return kRGBA_SkColorChannelFlags; + case kR8G8_unorm_SkColorType: return kRG_SkColorChannelFlags; + case kA16_unorm_SkColorType: return kAlpha_SkColorChannelFlag; + case kR16G16_unorm_SkColorType: return kRG_SkColorChannelFlags; + case kA16_float_SkColorType: return kAlpha_SkColorChannelFlag; + case kR16G16_float_SkColorType: return kRG_SkColorChannelFlags; + case kR16G16B16A16_unorm_SkColorType: return kRGBA_SkColorChannelFlags; + case kSRGBA_8888_SkColorType: return kRGBA_SkColorChannelFlags; + } + SkUNREACHABLE; +} + +static inline bool SkColorTypeIsAlphaOnly(SkColorType ct) { + return SkColorTypeChannelFlags(ct) == kAlpha_SkColorChannelFlag; +} + +static inline bool SkAlphaTypeIsValid(unsigned value) { + return value <= kLastEnum_SkAlphaType; +} + +static int SkColorTypeShiftPerPixel(SkColorType ct) { + switch (ct) { + case kUnknown_SkColorType: return 0; + case kAlpha_8_SkColorType: return 0; + case kRGB_565_SkColorType: return 1; + case kARGB_4444_SkColorType: return 1; + case kRGBA_8888_SkColorType: return 2; + case kRGB_888x_SkColorType: return 2; + case kBGRA_8888_SkColorType: return 2; + case kRGBA_1010102_SkColorType: return 2; + case kRGB_101010x_SkColorType: return 2; + case kBGRA_1010102_SkColorType: return 2; + case kBGR_101010x_SkColorType: return 2; + case kGray_8_SkColorType: return 0; + case kRGBA_F16Norm_SkColorType: return 3; + case kRGBA_F16_SkColorType: return 3; + case kRGBA_F32_SkColorType: return 4; + case kR8G8_unorm_SkColorType: return 1; + case kA16_unorm_SkColorType: return 1; + case kR16G16_unorm_SkColorType: return 2; + case kA16_float_SkColorType: return 1; + case kR16G16_float_SkColorType: return 2; + case kR16G16B16A16_unorm_SkColorType: return 3; + case kSRGBA_8888_SkColorType: return 2; + } + SkUNREACHABLE; +} + 
+static inline size_t SkColorTypeMinRowBytes(SkColorType ct, int width) { + return (size_t)(width * SkColorTypeBytesPerPixel(ct)); +} + +static inline bool SkColorTypeIsValid(unsigned value) { + return value <= kLastEnum_SkColorType; +} + +static inline size_t SkColorTypeComputeOffset(SkColorType ct, int x, int y, size_t rowBytes) { + if (kUnknown_SkColorType == ct) { + return 0; + } + return (size_t)y * rowBytes + ((size_t)x << SkColorTypeShiftPerPixel(ct)); +} + +static inline bool SkColorTypeIsNormalized(SkColorType ct) { + switch (ct) { + case kUnknown_SkColorType: + case kAlpha_8_SkColorType: + case kRGB_565_SkColorType: + case kARGB_4444_SkColorType: + case kRGBA_8888_SkColorType: + case kRGB_888x_SkColorType: + case kBGRA_8888_SkColorType: + case kRGBA_1010102_SkColorType: + case kRGB_101010x_SkColorType: + case kBGRA_1010102_SkColorType: + case kBGR_101010x_SkColorType: + case kGray_8_SkColorType: + case kRGBA_F16Norm_SkColorType: + case kR8G8_unorm_SkColorType: + case kA16_unorm_SkColorType: + case kA16_float_SkColorType: /*subtle... alpha is always [0,1]*/ + case kR16G16_unorm_SkColorType: + case kR16G16B16A16_unorm_SkColorType: + case kSRGBA_8888_SkColorType: return true; + + case kRGBA_F16_SkColorType: + case kRGBA_F32_SkColorType: + case kR16G16_float_SkColorType: return false; + } + SkUNREACHABLE; +} + +static inline int SkColorTypeMaxBitsPerChannel(SkColorType ct) { + switch (ct) { + case kUnknown_SkColorType: + return 0; + + case kARGB_4444_SkColorType: + return 4; + + case kRGB_565_SkColorType: + return 6; + + case kAlpha_8_SkColorType: + case kRGBA_8888_SkColorType: + case kRGB_888x_SkColorType: + case kBGRA_8888_SkColorType: + case kGray_8_SkColorType: + case kR8G8_unorm_SkColorType: + case kSRGBA_8888_SkColorType: + return 8; + + case kRGBA_1010102_SkColorType: + case kRGB_101010x_SkColorType: + case kBGRA_1010102_SkColorType: + case kBGR_101010x_SkColorType: + return 10; + + case kRGBA_F16Norm_SkColorType: + case kA16_unorm_SkColorType: + case kA16_float_SkColorType: + case kR16G16_unorm_SkColorType: + case kR16G16B16A16_unorm_SkColorType: + case kRGBA_F16_SkColorType: + case kR16G16_float_SkColorType: + return 16; + + case kRGBA_F32_SkColorType: + return 32; + } + SkUNREACHABLE; +} + +/** + * Returns true if |info| contains a valid colorType and alphaType. + */ +static inline bool SkColorInfoIsValid(const SkColorInfo& info) { + return info.colorType() != kUnknown_SkColorType && info.alphaType() != kUnknown_SkAlphaType; +} + +/** + * Returns true if |info| contains a valid combination of width, height and colorInfo. + */ +static inline bool SkImageInfoIsValid(const SkImageInfo& info) { + if (info.width() <= 0 || info.height() <= 0) { + return false; + } + + const int kMaxDimension = SK_MaxS32 >> 2; + if (info.width() > kMaxDimension || info.height() > kMaxDimension) { + return false; + } + + return SkColorInfoIsValid(info.colorInfo()); +} + +/** + * Returns true if Skia has defined a pixel conversion from the |src| to the |dst|. + * Returns false otherwise. + */ +static inline bool SkImageInfoValidConversion(const SkImageInfo& dst, const SkImageInfo& src) { + return SkImageInfoIsValid(dst) && SkImageInfoIsValid(src); +} +#endif // SkImageInfoPriv_DEFINED diff --git a/src/deps/skia/include/private/SkMacros.h b/src/deps/skia/include/private/SkMacros.h new file mode 100644 index 000000000..7732d44d7 --- /dev/null +++ b/src/deps/skia/include/private/SkMacros.h @@ -0,0 +1,84 @@ +/* + * Copyright 2018 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef SkMacros_DEFINED +#define SkMacros_DEFINED + +/* + * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab + * + * SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly + * + */ +#define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y) +#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y) X ## Y + +/* + * Usage: SK_MACRO_APPEND_LINE(foo) to make foo123, where 123 is the current + * line number. Easy way to construct + * unique names for local functions or + * variables. + */ +#define SK_MACRO_APPEND_LINE(name) SK_MACRO_CONCAT(name, __LINE__) + +#define SK_MACRO_APPEND_COUNTER(name) SK_MACRO_CONCAT(name, __COUNTER__) + +//////////////////////////////////////////////////////////////////////////////// + +// Can be used to bracket data types that must be dense, e.g. hash keys. +#if defined(__clang__) // This should work on GCC too, but GCC diagnostic pop didn't seem to work! + #define SK_BEGIN_REQUIRE_DENSE _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic error \"-Wpadded\"") + #define SK_END_REQUIRE_DENSE _Pragma("GCC diagnostic pop") +#else + #define SK_BEGIN_REQUIRE_DENSE + #define SK_END_REQUIRE_DENSE +#endif + +#define SK_INIT_TO_AVOID_WARNING = 0 + +//////////////////////////////////////////////////////////////////////////////// + +/** + * Defines overloaded bitwise operators to make it easier to use an enum as a + * bitfield. + */ +#define SK_MAKE_BITFIELD_OPS(X) \ + inline X operator |(X a, X b) { \ + return (X) (+a | +b); \ + } \ + inline X& operator |=(X& a, X b) { \ + return (a = a | b); \ + } \ + inline X operator &(X a, X b) { \ + return (X) (+a & +b); \ + } \ + inline X& operator &=(X& a, X b) { \ + return (a = a & b); \ + } \ + template <typename T> \ + inline X operator &(T a, X b) { \ + return (X) (+a & +b); \ + } \ + template <typename T> \ + inline X operator &(X a, T b) { \ + return (X) (+a & +b); \ + } \ + +#define SK_DECL_BITFIELD_OPS_FRIENDS(X) \ + friend X operator |(X a, X b); \ + friend X& operator |=(X& a, X b); \ + \ + friend X operator &(X a, X b); \ + friend X& operator &=(X& a, X b); \ + \ + template <typename T> \ + friend X operator &(T a, X b); \ + \ + template <typename T> \ + friend X operator &(X a, T b); \ + +#endif // SkMacros_DEFINED diff --git a/src/deps/skia/include/private/SkMalloc.h b/src/deps/skia/include/private/SkMalloc.h new file mode 100644 index 000000000..033294cf8 --- /dev/null +++ b/src/deps/skia/include/private/SkMalloc.h @@ -0,0 +1,143 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkMalloc_DEFINED +#define SkMalloc_DEFINED + +#include <cstddef> +#include <cstring> + +#include "include/core/SkTypes.h" + +/* + memory wrappers to be implemented by the porting layer (platform) +*/ + + +/** Free memory returned by sk_malloc(). It is safe to pass null. */ +SK_API extern void sk_free(void*); + +/** + * Called internally if we run out of memory. The platform implementation must + * not return, but should either throw an exception or otherwise exit. + */ +SK_API extern void sk_out_of_memory(void); + +enum { + /** + * If this bit is set, the returned buffer must be zero-initialized. If this bit is not set + * the buffer can be uninitialized. 
+ */ + SK_MALLOC_ZERO_INITIALIZE = 1 << 0, + + /** + * If this bit is set, the implementation must throw/crash/quit if the request cannot + * be fulfilled. If this bit is not set, then it should return nullptr on failure. + */ + SK_MALLOC_THROW = 1 << 1, +}; +/** + * Return a block of memory (at least 4-byte aligned) of at least the specified size. + * If the requested memory cannot be returned, either return nullptr or throw/exit, depending + * on the SK_MALLOC_THROW bit. If the allocation succeeds, the memory will be zero-initialized + * if the SK_MALLOC_ZERO_INITIALIZE bit was set. + * + * To free the memory, call sk_free() + */ +SK_API extern void* sk_malloc_flags(size_t size, unsigned flags); + +/** Same as standard realloc(), but this one never returns null on failure. It will throw + * an exception if it fails. + */ +SK_API extern void* sk_realloc_throw(void* buffer, size_t size); + +static inline void* sk_malloc_throw(size_t size) { + return sk_malloc_flags(size, SK_MALLOC_THROW); +} + +static inline void* sk_calloc_throw(size_t size) { + return sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_ZERO_INITIALIZE); +} + +static inline void* sk_calloc_canfail(size_t size) { +#if defined(SK_BUILD_FOR_FUZZER) + // To reduce the chance of OOM, pretend we can't allocate more than 200kb. + if (size > 200000) { + return nullptr; + } +#endif + return sk_malloc_flags(size, SK_MALLOC_ZERO_INITIALIZE); +} + +// Performs a safe multiply count * elemSize, checking for overflow +SK_API extern void* sk_calloc_throw(size_t count, size_t elemSize); +SK_API extern void* sk_malloc_throw(size_t count, size_t elemSize); +SK_API extern void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize); + +/** + * These variants return nullptr on failure + */ +static inline void* sk_malloc_canfail(size_t size) { +#if defined(SK_BUILD_FOR_FUZZER) + // To reduce the chance of OOM, pretend we can't allocate more than 200kb. + if (size > 200000) { + return nullptr; + } +#endif + return sk_malloc_flags(size, 0); +} +SK_API extern void* sk_malloc_canfail(size_t count, size_t elemSize); + +// bzero is safer than memset, but we can't rely on it, so... sk_bzero() +static inline void sk_bzero(void* buffer, size_t size) { + // Please c.f. sk_careful_memcpy. It's undefined behavior to call memset(null, 0, 0). + if (size) { + memset(buffer, 0, size); + } +} + +/** + * sk_careful_memcpy() is just like memcpy(), but guards against undefined behavior. + * + * It is undefined behavior to call memcpy() with null dst or src, even if len is 0. + * If an optimizer is "smart" enough, it can exploit this to do unexpected things. + * memcpy(dst, src, 0); + * if (src) { + * printf("%x\n", *src); + * } + * In this code the compiler can assume src is not null and omit the if (src) {...} check, + * unconditionally running the printf, crashing the program if src really is null. + * Of the compilers we pay attention to only GCC performs this optimization in practice. + */ +static inline void* sk_careful_memcpy(void* dst, const void* src, size_t len) { + // When we pass >0 len we had better already be passing valid pointers. + // So we just need to skip calling memcpy when len == 0. + if (len) { + memcpy(dst,src,len); + } + return dst; +} + +static inline void* sk_careful_memmove(void* dst, const void* src, size_t len) { + // When we pass >0 len we had better already be passing valid pointers. + // So we just need to skip calling memcpy when len == 0. 
+ if (len) { + memmove(dst,src,len); + } + return dst; +} + +static inline int sk_careful_memcmp(const void* a, const void* b, size_t len) { + // When we pass >0 len we had better already be passing valid pointers. + // So we just need to skip calling memcmp when len == 0. + if (len == 0) { + return 0; // we treat zero-length buffers as "equal" + } + return memcmp(a, b, len); +} + +#endif // SkMalloc_DEFINED diff --git a/src/deps/skia/include/private/SkMutex.h b/src/deps/skia/include/private/SkMutex.h new file mode 100644 index 000000000..096f3ebc9 --- /dev/null +++ b/src/deps/skia/include/private/SkMutex.h @@ -0,0 +1,56 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkMutex_DEFINED +#define SkMutex_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkMacros.h" +#include "include/private/SkSemaphore.h" +#include "include/private/SkThreadAnnotations.h" +#include "include/private/SkThreadID.h" + +class SK_CAPABILITY("mutex") SkMutex { +public: + constexpr SkMutex() = default; + + void acquire() SK_ACQUIRE() { + fSemaphore.wait(); + SkDEBUGCODE(fOwner = SkGetThreadID();) + } + + void release() SK_RELEASE_CAPABILITY() { + this->assertHeld(); + SkDEBUGCODE(fOwner = kIllegalThreadID;) + fSemaphore.signal(); + } + + void assertHeld() SK_ASSERT_CAPABILITY(this) { + SkASSERT(fOwner == SkGetThreadID()); + } + +private: + SkSemaphore fSemaphore{1}; + SkDEBUGCODE(SkThreadID fOwner{kIllegalThreadID};) +}; + +class SK_SCOPED_CAPABILITY SkAutoMutexExclusive { +public: + SkAutoMutexExclusive(SkMutex& mutex) SK_ACQUIRE(mutex) : fMutex(mutex) { fMutex.acquire(); } + ~SkAutoMutexExclusive() SK_RELEASE_CAPABILITY() { fMutex.release(); } + + SkAutoMutexExclusive(const SkAutoMutexExclusive&) = delete; + SkAutoMutexExclusive(SkAutoMutexExclusive&&) = delete; + + SkAutoMutexExclusive& operator=(const SkAutoMutexExclusive&) = delete; + SkAutoMutexExclusive& operator=(SkAutoMutexExclusive&&) = delete; + +private: + SkMutex& fMutex; +}; + +#endif // SkMutex_DEFINED diff --git a/src/deps/skia/include/private/SkNoncopyable.h b/src/deps/skia/include/private/SkNoncopyable.h new file mode 100644 index 000000000..bda5d50bb --- /dev/null +++ b/src/deps/skia/include/private/SkNoncopyable.h @@ -0,0 +1,30 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkNoncopyable_DEFINED +#define SkNoncopyable_DEFINED + +#include "include/core/SkTypes.h" + +/** \class SkNoncopyable + + SkNoncopyable is the base class for objects that do not want to + be copied. It hides its copy-constructor and its assignment-operator. +*/ +class SK_API SkNoncopyable { +public: + SkNoncopyable() = default; + + SkNoncopyable(SkNoncopyable&&) = default; + SkNoncopyable& operator =(SkNoncopyable&&) = default; + +private: + SkNoncopyable(const SkNoncopyable&) = delete; + SkNoncopyable& operator=(const SkNoncopyable&) = delete; +}; + +#endif diff --git a/src/deps/skia/include/private/SkNx.h b/src/deps/skia/include/private/SkNx.h new file mode 100644 index 000000000..cf41bb0c9 --- /dev/null +++ b/src/deps/skia/include/private/SkNx.h @@ -0,0 +1,430 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkNx_DEFINED +#define SkNx_DEFINED + +#include "include/core/SkScalar.h" +#include "include/core/SkTypes.h" +#include "include/private/SkSafe_math.h" + +#include <algorithm> +#include <limits> +#include <type_traits> + +// Every single SkNx method wants to be fully inlined. (We know better than MSVC). +#define AI SK_ALWAYS_INLINE + +namespace { // NOLINT(google-build-namespaces) + +// The default SkNx<N,T> just proxies down to a pair of SkNx<N/2, T>. +template <int N, typename T> +struct SkNx { + typedef SkNx<N/2, T> Half; + + Half fLo, fHi; + + AI SkNx() = default; + AI SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {} + + AI SkNx(T v) : fLo(v), fHi(v) {} + + AI SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); } + AI SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); } + AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) { + static_assert(N==8, ""); + } + AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h, + T i, T j, T k, T l, T m, T n, T o, T p) + : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) { + static_assert(N==16, ""); + } + + AI T operator[](int k) const { + SkASSERT(0 <= k && k < N); + return k < N/2 ? fLo[k] : fHi[k-N/2]; + } + + AI static SkNx Load(const void* vptr) { + auto ptr = (const char*)vptr; + return { Half::Load(ptr), Half::Load(ptr + N/2*sizeof(T)) }; + } + AI void store(void* vptr) const { + auto ptr = (char*)vptr; + fLo.store(ptr); + fHi.store(ptr + N/2*sizeof(T)); + } + + AI static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) { + auto ptr = (const char*)vptr; + Half al, bl, cl, dl, + ah, bh, ch, dh; + Half::Load4(ptr , &al, &bl, &cl, &dl); + Half::Load4(ptr + 4*N/2*sizeof(T), &ah, &bh, &ch, &dh); + *a = SkNx{al, ah}; + *b = SkNx{bl, bh}; + *c = SkNx{cl, ch}; + *d = SkNx{dl, dh}; + } + AI static void Load3(const void* vptr, SkNx* a, SkNx* b, SkNx* c) { + auto ptr = (const char*)vptr; + Half al, bl, cl, + ah, bh, ch; + Half::Load3(ptr , &al, &bl, &cl); + Half::Load3(ptr + 3*N/2*sizeof(T), &ah, &bh, &ch); + *a = SkNx{al, ah}; + *b = SkNx{bl, bh}; + *c = SkNx{cl, ch}; + } + AI static void Load2(const void* vptr, SkNx* a, SkNx* b) { + auto ptr = (const char*)vptr; + Half al, bl, + ah, bh; + Half::Load2(ptr , &al, &bl); + Half::Load2(ptr + 2*N/2*sizeof(T), &ah, &bh); + *a = SkNx{al, ah}; + *b = SkNx{bl, bh}; + } + AI static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) { + auto ptr = (char*)vptr; + Half::Store4(ptr, a.fLo, b.fLo, c.fLo, d.fLo); + Half::Store4(ptr + 4*N/2*sizeof(T), a.fHi, b.fHi, c.fHi, d.fHi); + } + AI static void Store3(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c) { + auto ptr = (char*)vptr; + Half::Store3(ptr, a.fLo, b.fLo, c.fLo); + Half::Store3(ptr + 3*N/2*sizeof(T), a.fHi, b.fHi, c.fHi); + } + AI static void Store2(void* vptr, const SkNx& a, const SkNx& b) { + auto ptr = (char*)vptr; + Half::Store2(ptr, a.fLo, b.fLo); + Half::Store2(ptr + 2*N/2*sizeof(T), a.fHi, b.fHi); + } + + AI T min() const { return std::min(fLo.min(), fHi.min()); } + AI T max() const { return std::max(fLo.max(), fHi.max()); } + AI bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); } + AI bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); } + + AI SkNx abs() const { return { fLo. abs(), fHi. abs() }; } + AI SkNx sqrt() const { return { fLo. sqrt(), fHi. sqrt() }; } + AI SkNx floor() const { return { fLo. floor(), fHi. 
floor() }; } + + AI SkNx operator!() const { return { !fLo, !fHi }; } + AI SkNx operator-() const { return { -fLo, -fHi }; } + AI SkNx operator~() const { return { ~fLo, ~fHi }; } + + AI SkNx operator<<(int bits) const { return { fLo << bits, fHi << bits }; } + AI SkNx operator>>(int bits) const { return { fLo >> bits, fHi >> bits }; } + + AI SkNx operator+(const SkNx& y) const { return { fLo + y.fLo, fHi + y.fHi }; } + AI SkNx operator-(const SkNx& y) const { return { fLo - y.fLo, fHi - y.fHi }; } + AI SkNx operator*(const SkNx& y) const { return { fLo * y.fLo, fHi * y.fHi }; } + AI SkNx operator/(const SkNx& y) const { return { fLo / y.fLo, fHi / y.fHi }; } + + AI SkNx operator&(const SkNx& y) const { return { fLo & y.fLo, fHi & y.fHi }; } + AI SkNx operator|(const SkNx& y) const { return { fLo | y.fLo, fHi | y.fHi }; } + AI SkNx operator^(const SkNx& y) const { return { fLo ^ y.fLo, fHi ^ y.fHi }; } + + AI SkNx operator==(const SkNx& y) const { return { fLo == y.fLo, fHi == y.fHi }; } + AI SkNx operator!=(const SkNx& y) const { return { fLo != y.fLo, fHi != y.fHi }; } + AI SkNx operator<=(const SkNx& y) const { return { fLo <= y.fLo, fHi <= y.fHi }; } + AI SkNx operator>=(const SkNx& y) const { return { fLo >= y.fLo, fHi >= y.fHi }; } + AI SkNx operator< (const SkNx& y) const { return { fLo < y.fLo, fHi < y.fHi }; } + AI SkNx operator> (const SkNx& y) const { return { fLo > y.fLo, fHi > y.fHi }; } + + AI SkNx saturatedAdd(const SkNx& y) const { + return { fLo.saturatedAdd(y.fLo), fHi.saturatedAdd(y.fHi) }; + } + + AI SkNx mulHi(const SkNx& m) const { + return { fLo.mulHi(m.fLo), fHi.mulHi(m.fHi) }; + } + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return { fLo.thenElse(t.fLo, e.fLo), fHi.thenElse(t.fHi, e.fHi) }; + } + AI static SkNx Min(const SkNx& x, const SkNx& y) { + return { Half::Min(x.fLo, y.fLo), Half::Min(x.fHi, y.fHi) }; + } + AI static SkNx Max(const SkNx& x, const SkNx& y) { + return { Half::Max(x.fLo, y.fLo), Half::Max(x.fHi, y.fHi) }; + } +}; + +// The N -> N/2 recursion bottoms out at N == 1, a scalar value. 
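+// For example (an illustration added for clarity, not part of the upstream header):
+// Sk8f, typedef'd below, is a generic SkNx<8,float> whose two halves are Sk4f.
+//   Sk8f v(0,1,2,3,4,5,6,7);   // fLo = {0,1,2,3}, fHi = {4,5,6,7}
+//   Sk4f lo, hi;
+//   SkNx_split(v, &lo, &hi);   // hands back the two halves
+//   float m = v.max();         // std::max(fLo.max(), fHi.max()) == 7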
+template <typename T> +struct SkNx<1,T> { + T fVal; + + AI SkNx() = default; + AI SkNx(T v) : fVal(v) {} + + // Android complains against unused parameters, so we guard it + AI T operator[](int SkDEBUGCODE(k)) const { + SkASSERT(k == 0); + return fVal; + } + + AI static SkNx Load(const void* ptr) { + SkNx v; + memcpy(&v, ptr, sizeof(T)); + return v; + } + AI void store(void* ptr) const { memcpy(ptr, &fVal, sizeof(T)); } + + AI static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) { + auto ptr = (const char*)vptr; + *a = Load(ptr + 0*sizeof(T)); + *b = Load(ptr + 1*sizeof(T)); + *c = Load(ptr + 2*sizeof(T)); + *d = Load(ptr + 3*sizeof(T)); + } + AI static void Load3(const void* vptr, SkNx* a, SkNx* b, SkNx* c) { + auto ptr = (const char*)vptr; + *a = Load(ptr + 0*sizeof(T)); + *b = Load(ptr + 1*sizeof(T)); + *c = Load(ptr + 2*sizeof(T)); + } + AI static void Load2(const void* vptr, SkNx* a, SkNx* b) { + auto ptr = (const char*)vptr; + *a = Load(ptr + 0*sizeof(T)); + *b = Load(ptr + 1*sizeof(T)); + } + AI static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) { + auto ptr = (char*)vptr; + a.store(ptr + 0*sizeof(T)); + b.store(ptr + 1*sizeof(T)); + c.store(ptr + 2*sizeof(T)); + d.store(ptr + 3*sizeof(T)); + } + AI static void Store3(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c) { + auto ptr = (char*)vptr; + a.store(ptr + 0*sizeof(T)); + b.store(ptr + 1*sizeof(T)); + c.store(ptr + 2*sizeof(T)); + } + AI static void Store2(void* vptr, const SkNx& a, const SkNx& b) { + auto ptr = (char*)vptr; + a.store(ptr + 0*sizeof(T)); + b.store(ptr + 1*sizeof(T)); + } + + AI T min() const { return fVal; } + AI T max() const { return fVal; } + AI bool anyTrue() const { return fVal != 0; } + AI bool allTrue() const { return fVal != 0; } + + AI SkNx abs() const { return Abs(fVal); } + AI SkNx sqrt() const { return Sqrt(fVal); } + AI SkNx floor() const { return Floor(fVal); } + + AI SkNx operator!() const { return !fVal; } + AI SkNx operator-() const { return -fVal; } + AI SkNx operator~() const { return FromBits(~ToBits(fVal)); } + + AI SkNx operator<<(int bits) const { return fVal << bits; } + AI SkNx operator>>(int bits) const { return fVal >> bits; } + + AI SkNx operator+(const SkNx& y) const { return fVal + y.fVal; } + AI SkNx operator-(const SkNx& y) const { return fVal - y.fVal; } + AI SkNx operator*(const SkNx& y) const { return fVal * y.fVal; } + AI SkNx operator/(const SkNx& y) const { return fVal / y.fVal; } + + AI SkNx operator&(const SkNx& y) const { return FromBits(ToBits(fVal) & ToBits(y.fVal)); } + AI SkNx operator|(const SkNx& y) const { return FromBits(ToBits(fVal) | ToBits(y.fVal)); } + AI SkNx operator^(const SkNx& y) const { return FromBits(ToBits(fVal) ^ ToBits(y.fVal)); } + + AI SkNx operator==(const SkNx& y) const { return FromBits(fVal == y.fVal ? ~0 : 0); } + AI SkNx operator!=(const SkNx& y) const { return FromBits(fVal != y.fVal ? ~0 : 0); } + AI SkNx operator<=(const SkNx& y) const { return FromBits(fVal <= y.fVal ? ~0 : 0); } + AI SkNx operator>=(const SkNx& y) const { return FromBits(fVal >= y.fVal ? ~0 : 0); } + AI SkNx operator< (const SkNx& y) const { return FromBits(fVal < y.fVal ? ~0 : 0); } + AI SkNx operator> (const SkNx& y) const { return FromBits(fVal > y.fVal ? ~0 : 0); } + + AI static SkNx Min(const SkNx& x, const SkNx& y) { return x.fVal < y.fVal ? x : y; } + AI static SkNx Max(const SkNx& x, const SkNx& y) { return x.fVal > y.fVal ? 
x : y; } + + AI SkNx saturatedAdd(const SkNx& y) const { + static_assert(std::is_unsigned<T>::value, ""); + T sum = fVal + y.fVal; + return sum < fVal ? std::numeric_limits<T>::max() : sum; + } + + AI SkNx mulHi(const SkNx& m) const { + static_assert(std::is_unsigned<T>::value, ""); + static_assert(sizeof(T) <= 4, ""); + return static_cast<T>((static_cast<uint64_t>(fVal) * m.fVal) >> (sizeof(T)*8)); + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal != 0 ? t : e; } + +private: + // Helper functions to choose the right float/double methods. (In <cmath> madness lies...) + AI static int Abs(int val) { return val < 0 ? -val : val; } + + AI static float Abs(float val) { return ::fabsf(val); } + AI static float Sqrt(float val) { return ::sqrtf(val); } + AI static float Floor(float val) { return ::floorf(val); } + + AI static double Abs(double val) { return ::fabs(val); } + AI static double Sqrt(double val) { return ::sqrt(val); } + AI static double Floor(double val) { return ::floor(val); } + + // Helper functions for working with floats/doubles as bit patterns. + template <typename U> + AI static U ToBits(U v) { return v; } + AI static int32_t ToBits(float v) { int32_t bits; memcpy(&bits, &v, sizeof(v)); return bits; } + AI static int64_t ToBits(double v) { int64_t bits; memcpy(&bits, &v, sizeof(v)); return bits; } + + template <typename Bits> + AI static T FromBits(Bits bits) { + static_assert(std::is_pod<T >::value && + std::is_pod<Bits>::value && + sizeof(T) <= sizeof(Bits), ""); + T val; + memcpy(&val, &bits, sizeof(T)); + return val; + } +}; + +// Allow scalars on the left or right of binary operators, and things like +=, &=, etc. +#define V template <int N, typename T> AI static SkNx<N,T> + V operator+ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) + y; } + V operator- (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) - y; } + V operator* (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) * y; } + V operator/ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) / y; } + V operator& (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) & y; } + V operator| (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) | y; } + V operator^ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) ^ y; } + V operator==(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) == y; } + V operator!=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) != y; } + V operator<=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) <= y; } + V operator>=(T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) >= y; } + V operator< (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) < y; } + V operator> (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) > y; } + + V operator+ (const SkNx<N,T>& x, T y) { return x + SkNx<N,T>(y); } + V operator- (const SkNx<N,T>& x, T y) { return x - SkNx<N,T>(y); } + V operator* (const SkNx<N,T>& x, T y) { return x * SkNx<N,T>(y); } + V operator/ (const SkNx<N,T>& x, T y) { return x / SkNx<N,T>(y); } + V operator& (const SkNx<N,T>& x, T y) { return x & SkNx<N,T>(y); } + V operator| (const SkNx<N,T>& x, T y) { return x | SkNx<N,T>(y); } + V operator^ (const SkNx<N,T>& x, T y) { return x ^ SkNx<N,T>(y); } + V operator==(const SkNx<N,T>& x, T y) { return x == SkNx<N,T>(y); } + V operator!=(const SkNx<N,T>& x, T y) { return x != SkNx<N,T>(y); } + V operator<=(const SkNx<N,T>& x, T y) { return x <= SkNx<N,T>(y); } + V operator>=(const SkNx<N,T>& x, T y) { return x >= SkNx<N,T>(y); } + V operator< (const SkNx<N,T>& x, T y) { return x < SkNx<N,T>(y); } + V operator> (const SkNx<N,T>& x, T y) { return x > SkNx<N,T>(y); } + + V& 
operator<<=(SkNx<N,T>& x, int bits) { return (x = x << bits); } + V& operator>>=(SkNx<N,T>& x, int bits) { return (x = x >> bits); } + + V& operator +=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x + y); } + V& operator -=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x - y); } + V& operator *=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x * y); } + V& operator /=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x / y); } + V& operator &=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x & y); } + V& operator |=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x | y); } + V& operator ^=(SkNx<N,T>& x, const SkNx<N,T>& y) { return (x = x ^ y); } + + V& operator +=(SkNx<N,T>& x, T y) { return (x = x + SkNx<N,T>(y)); } + V& operator -=(SkNx<N,T>& x, T y) { return (x = x - SkNx<N,T>(y)); } + V& operator *=(SkNx<N,T>& x, T y) { return (x = x * SkNx<N,T>(y)); } + V& operator /=(SkNx<N,T>& x, T y) { return (x = x / SkNx<N,T>(y)); } + V& operator &=(SkNx<N,T>& x, T y) { return (x = x & SkNx<N,T>(y)); } + V& operator |=(SkNx<N,T>& x, T y) { return (x = x | SkNx<N,T>(y)); } + V& operator ^=(SkNx<N,T>& x, T y) { return (x = x ^ SkNx<N,T>(y)); } +#undef V + +// SkNx<N,T> ~~> SkNx<N/2,T> + SkNx<N/2,T> +template <int N, typename T> +AI static void SkNx_split(const SkNx<N,T>& v, SkNx<N/2,T>* lo, SkNx<N/2,T>* hi) { + *lo = v.fLo; + *hi = v.fHi; +} + +// SkNx<N/2,T> + SkNx<N/2,T> ~~> SkNx<N,T> +template <int N, typename T> +AI static SkNx<N*2,T> SkNx_join(const SkNx<N,T>& lo, const SkNx<N,T>& hi) { + return { lo, hi }; +} + +// A very generic shuffle. Can reorder, duplicate, contract, expand... +// Sk4f v = { R,G,B,A }; +// SkNx_shuffle<2,1,0,3>(v) ~~> {B,G,R,A} +// SkNx_shuffle<2,1>(v) ~~> {B,G} +// SkNx_shuffle<2,1,2,1,2,1,2,1>(v) ~~> {B,G,B,G,B,G,B,G} +// SkNx_shuffle<3,3,3,3>(v) ~~> {A,A,A,A} +template <int... Ix, int N, typename T> +AI static SkNx<sizeof...(Ix),T> SkNx_shuffle(const SkNx<N,T>& v) { + return { v[Ix]... }; +} + +// Cast from SkNx<N, Src> to SkNx<N, Dst>, as if you called static_cast<Dst>(Src). +template <typename Dst, typename Src, int N> +AI static SkNx<N,Dst> SkNx_cast(const SkNx<N,Src>& v) { + return { SkNx_cast<Dst>(v.fLo), SkNx_cast<Dst>(v.fHi) }; +} +template <typename Dst, typename Src> +AI static SkNx<1,Dst> SkNx_cast(const SkNx<1,Src>& v) { + return static_cast<Dst>(v.fVal); +} + +template <int N, typename T> +AI static SkNx<N,T> SkNx_fma(const SkNx<N,T>& f, const SkNx<N,T>& m, const SkNx<N,T>& a) { + return f*m+a; +} + +} // namespace + +typedef SkNx<2, float> Sk2f; +typedef SkNx<4, float> Sk4f; +typedef SkNx<8, float> Sk8f; +typedef SkNx<16, float> Sk16f; + +typedef SkNx<2, SkScalar> Sk2s; +typedef SkNx<4, SkScalar> Sk4s; +typedef SkNx<8, SkScalar> Sk8s; +typedef SkNx<16, SkScalar> Sk16s; + +typedef SkNx<4, uint8_t> Sk4b; +typedef SkNx<8, uint8_t> Sk8b; +typedef SkNx<16, uint8_t> Sk16b; + +typedef SkNx<4, uint16_t> Sk4h; +typedef SkNx<8, uint16_t> Sk8h; +typedef SkNx<16, uint16_t> Sk16h; + +typedef SkNx<4, int32_t> Sk4i; +typedef SkNx<8, int32_t> Sk8i; +typedef SkNx<4, uint32_t> Sk4u; + +// Include platform specific specializations if available. 
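+// (Whichever backend gets pulled in, the public surface is the aliases above. A usage
+//  sketch, illustrative only and not upstream Skia; `in` and `out` are placeholder buffers:
+//    float   in[4] = {0.f, 0.25f, 0.5f, 1.f};
+//    uint8_t out[4];
+//    Sk4f v = Sk4f::Load(in) * 255.0f + 0.5f;  // scalar operands promote via SkNx(T)
+//    SkNx_cast<uint8_t>(v).store(out);         // narrowing cast, specialized per backend
+//  )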
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 + #include "include/private/SkNx_sse.h" +#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON) + #include "include/private/SkNx_neon.h" +#else + +AI static Sk4i Sk4f_round(const Sk4f& x) { + return { (int) lrintf (x[0]), + (int) lrintf (x[1]), + (int) lrintf (x[2]), + (int) lrintf (x[3]), }; +} + +#endif + +AI static void Sk4f_ToBytes(uint8_t p[16], + const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) { + SkNx_cast<uint8_t>(SkNx_join(SkNx_join(a,b), SkNx_join(c,d))).store(p); +} + +#undef AI + +#endif//SkNx_DEFINED diff --git a/src/deps/skia/include/private/SkNx_neon.h b/src/deps/skia/include/private/SkNx_neon.h new file mode 100644 index 000000000..a5e2e0109 --- /dev/null +++ b/src/deps/skia/include/private/SkNx_neon.h @@ -0,0 +1,713 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkNx_neon_DEFINED +#define SkNx_neon_DEFINED + +#include <arm_neon.h> + +namespace { // NOLINT(google-build-namespaces) + +// ARMv8 has vrndm(q)_f32 to floor floats. Here we emulate it: +// - roundtrip through integers via truncation +// - subtract 1 if that's too big (possible for negative values). +// This restricts the domain of our inputs to a maximum somehwere around 2^31. Seems plenty big. +AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) { + auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v)); + auto too_big = vcgtq_f32(roundtrip, v); + return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1))); +} +AI static float32x2_t emulate_vrndm_f32(float32x2_t v) { + auto roundtrip = vcvt_f32_s32(vcvt_s32_f32(v)); + auto too_big = vcgt_f32(roundtrip, v); + return vsub_f32(roundtrip, (float32x2_t)vand_u32(too_big, (uint32x2_t)vdup_n_f32(1))); +} + +template <> +class SkNx<2, float> { +public: + AI SkNx(float32x2_t vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(float val) : fVec(vdup_n_f32(val)) {} + AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; } + + AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); } + AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); } + + AI static void Load2(const void* ptr, SkNx* x, SkNx* y) { + float32x2x2_t xy = vld2_f32((const float*) ptr); + *x = xy.val[0]; + *y = xy.val[1]; + } + + AI static void Store2(void* dst, const SkNx& a, const SkNx& b) { + float32x2x2_t ab = {{ + a.fVec, + b.fVec, + }}; + vst2_f32((float*) dst, ab); + } + + AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) { + float32x2x3_t abc = {{ + a.fVec, + b.fVec, + c.fVec, + }}; + vst3_f32((float*) dst, abc); + } + + AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) { + float32x2x4_t abcd = {{ + a.fVec, + b.fVec, + c.fVec, + d.fVec, + }}; + vst4_f32((float*) dst, abcd); + } + + AI SkNx operator - () const { return vneg_f32(fVec); } + + AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); } + AI SkNx operator / (const SkNx& o) const { + #if defined(SK_CPU_ARM64) + return vdiv_f32(fVec, o.fVec); + #else + float32x2_t est0 = vrecpe_f32(o.fVec), + est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0), + est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1); + return vmul_f32(fVec, est2); + #endif + } + + AI SkNx 
operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); } + AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); } + AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); } + AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); } + AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); } + AI SkNx operator!=(const SkNx& o) const { + return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec))); + } + + AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); } + AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); } + + AI SkNx abs() const { return vabs_f32(fVec); } + AI SkNx floor() const { + #if defined(SK_CPU_ARM64) + return vrndm_f32(fVec); + #else + return emulate_vrndm_f32(fVec); + #endif + } + + AI SkNx sqrt() const { + #if defined(SK_CPU_ARM64) + return vsqrt_f32(fVec); + #else + float32x2_t est0 = vrsqrte_f32(fVec), + est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0), + est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1); + return vmul_f32(fVec, est2); + #endif + } + + AI float operator[](int k) const { + SkASSERT(0 <= k && k < 2); + union { float32x2_t v; float fs[2]; } pun = {fVec}; + return pun.fs[k&1]; + } + + AI bool allTrue() const { + #if defined(SK_CPU_ARM64) + return 0 != vminv_u32(vreinterpret_u32_f32(fVec)); + #else + auto v = vreinterpret_u32_f32(fVec); + return vget_lane_u32(v,0) && vget_lane_u32(v,1); + #endif + } + AI bool anyTrue() const { + #if defined(SK_CPU_ARM64) + return 0 != vmaxv_u32(vreinterpret_u32_f32(fVec)); + #else + auto v = vreinterpret_u32_f32(fVec); + return vget_lane_u32(v,0) || vget_lane_u32(v,1); + #endif + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return vbsl_f32(vreinterpret_u32_f32(fVec), t.fVec, e.fVec); + } + + float32x2_t fVec; +}; + +template <> +class SkNx<4, float> { +public: + AI SkNx(float32x4_t vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(float val) : fVec(vdupq_n_f32(val)) {} + AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; } + + AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); } + AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); } + + AI static void Load2(const void* ptr, SkNx* x, SkNx* y) { + float32x4x2_t xy = vld2q_f32((const float*) ptr); + *x = xy.val[0]; + *y = xy.val[1]; + } + + AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) { + float32x4x4_t rgba = vld4q_f32((const float*) ptr); + *r = rgba.val[0]; + *g = rgba.val[1]; + *b = rgba.val[2]; + *a = rgba.val[3]; + } + AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) { + float32x4x4_t rgba = {{ + r.fVec, + g.fVec, + b.fVec, + a.fVec, + }}; + vst4q_f32((float*) dst, rgba); + } + + AI SkNx operator - () const { return vnegq_f32(fVec); } + + AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); } + AI SkNx operator / (const SkNx& o) const { + #if defined(SK_CPU_ARM64) + return vdivq_f32(fVec, o.fVec); + #else + float32x4_t est0 = vrecpeq_f32(o.fVec), + est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0), + est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1); + return 
vmulq_f32(fVec, est2); + #endif + } + + AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));} + AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));} + AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));} + AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));} + AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));} + AI SkNx operator!=(const SkNx& o) const { + return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); + } + + AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); } + AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); } + + AI SkNx abs() const { return vabsq_f32(fVec); } + AI SkNx floor() const { + #if defined(SK_CPU_ARM64) + return vrndmq_f32(fVec); + #else + return emulate_vrndmq_f32(fVec); + #endif + } + + + AI SkNx sqrt() const { + #if defined(SK_CPU_ARM64) + return vsqrtq_f32(fVec); + #else + float32x4_t est0 = vrsqrteq_f32(fVec), + est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0), + est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); + return vmulq_f32(fVec, est2); + #endif + } + + AI float operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { float32x4_t v; float fs[4]; } pun = {fVec}; + return pun.fs[k&3]; + } + + AI float min() const { + #if defined(SK_CPU_ARM64) + return vminvq_f32(fVec); + #else + SkNx min = Min(*this, vrev64q_f32(fVec)); + return std::min(min[0], min[2]); + #endif + } + + AI float max() const { + #if defined(SK_CPU_ARM64) + return vmaxvq_f32(fVec); + #else + SkNx max = Max(*this, vrev64q_f32(fVec)); + return std::max(max[0], max[2]); + #endif + } + + AI bool allTrue() const { + #if defined(SK_CPU_ARM64) + return 0 != vminvq_u32(vreinterpretq_u32_f32(fVec)); + #else + auto v = vreinterpretq_u32_f32(fVec); + return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1) + && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3); + #endif + } + AI bool anyTrue() const { + #if defined(SK_CPU_ARM64) + return 0 != vmaxvq_u32(vreinterpretq_u32_f32(fVec)); + #else + auto v = vreinterpretq_u32_f32(fVec); + return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1) + || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3); + #endif + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec); + } + + float32x4_t fVec; +}; + +#if defined(SK_CPU_ARM64) + AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) { + return vfmaq_f32(a.fVec, f.fVec, m.fVec); + } +#endif + +// It's possible that for our current use cases, representing this as +// half a uint16x8_t might be better than representing it as a uint16x4_t. +// It'd make conversion to Sk4b one step simpler. 
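+// (Presumably the saved step is the vcombine_u16 that the Sk4h -> Sk4b cast further
+//  down needs before it can narrow with vmovn_u16; a full uint16x8_t could be narrowed
+//  in one go. Note added for context, not from the upstream header.)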
+template <> +class SkNx<4, uint16_t> { +public: + AI SkNx(const uint16x4_t& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {} + AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) { + fVec = (uint16x4_t) { a,b,c,d }; + } + + AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); } + AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); } + + AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) { + uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr); + *r = rgba.val[0]; + *g = rgba.val[1]; + *b = rgba.val[2]; + *a = rgba.val[3]; + } + AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) { + uint16x4x3_t rgba = vld3_u16((const uint16_t*)ptr); + *r = rgba.val[0]; + *g = rgba.val[1]; + *b = rgba.val[2]; + } + AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) { + uint16x4x4_t rgba = {{ + r.fVec, + g.fVec, + b.fVec, + a.fVec, + }}; + vst4_u16((uint16_t*) dst, rgba); + } + + AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); } + AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; } + AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); } + + AI uint16_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { uint16x4_t v; uint16_t us[4]; } pun = {fVec}; + return pun.us[k&3]; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return vbsl_u16(fVec, t.fVec, e.fVec); + } + + uint16x4_t fVec; +}; + +template <> +class SkNx<8, uint16_t> { +public: + AI SkNx(const uint16x8_t& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {} + AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); } + + AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d, + uint16_t e, uint16_t f, uint16_t g, uint16_t h) { + fVec = (uint16x8_t) { a,b,c,d, e,f,g,h }; + } + + AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); } + + AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); } + AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; } + AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); } + + AI uint16_t operator[](int k) const { + SkASSERT(0 <= k && k < 8); + union { uint16x8_t v; uint16_t us[8]; } pun = {fVec}; + return pun.us[k&7]; + } + + AI SkNx mulHi(const SkNx& m) const { + uint32x4_t hi = vmull_u16(vget_high_u16(fVec), vget_high_u16(m.fVec)); + uint32x4_t lo = vmull_u16( vget_low_u16(fVec), vget_low_u16(m.fVec)); + + return { vcombine_u16(vshrn_n_u32(lo,16), vshrn_n_u32(hi,16)) }; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return 
vbslq_u16(fVec, t.fVec, e.fVec); + } + + uint16x8_t fVec; +}; + +template <> +class SkNx<4, uint8_t> { +public: + typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t; + + AI SkNx(const uint8x8_t& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) { + fVec = (uint8x8_t){a,b,c,d, 0,0,0,0}; + } + AI static SkNx Load(const void* ptr) { + return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr); + } + AI void store(void* ptr) const { + return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0); + } + AI uint8_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { uint8x8_t v; uint8_t us[8]; } pun = {fVec}; + return pun.us[k&3]; + } + + // TODO as needed + + uint8x8_t fVec; +}; + +template <> +class SkNx<8, uint8_t> { +public: + AI SkNx(const uint8x8_t& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint8_t val) : fVec(vdup_n_u8(val)) {} + AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g, uint8_t h) { + fVec = (uint8x8_t) { a,b,c,d, e,f,g,h }; + } + + AI static SkNx Load(const void* ptr) { return vld1_u8((const uint8_t*)ptr); } + AI void store(void* ptr) const { vst1_u8((uint8_t*)ptr, fVec); } + + AI uint8_t operator[](int k) const { + SkASSERT(0 <= k && k < 8); + union { uint8x8_t v; uint8_t us[8]; } pun = {fVec}; + return pun.us[k&7]; + } + + uint8x8_t fVec; +}; + +template <> +class SkNx<16, uint8_t> { +public: + AI SkNx(const uint8x16_t& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {} + AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g, uint8_t h, + uint8_t i, uint8_t j, uint8_t k, uint8_t l, + uint8_t m, uint8_t n, uint8_t o, uint8_t p) { + fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p }; + } + + AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); } + AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); } + + AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); } + + AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); } + AI SkNx operator & (const SkNx& o) const { return vandq_u8(fVec, o.fVec); } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); } + AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); } + + AI uint8_t operator[](int k) const { + SkASSERT(0 <= k && k < 16); + union { uint8x16_t v; uint8_t us[16]; } pun = {fVec}; + return pun.us[k&15]; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return vbslq_u8(fVec, t.fVec, e.fVec); + } + + uint8x16_t fVec; +}; + +template <> +class SkNx<4, int32_t> { +public: + AI SkNx(const int32x4_t& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(int32_t v) { + fVec = vdupq_n_s32(v); + } + AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) { + fVec = (int32x4_t){a,b,c,d}; + } + AI static SkNx Load(const void* ptr) { + return vld1q_s32((const int32_t*)ptr); + } + AI void store(void* ptr) const { + return vst1q_s32((int32_t*)ptr, fVec); + } + AI int32_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { int32x4_t v; int32_t is[4]; } pun = {fVec}; + return pun.is[k&3]; + } + + AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); } + + 
AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); } + AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; } + AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; } + + AI SkNx operator == (const SkNx& o) const { + return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec)); + } + AI SkNx operator < (const SkNx& o) const { + return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec)); + } + AI SkNx operator > (const SkNx& o) const { + return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec)); + } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } + AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); } + // TODO as needed + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec); + } + + AI SkNx abs() const { return vabsq_s32(fVec); } + + int32x4_t fVec; +}; + +template <> +class SkNx<4, uint32_t> { +public: + AI SkNx(const uint32x4_t& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint32_t v) { + fVec = vdupq_n_u32(v); + } + AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) { + fVec = (uint32x4_t){a,b,c,d}; + } + AI static SkNx Load(const void* ptr) { + return vld1q_u32((const uint32_t*)ptr); + } + AI void store(void* ptr) const { + return vst1q_u32((uint32_t*)ptr, fVec); + } + AI uint32_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { uint32x4_t v; uint32_t us[4]; } pun = {fVec}; + return pun.us[k&3]; + } + + AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); } + + AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); } + AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; } + AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; } + + AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); } + AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); } + AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); } + // TODO as needed + + AI SkNx mulHi(const SkNx& m) const { + uint64x2_t hi = vmull_u32(vget_high_u32(fVec), vget_high_u32(m.fVec)); + uint64x2_t lo = vmull_u32( vget_low_u32(fVec), vget_low_u32(m.fVec)); + + return { vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)) }; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return vbslq_u32(fVec, t.fVec, e.fVec); + } + + uint32x4_t fVec; +}; + +template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) { + return vcvtq_s32_f32(src.fVec); + +} +template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) { + return vcvtq_f32_s32(src.fVec); +} +template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) { + return SkNx_cast<float>(Sk4i::Load(&src)); +} + +template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) { + return vqmovn_u32(vcvtq_u32_f32(src.fVec)); +} + 
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) { + return vcvtq_f32_u32(vmovl_u16(src.fVec)); +} + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) { + uint32x4_t _32 = vcvtq_u32_f32(src.fVec); + uint16x4_t _16 = vqmovn_u32(_32); + return vqmovn_u16(vcombine_u16(_16, _16)); +} + +template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) { + uint16x8_t _16 = vmovl_u8(src.fVec); + return vmovl_u16(vget_low_u16(_16)); +} + +template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) { + return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec); +} + +template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) { + return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec); +} + +template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) { + Sk8f ab, cd; + SkNx_split(src, &ab, &cd); + + Sk4f a,b,c,d; + SkNx_split(ab, &a, &b); + SkNx_split(cd, &c, &d); + return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec), + (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0], + vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec), + (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]; +} + +template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) { + Sk4i a, b; + SkNx_split(src, &a, &b); + uint16x4_t a16 = vqmovun_s32(a.fVec); + uint16x4_t b16 = vqmovun_s32(b.fVec); + + return vqmovn_u16(vcombine_u16(a16, b16)); +} + +template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { + return vget_low_u16(vmovl_u8(src.fVec)); +} + +template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) { + return vmovl_u8(src.fVec); +} + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { + return vmovn_u16(vcombine_u16(src.fVec, src.fVec)); +} + +template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) { + return vqmovn_u16(src.fVec); +} + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) { + uint16x4_t _16 = vqmovun_s32(src.fVec); + return vqmovn_u16(vcombine_u16(_16, _16)); +} + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) { + uint16x4_t _16 = vqmovn_u32(src.fVec); + return vqmovn_u16(vcombine_u16(_16, _16)); +} + +template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) { + return vreinterpretq_s32_u32(vmovl_u16(src.fVec)); +} + +template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) { + return vmovn_u32(vreinterpretq_u32_s32(src.fVec)); +} + +template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) { + return vreinterpretq_s32_u32(src.fVec); +} + +AI static Sk4i Sk4f_round(const Sk4f& x) { + return vcvtq_s32_f32((x + 0.5f).fVec); +} + +} // namespace + +#endif//SkNx_neon_DEFINED diff --git a/src/deps/skia/include/private/SkNx_sse.h b/src/deps/skia/include/private/SkNx_sse.h new file mode 100644 index 000000000..e07f780e5 --- /dev/null +++ b/src/deps/skia/include/private/SkNx_sse.h @@ -0,0 +1,823 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkNx_sse_DEFINED +#define SkNx_sse_DEFINED + +#include "include/core/SkTypes.h" + +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + #include <smmintrin.h> +#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 + #include <tmmintrin.h> +#else + #include <emmintrin.h> +#endif + +// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent. 
+// If you do, make sure this is in a static inline function... anywhere else risks violating ODR. + +namespace { // NOLINT(google-build-namespaces) + +// Emulate _mm_floor_ps() with SSE2: +// - roundtrip through integers via truncation +// - subtract 1 if that's too big (possible for negative values). +// This restricts the domain of our inputs to a maximum somehwere around 2^31. +// Seems plenty big. +AI static __m128 emulate_mm_floor_ps(__m128 v) { + __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v)); + __m128 too_big = _mm_cmpgt_ps(roundtrip, v); + return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f))); +} + +template <> +class SkNx<2, float> { +public: + AI SkNx(const __m128& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(float val) : fVec(_mm_set1_ps(val)) {} + AI static SkNx Load(const void* ptr) { + return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr)); + } + AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {} + + AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); } + + AI static void Load2(const void* ptr, SkNx* x, SkNx* y) { + const float* m = (const float*)ptr; + *x = SkNx{m[0], m[2]}; + *y = SkNx{m[1], m[3]}; + } + + AI static void Store2(void* dst, const SkNx& a, const SkNx& b) { + auto vals = _mm_unpacklo_ps(a.fVec, b.fVec); + _mm_storeu_ps((float*)dst, vals); + } + + AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) { + auto lo = _mm_setr_ps(a[0], b[0], c[0], a[1]), + hi = _mm_setr_ps(b[1], c[1], 0, 0); + _mm_storeu_ps((float*)dst, lo); + _mm_storel_pi(((__m64*)dst) + 2, hi); + } + + AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) { + auto lo = _mm_setr_ps(a[0], b[0], c[0], d[0]), + hi = _mm_setr_ps(a[1], b[1], c[1], d[1]); + _mm_storeu_ps((float*)dst, lo); + _mm_storeu_ps(((float*)dst) + 4, hi); + } + + AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); } + AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); } + + AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); } + AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); } + AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); } + AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); } + AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); } + AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); } + + AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); } + AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); } + + AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); } + AI SkNx floor() const { + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_floor_ps(fVec); + #else + return emulate_mm_floor_ps(fVec); + #endif + } + + AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); } + + AI float operator[](int k) const { + SkASSERT(0 <= k && k < 2); + union { __m128 v; float fs[4]; } pun = {fVec}; + return pun.fs[k&1]; + } + + AI bool allTrue() const { return 0b11 == (_mm_movemask_ps(fVec) & 0b11); } + AI bool anyTrue() const { return 0b00 != (_mm_movemask_ps(fVec) & 0b11); } + + AI 
SkNx thenElse(const SkNx& t, const SkNx& e) const { + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_blendv_ps(e.fVec, t.fVec, fVec); + #else + return _mm_or_ps(_mm_and_ps (fVec, t.fVec), + _mm_andnot_ps(fVec, e.fVec)); + #endif + } + + __m128 fVec; +}; + +template <> +class SkNx<4, float> { +public: + AI SkNx(const __m128& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(float val) : fVec( _mm_set1_ps(val) ) {} + AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {} + + AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); } + AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); } + + AI static void Load2(const void* ptr, SkNx* x, SkNx* y) { + SkNx lo = SkNx::Load((const float*)ptr+0), + hi = SkNx::Load((const float*)ptr+4); + *x = SkNx{lo[0], lo[2], hi[0], hi[2]}; + *y = SkNx{lo[1], lo[3], hi[1], hi[3]}; + } + + AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) { + __m128 v0 = _mm_loadu_ps(((float*)ptr) + 0), + v1 = _mm_loadu_ps(((float*)ptr) + 4), + v2 = _mm_loadu_ps(((float*)ptr) + 8), + v3 = _mm_loadu_ps(((float*)ptr) + 12); + _MM_TRANSPOSE4_PS(v0, v1, v2, v3); + *r = v0; + *g = v1; + *b = v2; + *a = v3; + } + AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) { + __m128 v0 = r.fVec, + v1 = g.fVec, + v2 = b.fVec, + v3 = a.fVec; + _MM_TRANSPOSE4_PS(v0, v1, v2, v3); + _mm_storeu_ps(((float*) dst) + 0, v0); + _mm_storeu_ps(((float*) dst) + 4, v1); + _mm_storeu_ps(((float*) dst) + 8, v2); + _mm_storeu_ps(((float*) dst) + 12, v3); + } + + AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); } + AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); } + + AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); } + AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); } + AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); } + AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); } + AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); } + AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); } + + AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); } + AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); } + + AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); } + AI SkNx floor() const { + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_floor_ps(fVec); + #else + return emulate_mm_floor_ps(fVec); + #endif + } + + AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); } + + AI float operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { __m128 v; float fs[4]; } pun = {fVec}; + return pun.fs[k&3]; + } + + AI float min() const { + SkNx min = Min(*this, _mm_shuffle_ps(fVec, fVec, _MM_SHUFFLE(2,3,0,1))); + min = Min(min, _mm_shuffle_ps(min.fVec, min.fVec, _MM_SHUFFLE(0,1,2,3))); + return min[0]; + } + + AI float max() const { + SkNx max = Max(*this, _mm_shuffle_ps(fVec, fVec, _MM_SHUFFLE(2,3,0,1))); + max = Max(max, _mm_shuffle_ps(max.fVec, max.fVec, _MM_SHUFFLE(0,1,2,3))); + return max[0]; + } + + AI bool allTrue() const { 
return 0b1111 == _mm_movemask_ps(fVec); } + AI bool anyTrue() const { return 0b0000 != _mm_movemask_ps(fVec); } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_blendv_ps(e.fVec, t.fVec, fVec); + #else + return _mm_or_ps(_mm_and_ps (fVec, t.fVec), + _mm_andnot_ps(fVec, e.fVec)); + #endif + } + + __m128 fVec; +}; + +AI static __m128i mullo32(__m128i a, __m128i b) { +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_mullo_epi32(a, b); +#else + __m128i mul20 = _mm_mul_epu32(a, b), + mul31 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4)); + return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)), + _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0))); +#endif +} + +template <> +class SkNx<4, int32_t> { +public: + AI SkNx(const __m128i& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {} + AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); } + AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {} + + AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); } + + AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); } + AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); } + AI SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); } + + AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); } + AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); } + AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); } + + AI int32_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { __m128i v; int32_t is[4]; } pun = {fVec}; + return pun.is[k&3]; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_blendv_epi8(e.fVec, t.fVec, fVec); + #else + return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), + _mm_andnot_si128(fVec, e.fVec)); + #endif + } + + AI SkNx abs() const { +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 + return _mm_abs_epi32(fVec); +#else + SkNx mask = (*this) >> 31; + return (mask ^ (*this)) - mask; +#endif + } + + AI static SkNx Min(const SkNx& x, const SkNx& y) { +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_min_epi32(x.fVec, y.fVec); +#else + return (x < y).thenElse(x, y); +#endif + } + + AI static SkNx Max(const SkNx& x, const SkNx& y) { +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_max_epi32(x.fVec, y.fVec); +#else + return (x > y).thenElse(x, y); +#endif + } + + __m128i fVec; +}; + +template <> +class SkNx<2, uint32_t> { +public: + AI SkNx(const __m128i& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint32_t val) : fVec(_mm_set1_epi32((int)val)) {} + AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); } + AI SkNx(uint32_t a, uint32_t b) : fVec(_mm_setr_epi32((int)a,(int)b,0,0)) {} + + AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); } + + AI SkNx 
operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); } + + AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); } + AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); } + AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); } + + AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); } + AI SkNx operator != (const SkNx& o) const { return (*this == o) ^ 0xffffffff; } + // operator < and > take a little extra fiddling to make work for unsigned ints. + + AI uint32_t operator[](int k) const { + SkASSERT(0 <= k && k < 2); + union { __m128i v; uint32_t us[4]; } pun = {fVec}; + return pun.us[k&1]; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_blendv_epi8(e.fVec, t.fVec, fVec); +#else + return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), + _mm_andnot_si128(fVec, e.fVec)); +#endif + } + + AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(fVec) & 0xff); } + + __m128i fVec; +}; + +template <> +class SkNx<4, uint32_t> { +public: + AI SkNx(const __m128i& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint32_t val) : fVec(_mm_set1_epi32((int)val)) {} + AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); } + AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) + : fVec(_mm_setr_epi32((int)a,(int)b,(int)c,(int)d)) {} + + AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return mullo32(fVec, o.fVec); } + + AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); } + AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); } + AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); } + + AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); } + AI SkNx operator != (const SkNx& o) const { return (*this == o) ^ 0xffffffff; } + + // operator < and > take a little extra fiddling to make work for unsigned ints. 
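+    // (A sketch of that fiddling, not upstream code: for an operator<(o), XOR both
+    //  sides with 0x80000000 so unsigned order maps onto signed order, then compare
+    //  as signed, the same sign-flip used for the unsigned byte compare further down:
+    //    __m128i bias = _mm_set1_epi32((int)0x80000000);
+    //    return _mm_cmplt_epi32(_mm_xor_si128(fVec, bias), _mm_xor_si128(o.fVec, bias));
+    //  )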
+ + AI uint32_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { __m128i v; uint32_t us[4]; } pun = {fVec}; + return pun.us[k&3]; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + return _mm_blendv_epi8(e.fVec, t.fVec, fVec); + #else + return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), + _mm_andnot_si128(fVec, e.fVec)); + #endif + } + + AI SkNx mulHi(SkNx m) const { + SkNx v20{_mm_mul_epu32(m.fVec, fVec)}; + SkNx v31{_mm_mul_epu32(_mm_srli_si128(m.fVec, 4), _mm_srli_si128(fVec, 4))}; + + return SkNx{v20[1], v31[1], v20[3], v31[3]}; + } + + __m128i fVec; +}; + +template <> +class SkNx<4, uint16_t> { +public: + AI SkNx(const __m128i& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint16_t val) : fVec(_mm_set1_epi16((short)val)) {} + AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) + : fVec(_mm_setr_epi16((short)a,(short)b,(short)c,(short)d,0,0,0,0)) {} + + AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); } + AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); } + + AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) { + __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0), + hi = _mm_loadu_si128(((__m128i*)ptr) + 1); + __m128i even = _mm_unpacklo_epi16(lo, hi), // r0 r2 g0 g2 b0 b2 a0 a2 + odd = _mm_unpackhi_epi16(lo, hi); // r1 r3 ... + __m128i rg = _mm_unpacklo_epi16(even, odd), // r0 r1 r2 r3 g0 g1 g2 g3 + ba = _mm_unpackhi_epi16(even, odd); // b0 b1 ... a0 a1 ... + *r = rg; + *g = _mm_srli_si128(rg, 8); + *b = ba; + *a = _mm_srli_si128(ba, 8); + } + AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) { + // The idea here is to get 4 vectors that are R G B _ _ _ _ _. + // The second load is at a funny location to make sure we don't read past + // the bounds of memory. This is fine, we just need to shift it a little bit. 
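+        // (Layout note added for clarity, not upstream: the 4 RGB pixels are 12 uint16s,
+        //  r0 g0 b0 r1 g1 b1 r2 g2 b2 r3 g3 b3, i.e. 24 bytes. rgb2's 16-byte load below
+        //  starts two shorts early, at g1, so it stays inside those 24 bytes, and the
+        //  _mm_srli_si128 by 2*2 lines it back up on r2.)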
+ const uint8_t* ptr8 = (const uint8_t*) ptr; + __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0)); + __m128i rgb1 = _mm_srli_si128(rgb0, 3*2); + __m128i rgb2 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 4*2)), 2*2); + __m128i rgb3 = _mm_srli_si128(rgb2, 3*2); + + __m128i rrggbb01 = _mm_unpacklo_epi16(rgb0, rgb1); + __m128i rrggbb23 = _mm_unpacklo_epi16(rgb2, rgb3); + *r = _mm_unpacklo_epi32(rrggbb01, rrggbb23); + *g = _mm_srli_si128(r->fVec, 4*2); + *b = _mm_unpackhi_epi32(rrggbb01, rrggbb23); + } + AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) { + __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec); + __m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec); + __m128i lo = _mm_unpacklo_epi32(rg, ba); + __m128i hi = _mm_unpackhi_epi32(rg, ba); + _mm_storeu_si128(((__m128i*) dst) + 0, lo); + _mm_storeu_si128(((__m128i*) dst) + 1, hi); + } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); } + AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); } + AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); } + + AI uint16_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { __m128i v; uint16_t us[8]; } pun = {fVec}; + return pun.us[k&3]; + } + + __m128i fVec; +}; + +template <> +class SkNx<8, uint16_t> { +public: + AI SkNx(const __m128i& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint16_t val) : fVec(_mm_set1_epi16((short)val)) {} + AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d, + uint16_t e, uint16_t f, uint16_t g, uint16_t h) + : fVec(_mm_setr_epi16((short)a,(short)b,(short)c,(short)d, + (short)e,(short)f,(short)g,(short)h)) {} + + AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); } + AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); } + + AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) { + __m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0), + _23 = _mm_loadu_si128(((__m128i*)ptr) + 1), + _45 = _mm_loadu_si128(((__m128i*)ptr) + 2), + _67 = _mm_loadu_si128(((__m128i*)ptr) + 3); + + __m128i _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2 + _13 = _mm_unpackhi_epi16(_01, _23), // r1 r3 g1 g3 b1 b3 a1 a3 + _46 = _mm_unpacklo_epi16(_45, _67), + _57 = _mm_unpackhi_epi16(_45, _67); + + __m128i rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3 + ba0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 a0 a1 a2 a3 + rg4567 = _mm_unpacklo_epi16(_46, _57), + ba4567 = _mm_unpackhi_epi16(_46, _57); + + *r = _mm_unpacklo_epi64(rg0123, rg4567); + *g = _mm_unpackhi_epi64(rg0123, rg4567); + *b = _mm_unpacklo_epi64(ba0123, ba4567); + *a = _mm_unpackhi_epi64(ba0123, ba4567); + } + AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) { + const uint8_t* ptr8 = (const uint8_t*) ptr; + __m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0*2)); + __m128i rgb1 = _mm_srli_si128(rgb0, 3*2); + __m128i rgb2 = _mm_loadu_si128((const __m128i*) (ptr8 + 6*2)); + __m128i rgb3 = _mm_srli_si128(rgb2, 3*2); + __m128i rgb4 = _mm_loadu_si128((const __m128i*) (ptr8 + 12*2)); + __m128i rgb5 = _mm_srli_si128(rgb4, 3*2); + __m128i 
rgb6 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 16*2)), 2*2); + __m128i rgb7 = _mm_srli_si128(rgb6, 3*2); + + __m128i rgb01 = _mm_unpacklo_epi16(rgb0, rgb1); + __m128i rgb23 = _mm_unpacklo_epi16(rgb2, rgb3); + __m128i rgb45 = _mm_unpacklo_epi16(rgb4, rgb5); + __m128i rgb67 = _mm_unpacklo_epi16(rgb6, rgb7); + + __m128i rg03 = _mm_unpacklo_epi32(rgb01, rgb23); + __m128i bx03 = _mm_unpackhi_epi32(rgb01, rgb23); + __m128i rg47 = _mm_unpacklo_epi32(rgb45, rgb67); + __m128i bx47 = _mm_unpackhi_epi32(rgb45, rgb67); + + *r = _mm_unpacklo_epi64(rg03, rg47); + *g = _mm_unpackhi_epi64(rg03, rg47); + *b = _mm_unpacklo_epi64(bx03, bx47); + } + AI static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) { + __m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec), // r0 g0 r1 g1 r2 g2 r3 g3 + rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec), // r4 g4 r5 g5 r6 g6 r7 g7 + ba0123 = _mm_unpacklo_epi16(b.fVec, a.fVec), + ba4567 = _mm_unpackhi_epi16(b.fVec, a.fVec); + + _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg0123, ba0123)); + _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg0123, ba0123)); + _mm_storeu_si128((__m128i*)ptr + 2, _mm_unpacklo_epi32(rg4567, ba4567)); + _mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567)); + } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); } + AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); } + AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); } + AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); } + + AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); } + AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { + // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the + // signed version, _mm_min_epi16, then shift back. + const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine. 
+ const __m128i top_8x = _mm_set1_epi16((short)top); + return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x), + _mm_sub_epi8(b.fVec, top_8x))); + } + + AI SkNx mulHi(const SkNx& m) const { + return _mm_mulhi_epu16(fVec, m.fVec); + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), + _mm_andnot_si128(fVec, e.fVec)); + } + + AI uint16_t operator[](int k) const { + SkASSERT(0 <= k && k < 8); + union { __m128i v; uint16_t us[8]; } pun = {fVec}; + return pun.us[k&7]; + } + + __m128i fVec; +}; + +template <> +class SkNx<4, uint8_t> { +public: + AI SkNx() {} + AI SkNx(const __m128i& vec) : fVec(vec) {} + AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) + : fVec(_mm_setr_epi8((char)a,(char)b,(char)c,(char)d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {} + + AI static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); } + AI void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); } + + AI uint8_t operator[](int k) const { + SkASSERT(0 <= k && k < 4); + union { __m128i v; uint8_t us[16]; } pun = {fVec}; + return pun.us[k&3]; + } + + // TODO as needed + + __m128i fVec; +}; + +template <> +class SkNx<8, uint8_t> { +public: + AI SkNx(const __m128i& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint8_t val) : fVec(_mm_set1_epi8((char)val)) {} + AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); } + AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g, uint8_t h) + : fVec(_mm_setr_epi8((char)a,(char)b,(char)c,(char)d, + (char)e,(char)f,(char)g,(char)h, + 0,0,0,0, 0,0,0,0)) {} + + AI void store(void* ptr) const {_mm_storel_epi64((__m128i*)ptr, fVec);} + + AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); } + AI SkNx operator < (const SkNx& o) const { + // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare. 
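+        // e.g. 0x01 < 0xFF (1 < 255, true): after XOR with 0x80 the lanes become 0x81 (-127)
+        // and 0x7F (+127), and the signed compare still reports true. Each result lane is
+        // 0xFF where the comparison holds and 0x00 where it does not.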
+ auto flip = _mm_set1_epi8(char(0x80)); + return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec)); + } + + AI uint8_t operator[](int k) const { + SkASSERT(0 <= k && k < 16); + union { __m128i v; uint8_t us[16]; } pun = {fVec}; + return pun.us[k&15]; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), + _mm_andnot_si128(fVec, e.fVec)); + } + + __m128i fVec; +}; + +template <> +class SkNx<16, uint8_t> { +public: + AI SkNx(const __m128i& vec) : fVec(vec) {} + + AI SkNx() {} + AI SkNx(uint8_t val) : fVec(_mm_set1_epi8((char)val)) {} + AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); } + AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, + uint8_t e, uint8_t f, uint8_t g, uint8_t h, + uint8_t i, uint8_t j, uint8_t k, uint8_t l, + uint8_t m, uint8_t n, uint8_t o, uint8_t p) + : fVec(_mm_setr_epi8((char)a,(char)b,(char)c,(char)d, + (char)e,(char)f,(char)g,(char)h, + (char)i,(char)j,(char)k,(char)l, + (char)m,(char)n,(char)o,(char)p)) {} + + AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); } + + AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); } + + AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); } + AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); } + AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); } + + AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); } + AI SkNx operator < (const SkNx& o) const { + // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare. + auto flip = _mm_set1_epi8(char(0x80)); + return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec)); + } + + AI uint8_t operator[](int k) const { + SkASSERT(0 <= k && k < 16); + union { __m128i v; uint8_t us[16]; } pun = {fVec}; + return pun.us[k&15]; + } + + AI SkNx thenElse(const SkNx& t, const SkNx& e) const { + return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), + _mm_andnot_si128(fVec, e.fVec)); + } + + __m128i fVec; +}; + +template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) { + return _mm_cvtepi32_ps(src.fVec); +} + +template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) { + return SkNx_cast<float>(Sk4i::Load(&src)); +} + +template <> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) { + return _mm_cvttps_epi32(src.fVec); +} + +template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) { +#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 + // TODO: This seems to be causing code generation problems. Investigate? + return _mm_packus_epi32(src.fVec); +#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 + // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place. + const int _ = ~0; + return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_)); +#else + // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want. 
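+    // Shifting left then arithmetic-right by 16 sign-extends the low 16 bits of each lane,
+    // so every lane lands in [-32768, 32767] and _mm_packs_epi32's signed saturation packs
+    // it without clamping; reinterpreted as uint16_t, that is exactly the low half we want.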
+ __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16); + return _mm_packs_epi32(x,x); +#endif +} + +template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) { + return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src)); +} + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) { + auto _32 = _mm_cvttps_epi32(src.fVec); +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 + const int _ = ~0; + return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_)); +#else + auto _16 = _mm_packus_epi16(_32, _32); + return _mm_packus_epi16(_16, _16); +#endif +} + +template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) { +#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 + const int _ = ~0; + return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_)); +#else + auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); + return _mm_unpacklo_epi16(_16, _mm_setzero_si128()); +#endif +} + +template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) { + return SkNx_cast<uint32_t>(src).fVec; +} + +template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) { + return _mm_cvtepi32_ps(SkNx_cast<int32_t>(src).fVec); +} + +template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) { + auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128()); + return _mm_cvtepi32_ps(_32); +} + +template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) { + Sk4i lo, hi; + SkNx_split(src, &lo, &hi); + + auto t = _mm_packs_epi32(lo.fVec, hi.fVec); + return _mm_packus_epi16(t, t); +} + +template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) { + Sk8f ab, cd; + SkNx_split(src, &ab, &cd); + + Sk4f a,b,c,d; + SkNx_split(ab, &a, &b); + SkNx_split(cd, &c, &d); + + return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec), + _mm_cvttps_epi32(b.fVec)), + _mm_packus_epi16(_mm_cvttps_epi32(c.fVec), + _mm_cvttps_epi32(d.fVec))); +} + +template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { + return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); +} + +template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) { + return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); +} + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { + return _mm_packus_epi16(src.fVec, src.fVec); +} + +template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) { + return _mm_packus_epi16(src.fVec, src.fVec); +} + +template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) { + return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128()); +} + + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) { + return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec); +} + +template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) { + return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec); +} + +template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) { + return src.fVec; +} + +AI static Sk4i Sk4f_round(const Sk4f& x) { + return _mm_cvtps_epi32(x.fVec); +} + +} // namespace + +#endif//SkNx_sse_DEFINED diff --git a/src/deps/skia/include/private/SkOnce.h b/src/deps/skia/include/private/SkOnce.h new file mode 100644 index 000000000..edf3e8335 --- /dev/null +++ b/src/deps/skia/include/private/SkOnce.h @@ -0,0 +1,53 @@ +/* + * Copyright 2013 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkOnce_DEFINED +#define SkOnce_DEFINED + +#include "include/private/SkThreadAnnotations.h" +#include <atomic> +#include <utility> + +// SkOnce provides call-once guarantees for Skia, much like std::once_flag/std::call_once(). +// +// There should be no particularly error-prone gotcha use cases when using SkOnce. +// It works correctly as a class member, a local, a global, a function-scoped static, whatever. + +class SkOnce { +public: + constexpr SkOnce() = default; + + template <typename Fn, typename... Args> + void operator()(Fn&& fn, Args&&... args) { + auto state = fState.load(std::memory_order_acquire); + + if (state == Done) { + return; + } + + // If it looks like no one has started calling fn(), try to claim that job. + if (state == NotStarted && fState.compare_exchange_strong(state, Claimed, + std::memory_order_relaxed, + std::memory_order_relaxed)) { + // Great! We'll run fn() then notify the other threads by releasing Done into fState. + fn(std::forward<Args>(args)...); + return fState.store(Done, std::memory_order_release); + } + + // Some other thread is calling fn(). + // We'll just spin here acquiring until it releases Done into fState. + SK_POTENTIALLY_BLOCKING_REGION_BEGIN; + while (fState.load(std::memory_order_acquire) != Done) { /*spin*/ } + SK_POTENTIALLY_BLOCKING_REGION_END; + } + +private: + enum State : uint8_t { NotStarted, Claimed, Done}; + std::atomic<uint8_t> fState{NotStarted}; +}; + +#endif // SkOnce_DEFINED diff --git a/src/deps/skia/include/private/SkOpts_spi.h b/src/deps/skia/include/private/SkOpts_spi.h new file mode 100644 index 000000000..e57dc1433 --- /dev/null +++ b/src/deps/skia/include/private/SkOpts_spi.h @@ -0,0 +1,21 @@ +/* + * Copyright 2020 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkOpts_spi_DEFINED +#define SkOpts_spi_DEFINED + +#include "include/core/SkTypes.h" + +// These are exposed as SK_SPI (e.g. SkParagraph), the rest of SkOpts is +// declared in src/core + +namespace SkOpts { + // The fastest high quality 32-bit hash we can provide on this platform. + extern uint32_t SK_SPI (*hash_fn)(const void* data, size_t bytes, uint32_t seed); +} // namespace SkOpts + +#endif diff --git a/src/deps/skia/include/private/SkPaintParamsKey.h b/src/deps/skia/include/private/SkPaintParamsKey.h new file mode 100644 index 000000000..44a88f59f --- /dev/null +++ b/src/deps/skia/include/private/SkPaintParamsKey.h @@ -0,0 +1,110 @@ +/* + * Copyright 2022 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPaintParamsKey_DEFINED +#define SkPaintParamsKey_DEFINED + +#include <array> +#include <limits> +#include "include/core/SkTypes.h" + +enum class SkBackend : uint8_t { + kGanesh, + kGraphite, + kSkVM +}; + +// TODO: this needs to be expanded into a more flexible dictionary (esp. for user-supplied SkSL) +// TODO: should this enum actually be in ShaderCodeDictionary.h? +enum class CodeSnippetID : uint8_t { + // TODO: It seems like this requires some refinement. Fundamentally this doesn't seem like a + // draw that originated from a PaintParams. 
+ kDepthStencilOnlyDraw, + + // SkShader code snippets + kSolidColorShader, + kLinearGradientShader, + kRadialGradientShader, + kSweepGradientShader, + kConicalGradientShader, + + // BlendMode code snippets + kSimpleBlendMode, + + kLast = kSimpleBlendMode +}; +static constexpr int kCodeSnippetIDCount = static_cast<int>(CodeSnippetID::kLast) + 1; + +// This class is a compact representation of the shader needed to implement a given +// PaintParams. Its structure is a series of blocks where each block has a +// header that consists of 2-bytes - a 1-byte code-snippet ID and a 1-byte number-of-bytes-in-the- +// block field. The rest of the data in the block is dependent on the individual code snippet. +class SkPaintParamsKey { +public: + static const int kBlockHeaderSizeInBytes = 2; + static const int kBlockSizeOffsetInBytes = 1; // offset to the block size w/in the header + + // Block headers have the following structure: + // 1st byte: codeSnippetID + // 2nd byte: total blockSize in bytes + // Returns the header's offset in the key - to be passed back into endBlock + int beginBlock(CodeSnippetID codeSnippetID) { + SkASSERT(fNumBytes < kMaxKeySize); + + this->addByte((uint8_t) codeSnippetID); + this->addByte(0); // this needs to be patched up with a call to endBlock + return fNumBytes - kBlockHeaderSizeInBytes; + } + + // Update the size byte of a block header + void endBlock(int headerOffset, CodeSnippetID codeSnippetID) { + SkASSERT(fData[headerOffset] == (uint32_t) codeSnippetID); + int blockSize = fNumBytes - headerOffset; + SkASSERT(blockSize <= kMaxBlockSize); + fData[headerOffset+1] = blockSize; + } + + std::pair<CodeSnippetID, uint8_t> readCodeSnippetID(int headerOffset) const { + SkASSERT(headerOffset < kMaxKeySize - kBlockHeaderSizeInBytes); + + CodeSnippetID id = static_cast<CodeSnippetID>(fData[headerOffset]); + uint8_t blockSize = fData[headerOffset+1]; + SkASSERT(headerOffset + blockSize <= this->sizeInBytes()); + + return { id, blockSize }; + } + + void addByte(uint8_t byte) { + SkASSERT(fNumBytes < kMaxKeySize); + + fData[fNumBytes++] = byte; + } + +#ifdef SK_DEBUG + static int DumpBlock(const SkPaintParamsKey&, int headerOffset); + void dump() const; +#endif + + uint8_t byte(int offset) const { SkASSERT(offset < fNumBytes); return fData[offset]; } + const void* data() const { return fData.data(); } + int sizeInBytes() const { return fNumBytes; } + + bool operator==(const SkPaintParamsKey& that) const; + bool operator!=(const SkPaintParamsKey& that) const { return !(*this == that); } + +private: + // TODO: need to make it so the key can can dynamically grow + static const int kMaxKeySize = 32; + static const int kMaxBlockSize = std::numeric_limits<uint8_t>::max(); + + // TODO: It is probably overkill but we could encode the SkBackend in the first byte of + // the key. + int fNumBytes = 0; + std::array<uint8_t, kMaxKeySize> fData; +}; + +#endif // SkPaintParamsKey_DEFINED diff --git a/src/deps/skia/include/private/SkPathRef.h b/src/deps/skia/include/private/SkPathRef.h new file mode 100644 index 000000000..301f3b751 --- /dev/null +++ b/src/deps/skia/include/private/SkPathRef.h @@ -0,0 +1,536 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkPathRef_DEFINED +#define SkPathRef_DEFINED + +#include "include/core/SkMatrix.h" +#include "include/core/SkPoint.h" +#include "include/core/SkRRect.h" +#include "include/core/SkRect.h" +#include "include/core/SkRefCnt.h" +#include "include/private/SkIDChangeListener.h" +#include "include/private/SkMutex.h" +#include "include/private/SkTDArray.h" +#include "include/private/SkTemplates.h" +#include "include/private/SkTo.h" + +#include <atomic> +#include <limits> +#include <tuple> + +class SkRBuffer; +class SkWBuffer; + +enum class SkPathConvexity { + kConvex, + kConcave, + kUnknown, +}; + +enum class SkPathFirstDirection { + kCW, // == SkPathDirection::kCW + kCCW, // == SkPathDirection::kCCW + kUnknown, +}; + +// These are computed from a stream of verbs +struct SkPathVerbAnalysis { + bool valid; + int points, weights; + unsigned segmentMask; +}; +SkPathVerbAnalysis sk_path_analyze_verbs(const uint8_t verbs[], int count); + + +/** + * Holds the path verbs and points. It is versioned by a generation ID. None of its public methods + * modify the contents. To modify or append to the verbs/points wrap the SkPathRef in an + * SkPathRef::Editor object. Installing the editor resets the generation ID. It also performs + * copy-on-write if the SkPathRef is shared by multiple SkPaths. The caller passes the Editor's + * constructor a pointer to a sk_sp<SkPathRef>, which may be updated to point to a new SkPathRef + * after the editor's constructor returns. + * + * The points and verbs are stored in a single allocation. The points are at the begining of the + * allocation while the verbs are stored at end of the allocation, in reverse order. Thus the points + * and verbs both grow into the middle of the allocation until the meet. To access verb i in the + * verb array use ref.verbs()[~i] (because verbs() returns a pointer just beyond the first + * logical verb or the last verb in memory). + */ + +class SK_API SkPathRef final : public SkNVRefCnt<SkPathRef> { +public: + SkPathRef(SkTDArray<SkPoint> points, SkTDArray<uint8_t> verbs, SkTDArray<SkScalar> weights, + unsigned segmentMask) + : fPoints(std::move(points)) + , fVerbs(std::move(verbs)) + , fConicWeights(std::move(weights)) + { + fBoundsIsDirty = true; // this also invalidates fIsFinite + fGenerationID = 0; // recompute + fSegmentMask = segmentMask; + fIsOval = false; + fIsRRect = false; + // The next two values don't matter unless fIsOval or fIsRRect are true. + fRRectOrOvalIsCCW = false; + fRRectOrOvalStartIdx = 0xAC; + SkDEBUGCODE(fEditorsAttached.store(0);) + + this->computeBounds(); // do this now, before we worry about multiple owners/threads + SkDEBUGCODE(this->validate();) + } + + class Editor { + public: + Editor(sk_sp<SkPathRef>* pathRef, + int incReserveVerbs = 0, + int incReservePoints = 0); + + ~Editor() { SkDEBUGCODE(fPathRef->fEditorsAttached--;) } + + /** + * Returns the array of points. + */ + SkPoint* writablePoints() { return fPathRef->getWritablePoints(); } + const SkPoint* points() const { return fPathRef->points(); } + + /** + * Gets the ith point. Shortcut for this->points() + i + */ + SkPoint* atPoint(int i) { return fPathRef->getWritablePoints() + i; } + const SkPoint* atPoint(int i) const { return &fPathRef->fPoints[i]; } + + /** + * Adds the verb and allocates space for the number of points indicated by the verb. The + * return value is a pointer to where the points for the verb should be written. 
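+         * For example, a move or line verb reserves one new point, a quad or conic two,
+         * and a cubic three.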
+ * 'weight' is only used if 'verb' is kConic_Verb + */ + SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight = 0) { + SkDEBUGCODE(fPathRef->validate();) + return fPathRef->growForVerb(verb, weight); + } + + /** + * Allocates space for multiple instances of a particular verb and the + * requisite points & weights. + * The return pointer points at the first new point (indexed normally [<i>]). + * If 'verb' is kConic_Verb, 'weights' will return a pointer to the + * space for the conic weights (indexed normally). + */ + SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, + int numVbs, + SkScalar** weights = nullptr) { + return fPathRef->growForRepeatedVerb(verb, numVbs, weights); + } + + /** + * Concatenates all verbs from 'path' onto the pathRef's verbs array. Increases the point + * count by the number of points in 'path', and the conic weight count by the number of + * conics in 'path'. + * + * Returns pointers to the uninitialized points and conic weights data. + */ + std::tuple<SkPoint*, SkScalar*> growForVerbsInPath(const SkPathRef& path) { + return fPathRef->growForVerbsInPath(path); + } + + /** + * Resets the path ref to a new verb and point count. The new verbs and points are + * uninitialized. + */ + void resetToSize(int newVerbCnt, int newPointCnt, int newConicCount) { + fPathRef->resetToSize(newVerbCnt, newPointCnt, newConicCount); + } + + /** + * Gets the path ref that is wrapped in the Editor. + */ + SkPathRef* pathRef() { return fPathRef; } + + void setIsOval(bool isOval, bool isCCW, unsigned start) { + fPathRef->setIsOval(isOval, isCCW, start); + } + + void setIsRRect(bool isRRect, bool isCCW, unsigned start) { + fPathRef->setIsRRect(isRRect, isCCW, start); + } + + void setBounds(const SkRect& rect) { fPathRef->setBounds(rect); } + + private: + SkPathRef* fPathRef; + }; + + class SK_API Iter { + public: + Iter(); + Iter(const SkPathRef&); + + void setPathRef(const SkPathRef&); + + /** Return the next verb in this iteration of the path. When all + segments have been visited, return kDone_Verb. + + If any point in the path is non-finite, return kDone_Verb immediately. + + @param pts The points representing the current verb and/or segment + This must not be NULL. + @return The verb for the current segment + */ + uint8_t next(SkPoint pts[4]); + uint8_t peek() const; + + SkScalar conicWeight() const { return *fConicWeights; } + + private: + const SkPoint* fPts; + const uint8_t* fVerbs; + const uint8_t* fVerbStop; + const SkScalar* fConicWeights; + }; + +public: + /** + * Gets a path ref with no verbs or points. + */ + static SkPathRef* CreateEmpty(); + + /** + * Returns true if all of the points in this path are finite, meaning there + * are no infinities and no NaNs. + */ + bool isFinite() const { + if (fBoundsIsDirty) { + this->computeBounds(); + } + return SkToBool(fIsFinite); + } + + /** + * Returns a mask, where each bit corresponding to a SegmentMask is + * set if the path contains 1 or more segments of that type. + * Returns 0 for an empty path (no segments). + */ + uint32_t getSegmentMasks() const { return fSegmentMask; } + + /** Returns true if the path is an oval. + * + * @param rect returns the bounding rect of this oval. It's a circle + * if the height and width are the same. + * @param isCCW is the oval CCW (or CW if false). + * @param start indicates where the contour starts on the oval (see + * SkPath::addOval for intepretation of the index). + * + * @return true if this path is an oval. 
+ * Tracking whether a path is an oval is considered an + * optimization for performance and so some paths that are in + * fact ovals can report false. + */ + bool isOval(SkRect* rect, bool* isCCW, unsigned* start) const { + if (fIsOval) { + if (rect) { + *rect = this->getBounds(); + } + if (isCCW) { + *isCCW = SkToBool(fRRectOrOvalIsCCW); + } + if (start) { + *start = fRRectOrOvalStartIdx; + } + } + + return SkToBool(fIsOval); + } + + bool isRRect(SkRRect* rrect, bool* isCCW, unsigned* start) const { + if (fIsRRect) { + if (rrect) { + *rrect = this->getRRect(); + } + if (isCCW) { + *isCCW = SkToBool(fRRectOrOvalIsCCW); + } + if (start) { + *start = fRRectOrOvalStartIdx; + } + } + return SkToBool(fIsRRect); + } + + + bool hasComputedBounds() const { + return !fBoundsIsDirty; + } + + /** Returns the bounds of the path's points. If the path contains 0 or 1 + points, the bounds is set to (0,0,0,0), and isEmpty() will return true. + Note: this bounds may be larger than the actual shape, since curves + do not extend as far as their control points. + */ + const SkRect& getBounds() const { + if (fBoundsIsDirty) { + this->computeBounds(); + } + return fBounds; + } + + SkRRect getRRect() const; + + /** + * Transforms a path ref by a matrix, allocating a new one only if necessary. + */ + static void CreateTransformedCopy(sk_sp<SkPathRef>* dst, + const SkPathRef& src, + const SkMatrix& matrix); + + // static SkPathRef* CreateFromBuffer(SkRBuffer* buffer); + + /** + * Rollsback a path ref to zero verbs and points with the assumption that the path ref will be + * repopulated with approximately the same number of verbs and points. A new path ref is created + * only if necessary. + */ + static void Rewind(sk_sp<SkPathRef>* pathRef); + + ~SkPathRef(); + int countPoints() const { return fPoints.count(); } + int countVerbs() const { return fVerbs.count(); } + int countWeights() const { return fConicWeights.count(); } + + size_t approximateBytesUsed() const; + + /** + * Returns a pointer one beyond the first logical verb (last verb in memory order). + */ + const uint8_t* verbsBegin() const { return fVerbs.begin(); } + + /** + * Returns a const pointer to the first verb in memory (which is the last logical verb). + */ + const uint8_t* verbsEnd() const { return fVerbs.end(); } + + /** + * Returns a const pointer to the first point. + */ + const SkPoint* points() const { return fPoints.begin(); } + + /** + * Shortcut for this->points() + this->countPoints() + */ + const SkPoint* pointsEnd() const { return this->points() + this->countPoints(); } + + const SkScalar* conicWeights() const { return fConicWeights.begin(); } + const SkScalar* conicWeightsEnd() const { return fConicWeights.end(); } + + /** + * Convenience methods for getting to a verb or point by index. + */ + uint8_t atVerb(int index) const { return fVerbs[index]; } + const SkPoint& atPoint(int index) const { return fPoints[index]; } + + bool operator== (const SkPathRef& ref) const; + + /** + * Writes the path points and verbs to a buffer. + */ + void writeToBuffer(SkWBuffer* buffer) const; + + /** + * Gets the number of bytes that would be written in writeBuffer() + */ + uint32_t writeSize() const; + + void interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const; + + /** + * Gets an ID that uniquely identifies the contents of the path ref. If two path refs have the + * same ID then they have the same verbs and points. However, two path refs may have the same + * contents but different genIDs. 
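+     * (genID 1, kEmptyGenID, is reserved for the path ref with zero points and zero verbs;
+     * see the enum near the end of this class.)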
+ */ + uint32_t genID() const; + + void addGenIDChangeListener(sk_sp<SkIDChangeListener>); // Threadsafe. + int genIDChangeListenerCount(); // Threadsafe + + bool dataMatchesVerbs() const; + bool isValid() const; + SkDEBUGCODE(void validate() const { SkASSERT(this->isValid()); } ) + +private: + enum SerializationOffsets { + kLegacyRRectOrOvalStartIdx_SerializationShift = 28, // requires 3 bits, ignored. + kLegacyRRectOrOvalIsCCW_SerializationShift = 27, // requires 1 bit, ignored. + kLegacyIsRRect_SerializationShift = 26, // requires 1 bit, ignored. + kIsFinite_SerializationShift = 25, // requires 1 bit + kLegacyIsOval_SerializationShift = 24, // requires 1 bit, ignored. + kSegmentMask_SerializationShift = 0 // requires 4 bits (deprecated) + }; + + SkPathRef() { + fBoundsIsDirty = true; // this also invalidates fIsFinite + fGenerationID = kEmptyGenID; + fSegmentMask = 0; + fIsOval = false; + fIsRRect = false; + // The next two values don't matter unless fIsOval or fIsRRect are true. + fRRectOrOvalIsCCW = false; + fRRectOrOvalStartIdx = 0xAC; + SkDEBUGCODE(fEditorsAttached.store(0);) + SkDEBUGCODE(this->validate();) + } + + void copy(const SkPathRef& ref, int additionalReserveVerbs, int additionalReservePoints); + + // Return true if the computed bounds are finite. + static bool ComputePtBounds(SkRect* bounds, const SkPathRef& ref) { + return bounds->setBoundsCheck(ref.points(), ref.countPoints()); + } + + // called, if dirty, by getBounds() + void computeBounds() const { + SkDEBUGCODE(this->validate();) + // TODO(mtklein): remove fBoundsIsDirty and fIsFinite, + // using an inverted rect instead of fBoundsIsDirty and always recalculating fIsFinite. + SkASSERT(fBoundsIsDirty); + + fIsFinite = ComputePtBounds(&fBounds, *this); + fBoundsIsDirty = false; + } + + void setBounds(const SkRect& rect) { + SkASSERT(rect.fLeft <= rect.fRight && rect.fTop <= rect.fBottom); + fBounds = rect; + fBoundsIsDirty = false; + fIsFinite = fBounds.isFinite(); + } + + /** Makes additional room but does not change the counts or change the genID */ + void incReserve(int additionalVerbs, int additionalPoints) { + SkDEBUGCODE(this->validate();) + fPoints.setReserve(fPoints.count() + additionalPoints); + fVerbs.setReserve(fVerbs.count() + additionalVerbs); + SkDEBUGCODE(this->validate();) + } + + /** Resets the path ref with verbCount verbs and pointCount points, all uninitialized. Also + * allocates space for reserveVerb additional verbs and reservePoints additional points.*/ + void resetToSize(int verbCount, int pointCount, int conicCount, + int reserveVerbs = 0, int reservePoints = 0) { + SkDEBUGCODE(this->validate();) + this->callGenIDChangeListeners(); + fBoundsIsDirty = true; // this also invalidates fIsFinite + fGenerationID = 0; + + fSegmentMask = 0; + fIsOval = false; + fIsRRect = false; + + fPoints.setReserve(pointCount + reservePoints); + fPoints.setCount(pointCount); + fVerbs.setReserve(verbCount + reserveVerbs); + fVerbs.setCount(verbCount); + fConicWeights.setCount(conicCount); + SkDEBUGCODE(this->validate();) + } + + /** + * Increases the verb count by numVbs and point count by the required amount. + * The new points are uninitialized. All the new verbs are set to the specified + * verb. If 'verb' is kConic_Verb, 'weights' will return a pointer to the + * uninitialized conic weights. 
+ */ + SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, int numVbs, SkScalar** weights); + + /** + * Increases the verb count 1, records the new verb, and creates room for the requisite number + * of additional points. A pointer to the first point is returned. Any new points are + * uninitialized. + */ + SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight); + + /** + * Concatenates all verbs from 'path' onto our own verbs array. Increases the point count by the + * number of points in 'path', and the conic weight count by the number of conics in 'path'. + * + * Returns pointers to the uninitialized points and conic weights data. + */ + std::tuple<SkPoint*, SkScalar*> growForVerbsInPath(const SkPathRef& path); + + /** + * Private, non-const-ptr version of the public function verbsMemBegin(). + */ + uint8_t* verbsBeginWritable() { return fVerbs.begin(); } + + /** + * Called the first time someone calls CreateEmpty to actually create the singleton. + */ + friend SkPathRef* sk_create_empty_pathref(); + + void setIsOval(bool isOval, bool isCCW, unsigned start) { + fIsOval = isOval; + fRRectOrOvalIsCCW = isCCW; + fRRectOrOvalStartIdx = SkToU8(start); + } + + void setIsRRect(bool isRRect, bool isCCW, unsigned start) { + fIsRRect = isRRect; + fRRectOrOvalIsCCW = isCCW; + fRRectOrOvalStartIdx = SkToU8(start); + } + + // called only by the editor. Note that this is not a const function. + SkPoint* getWritablePoints() { + SkDEBUGCODE(this->validate();) + fIsOval = false; + fIsRRect = false; + return fPoints.begin(); + } + + const SkPoint* getPoints() const { + SkDEBUGCODE(this->validate();) + return fPoints.begin(); + } + + void callGenIDChangeListeners(); + + enum { + kMinSize = 256, + }; + + mutable SkRect fBounds; + + SkTDArray<SkPoint> fPoints; + SkTDArray<uint8_t> fVerbs; + SkTDArray<SkScalar> fConicWeights; + + enum { + kEmptyGenID = 1, // GenID reserved for path ref with zero points and zero verbs. + }; + mutable uint32_t fGenerationID; + SkDEBUGCODE(std::atomic<int> fEditorsAttached;) // assert only one editor in use at any time. + + SkIDChangeListener::List fGenIDChangeListeners; + + mutable uint8_t fBoundsIsDirty; + mutable bool fIsFinite; // only meaningful if bounds are valid + + bool fIsOval; + bool fIsRRect; + // Both the circle and rrect special cases have a notion of direction and starting point + // The next two variables store that information for either. + bool fRRectOrOvalIsCCW; + uint8_t fRRectOrOvalStartIdx; + uint8_t fSegmentMask; + + friend class PathRefTest_Private; + friend class ForceIsRRect_Private; // unit test isRRect + friend class SkPath; + friend class SkPathBuilder; + friend class SkPathPriv; +}; + +#endif diff --git a/src/deps/skia/include/private/SkSLDefines.h b/src/deps/skia/include/private/SkSLDefines.h new file mode 100644 index 000000000..50024b357 --- /dev/null +++ b/src/deps/skia/include/private/SkSLDefines.h @@ -0,0 +1,56 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SKSL_DEFINES +#define SKSL_DEFINES + +#include <cstdint> + +#include "include/core/SkTypes.h" +#include "include/private/SkTArray.h" + +using SKSL_INT = int64_t; +using SKSL_FLOAT = float; + +namespace SkSL { + +class Expression; +class Statement; + +using ComponentArray = SkSTArray<4, int8_t>; // for Swizzles +using ExpressionArray = SkSTArray<2, std::unique_ptr<Expression>>; +using StatementArray = SkSTArray<2, std::unique_ptr<Statement>>; + +// Functions larger than this (measured in IR nodes) will not be inlined. This growth factor +// accounts for the number of calls being inlined--i.e., a function called five times (that is, with +// five inlining opportunities) would be considered 5x larger than if it were called once. This +// default threshold value is arbitrary, but tends to work well in practice. +static constexpr int kDefaultInlineThreshold = 50; + +// A hard upper limit on the number of variable slots allowed in a function/global scope. +// This is an arbitrary limit, but is needed to prevent code generation from taking unbounded +// amounts of time or space. +static constexpr int kVariableSlotLimit = 100000; + +// The SwizzleComponent namespace is used both by the SkSL::Swizzle expression, and the DSL swizzle. +// This namespace is injected into SkSL::dsl so that `using namespace SkSL::dsl` enables DSL code +// like `Swizzle(var, X, Y, ONE)` to compile without any extra qualifications. +namespace SwizzleComponent { + +enum Type : int8_t { + X = 0, Y = 1, Z = 2, W = 3, + R = 4, G = 5, B = 6, A = 7, + S = 8, T = 9, P = 10, Q = 11, + UL = 12, UT = 13, UR = 14, UB = 15, + ZERO, + ONE +}; + +} // namespace SwizzleComponent +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSLIRNode.h b/src/deps/skia/include/private/SkSLIRNode.h new file mode 100644 index 000000000..2e545c349 --- /dev/null +++ b/src/deps/skia/include/private/SkSLIRNode.h @@ -0,0 +1,63 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_IRNODE +#define SKSL_IRNODE + +#include "include/private/SkSLString.h" +#include "include/private/SkTArray.h" +#include "src/sksl/SkSLLexer.h" +#include "src/sksl/SkSLModifiersPool.h" +#include "src/sksl/SkSLPool.h" + +#include <algorithm> +#include <atomic> +#include <unordered_set> +#include <vector> + +namespace SkSL { + +class Expression; +class FunctionDeclaration; +class FunctionDefinition; +class Statement; +class Symbol; +class SymbolTable; +class Type; +class Variable; +class VariableReference; +enum class VariableRefKind : int8_t; +enum class VariableStorage : int8_t; + +/** + * Represents a node in the intermediate representation (IR) tree. The IR is a fully-resolved + * version of the program (all types determined, everything validated), ready for code generation. 
+ */ +class IRNode : public Poolable { +public: + virtual ~IRNode() {} + + virtual String description() const = 0; + + // No copy construction or assignment + IRNode(const IRNode&) = delete; + IRNode& operator=(const IRNode&) = delete; + + // line of this element within the program being compiled, for error reporting purposes + int fLine; + +protected: + IRNode(int line, int kind) + : fLine(line) + , fKind(kind) {} + + int fKind; +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSLLayout.h b/src/deps/skia/include/private/SkSLLayout.h new file mode 100644 index 000000000..d3654dd43 --- /dev/null +++ b/src/deps/skia/include/private/SkSLLayout.h @@ -0,0 +1,143 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_LAYOUT +#define SKSL_LAYOUT + +#include "include/private/SkSLString.h" + +namespace SkSL { + +/** + * Represents a layout block appearing before a variable declaration, as in: + * + * layout (location = 0) int x; + */ +struct Layout { + enum Flag { + kOriginUpperLeft_Flag = 1 << 0, + kPushConstant_Flag = 1 << 1, + kBlendSupportAllEquations_Flag = 1 << 2, + kColor_Flag = 1 << 3, + + // These flags indicate if the qualifier appeared, regardless of the accompanying value. + kLocation_Flag = 1 << 4, + kOffset_Flag = 1 << 5, + kBinding_Flag = 1 << 6, + kIndex_Flag = 1 << 7, + kSet_Flag = 1 << 8, + kBuiltin_Flag = 1 << 9, + kInputAttachmentIndex_Flag = 1 << 10, + }; + + Layout(int flags, int location, int offset, int binding, int index, int set, int builtin, + int inputAttachmentIndex) + : fFlags(flags) + , fLocation(location) + , fOffset(offset) + , fBinding(binding) + , fIndex(index) + , fSet(set) + , fBuiltin(builtin) + , fInputAttachmentIndex(inputAttachmentIndex) {} + + Layout() + : fFlags(0) + , fLocation(-1) + , fOffset(-1) + , fBinding(-1) + , fIndex(-1) + , fSet(-1) + , fBuiltin(-1) + , fInputAttachmentIndex(-1) {} + + static Layout builtin(int builtin) { + Layout result; + result.fBuiltin = builtin; + return result; + } + + String description() const { + String result; + auto separator = [firstSeparator = true]() mutable -> String { + if (firstSeparator) { + firstSeparator = false; + return ""; + } else { + return ", "; + }}; + if (fLocation >= 0) { + result += separator() + "location = " + to_string(fLocation); + } + if (fOffset >= 0) { + result += separator() + "offset = " + to_string(fOffset); + } + if (fBinding >= 0) { + result += separator() + "binding = " + to_string(fBinding); + } + if (fIndex >= 0) { + result += separator() + "index = " + to_string(fIndex); + } + if (fSet >= 0) { + result += separator() + "set = " + to_string(fSet); + } + if (fBuiltin >= 0) { + result += separator() + "builtin = " + to_string(fBuiltin); + } + if (fInputAttachmentIndex >= 0) { + result += separator() + "input_attachment_index = " + to_string(fInputAttachmentIndex); + } + if (fFlags & kOriginUpperLeft_Flag) { + result += separator() + "origin_upper_left"; + } + if (fFlags & kBlendSupportAllEquations_Flag) { + result += separator() + "blend_support_all_equations"; + } + if (fFlags & kPushConstant_Flag) { + result += separator() + "push_constant"; + } + if (fFlags & kColor_Flag) { + result += separator() + "color"; + } + if (result.size() > 0) { + result = "layout (" + result + ")"; + } + return result; + } + + bool operator==(const Layout& other) const { + return fFlags == other.fFlags && + fLocation == other.fLocation && + fOffset == 
other.fOffset && + fBinding == other.fBinding && + fIndex == other.fIndex && + fSet == other.fSet && + fBuiltin == other.fBuiltin && + fInputAttachmentIndex == other.fInputAttachmentIndex; + } + + bool operator!=(const Layout& other) const { + return !(*this == other); + } + + int fFlags; + int fLocation; + int fOffset; + int fBinding; + int fIndex; + int fSet; + // builtin comes from SPIR-V and identifies which particular builtin value this object + // represents. + int fBuiltin; + // input_attachment_index comes from Vulkan/SPIR-V to connect a shader variable to the a + // corresponding attachment on the subpass in which the shader is being used. + int fInputAttachmentIndex; +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSLModifiers.h b/src/deps/skia/include/private/SkSLModifiers.h new file mode 100644 index 000000000..a881e57e0 --- /dev/null +++ b/src/deps/skia/include/private/SkSLModifiers.h @@ -0,0 +1,141 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_MODIFIERS +#define SKSL_MODIFIERS + +#include "include/private/SkSLLayout.h" + +#include <vector> + +namespace SkSL { + +class Context; + +/** + * A set of modifier keywords (in, out, uniform, etc.) appearing before a declaration. + */ +struct Modifiers { + /** + * OpenGL requires modifiers to be in a strict order: + * - invariant-qualifier: (invariant) + * - interpolation-qualifier: flat, noperspective, (smooth) + * - storage-qualifier: const, uniform + * - parameter-qualifier: in, out, inout + * - precision-qualifier: highp, mediump, lowp + * + * SkSL does not have `invariant` or `smooth`. + */ + + enum Flag { + kNo_Flag = 0, + // Real GLSL modifiers + kFlat_Flag = 1 << 0, + kNoPerspective_Flag = 1 << 1, + kConst_Flag = 1 << 2, + kUniform_Flag = 1 << 3, + kIn_Flag = 1 << 4, + kOut_Flag = 1 << 5, + kHighp_Flag = 1 << 6, + kMediump_Flag = 1 << 7, + kLowp_Flag = 1 << 8, + // SkSL extensions, not present in GLSL + kES3_Flag = 1 << 9, + kHasSideEffects_Flag = 1 << 10, + kInline_Flag = 1 << 11, + kNoInline_Flag = 1 << 12, + }; + + Modifiers() + : fLayout(Layout()) + , fFlags(0) {} + + Modifiers(const Layout& layout, int flags) + : fLayout(layout) + , fFlags(flags) {} + + String description() const { + String result = fLayout.description(); + + // SkSL extensions + if (fFlags & kES3_Flag) { + result += "$es3 "; + } + if (fFlags & kHasSideEffects_Flag) { + result += "sk_has_side_effects "; + } + if (fFlags & kNoInline_Flag) { + result += "noinline "; + } + + // Real GLSL qualifiers (must be specified in order in GLSL 4.1 and below) + if (fFlags & kFlat_Flag) { + result += "flat "; + } + if (fFlags & kNoPerspective_Flag) { + result += "noperspective "; + } + if (fFlags & kConst_Flag) { + result += "const "; + } + if (fFlags & kUniform_Flag) { + result += "uniform "; + } + if ((fFlags & kIn_Flag) && (fFlags & kOut_Flag)) { + result += "inout "; + } else if (fFlags & kIn_Flag) { + result += "in "; + } else if (fFlags & kOut_Flag) { + result += "out "; + } + if (fFlags & kHighp_Flag) { + result += "highp "; + } + if (fFlags & kMediump_Flag) { + result += "mediump "; + } + if (fFlags & kLowp_Flag) { + result += "lowp "; + } + + return result; + } + + bool operator==(const Modifiers& other) const { + return fLayout == other.fLayout && fFlags == other.fFlags; + } + + bool operator!=(const Modifiers& other) const { + return !(*this == other); + } + + /** + * Verifies that only permitted modifiers 
and layout flags are included. Reports errors and + * returns false in the event of a violation. + */ + bool checkPermitted(const Context& context, int line, int permittedModifierFlags, + int permittedLayoutFlags) const; + + Layout fLayout; + int fFlags; +}; + +} // namespace SkSL + +namespace std { + +template <> +struct hash<SkSL::Modifiers> { + size_t operator()(const SkSL::Modifiers& key) const { + return (size_t) key.fFlags ^ ((size_t) key.fLayout.fFlags << 8) ^ + ((size_t) key.fLayout.fBuiltin << 16); + } +}; + +} // namespace std + +#endif diff --git a/src/deps/skia/include/private/SkSLProgramElement.h b/src/deps/skia/include/private/SkSLProgramElement.h new file mode 100644 index 000000000..88c4129ee --- /dev/null +++ b/src/deps/skia/include/private/SkSLProgramElement.h @@ -0,0 +1,77 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_PROGRAMELEMENT +#define SKSL_PROGRAMELEMENT + +#include "include/private/SkSLIRNode.h" + +#include <memory> + +namespace SkSL { + +/** + * Represents a top-level element (e.g. function or global variable) in a program. + */ +class ProgramElement : public IRNode { +public: + enum class Kind { + kExtension = 0, + kFunction, + kFunctionPrototype, + kGlobalVar, + kInterfaceBlock, + kModifiers, + kStructDefinition, + + kFirst = kExtension, + kLast = kStructDefinition + }; + + ProgramElement(int offset, Kind kind) + : INHERITED(offset, (int) kind) { + SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast); + } + + Kind kind() const { + return (Kind) fKind; + } + + /** + * Use is<T> to check the type of a program element. + * e.g. replace `el.kind() == ProgramElement::Kind::kExtension` with `el.is<Extension>()`. + */ + template <typename T> + bool is() const { + return this->kind() == T::kProgramElementKind; + } + + /** + * Use as<T> to downcast program elements. e.g. replace `(Extension&) el` with + * `el.as<Extension>()`. + */ + template <typename T> + const T& as() const { + SkASSERT(this->is<T>()); + return static_cast<const T&>(*this); + } + + template <typename T> + T& as() { + SkASSERT(this->is<T>()); + return static_cast<T&>(*this); + } + + virtual std::unique_ptr<ProgramElement> clone() const = 0; + +private: + using INHERITED = IRNode; +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSLProgramKind.h b/src/deps/skia/include/private/SkSLProgramKind.h new file mode 100644 index 000000000..96826a70b --- /dev/null +++ b/src/deps/skia/include/private/SkSLProgramKind.h @@ -0,0 +1,31 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSLProgramKind_DEFINED +#define SkSLProgramKind_DEFINED + +#include <cinttypes> + +namespace SkSL { + +/** + * SkSL supports several different program kinds. 
+ */ +enum class ProgramKind : int8_t { + kFragment, + kVertex, + kRuntimeColorFilter, // Runtime effect only suitable as SkColorFilter + kRuntimeShader, // " " " " " SkShader + kRuntimeBlender, // " " " " " SkBlender + kCustomMeshVertex, // Vertex portion of a custom mesh + kCustomMeshFragment, // Fragment " " " " " + kGeneric, +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSLSampleUsage.h b/src/deps/skia/include/private/SkSLSampleUsage.h new file mode 100644 index 000000000..a8d67a025 --- /dev/null +++ b/src/deps/skia/include/private/SkSLSampleUsage.h @@ -0,0 +1,89 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSLSampleUsage_DEFINED +#define SkSLSampleUsage_DEFINED + +#include "include/core/SkTypes.h" + +#include <string> + +namespace SkSL { + +/** + * Represents all of the ways that a fragment processor is sampled by its parent. + */ +class SampleUsage { +public: + enum class Kind { + // Child is never sampled + kNone, + // Child is only sampled at the same coordinates as the parent + kPassThrough, + // Child is sampled with a matrix whose value is uniform + kUniformMatrix, + // Child is sampled with sk_FragCoord.xy + kFragCoord, + // Child is sampled using explicit coordinates + kExplicit, + }; + + // Make a SampleUsage that corresponds to no sampling of the child at all + SampleUsage() = default; + + SampleUsage(Kind kind, bool hasPerspective) : fKind(kind), fHasPerspective(hasPerspective) { + if (kind != Kind::kUniformMatrix) { + SkASSERT(!fHasPerspective); + } + } + + // Child is sampled with a matrix whose value is uniform. The name is fixed. + static SampleUsage UniformMatrix(bool hasPerspective) { + return SampleUsage(Kind::kUniformMatrix, hasPerspective); + } + + static SampleUsage Explicit() { + return SampleUsage(Kind::kExplicit, false); + } + + static SampleUsage PassThrough() { + return SampleUsage(Kind::kPassThrough, false); + } + + static SampleUsage FragCoord() { return SampleUsage(Kind::kFragCoord, false); } + + bool operator==(const SampleUsage& that) const { + return fKind == that.fKind && fHasPerspective == that.fHasPerspective; + } + + bool operator!=(const SampleUsage& that) const { return !(*this == that); } + + // Arbitrary name used by all uniform sampling matrices + static const char* MatrixUniformName() { return "matrix"; } + + SampleUsage merge(const SampleUsage& other); + + Kind kind() const { return fKind; } + + bool hasPerspective() const { return fHasPerspective; } + + bool isSampled() const { return fKind != Kind::kNone; } + bool isPassThrough() const { return fKind == Kind::kPassThrough; } + bool isExplicit() const { return fKind == Kind::kExplicit; } + bool isUniformMatrix() const { return fKind == Kind::kUniformMatrix; } + bool isFragCoord() const { return fKind == Kind::kFragCoord; } + + std::string constructor() const; + +private: + Kind fKind = Kind::kNone; + bool fHasPerspective = false; // Only valid if fKind is kUniformMatrix +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSLStatement.h b/src/deps/skia/include/private/SkSLStatement.h new file mode 100644 index 000000000..8913369e9 --- /dev/null +++ b/src/deps/skia/include/private/SkSLStatement.h @@ -0,0 +1,87 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SKSL_STATEMENT +#define SKSL_STATEMENT + +#include "include/private/SkSLIRNode.h" +#include "include/private/SkSLSymbol.h" + +namespace SkSL { + +/** + * Abstract supertype of all statements. + */ +class Statement : public IRNode { +public: + enum Kind { + kBlock = (int) Symbol::Kind::kLast + 1, + kBreak, + kContinue, + kDiscard, + kDo, + kExpression, + kFor, + kIf, + kInlineMarker, + kNop, + kReturn, + kSwitch, + kSwitchCase, + kVarDeclaration, + + kFirst = kBlock, + kLast = kVarDeclaration, + }; + + Statement(int line, Kind kind) + : INHERITED(line, (int) kind) { + SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast); + } + + Kind kind() const { + return (Kind) fKind; + } + + /** + * Use is<T> to check the type of a statement. + * e.g. replace `s.kind() == Statement::Kind::kReturn` with `s.is<ReturnStatement>()`. + */ + template <typename T> + bool is() const { + return this->fKind == T::kStatementKind; + } + + /** + * Use as<T> to downcast statements. + * e.g. replace `(ReturnStatement&) s` with `s.as<ReturnStatement>()`. + */ + template <typename T> + const T& as() const { + SkASSERT(this->is<T>()); + return static_cast<const T&>(*this); + } + + template <typename T> + T& as() { + SkASSERT(this->is<T>()); + return static_cast<T&>(*this); + } + + virtual bool isEmpty() const { + return false; + } + + virtual std::unique_ptr<Statement> clone() const = 0; + +private: + using INHERITED = IRNode; +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSLString.h b/src/deps/skia/include/private/SkSLString.h new file mode 100644 index 000000000..7d828760d --- /dev/null +++ b/src/deps/skia/include/private/SkSLString.h @@ -0,0 +1,80 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_STRING +#define SKSL_STRING + +#include "include/core/SkStringView.h" +#include "include/private/SkSLDefines.h" +#include <cstring> +#include <stdarg.h> +#include <string> + +#ifndef SKSL_STANDALONE +#include "include/core/SkString.h" +#endif + +namespace SkSL { + +class String; + +class SK_API String : public std::string { +public: + using std::string::string; + + explicit String(std::string s) : INHERITED(std::move(s)) {} + explicit String(skstd::string_view s) : INHERITED(s.data(), s.length()) {} + // TODO(johnstiles): add operator skstd::string_view + + static String printf(const char* fmt, ...) SK_PRINTF_LIKE(1, 2); + void appendf(const char* fmt, ...) 
SK_PRINTF_LIKE(2, 3); + void vappendf(const char* fmt, va_list va); + + bool starts_with(const char prefix[]) const { + return skstd::string_view(data(), size()).starts_with(prefix); + } + bool ends_with(const char suffix[]) const { + return skstd::string_view(data(), size()).ends_with(suffix); + } + + bool consumeSuffix(const char suffix[]); + + String operator+(const char* s) const; + String operator+(const String& s) const; + String operator+(skstd::string_view s) const; + String& operator+=(char c); + String& operator+=(const char* s); + String& operator+=(const String& s); + String& operator+=(skstd::string_view s); + friend String operator+(const char* s1, const String& s2); + +private: + using INHERITED = std::string; +}; + +String operator+(skstd::string_view left, skstd::string_view right); + +String to_string(double value); +String to_string(int32_t value); +String to_string(uint32_t value); +String to_string(int64_t value); +String to_string(uint64_t value); + +bool stod(skstd::string_view s, SKSL_FLOAT* value); +bool stoi(skstd::string_view s, SKSL_INT* value); + +} // namespace SkSL + +namespace std { + template<> struct hash<SkSL::String> { + size_t operator()(const SkSL::String& s) const { + return hash<std::string>{}(s); + } + }; +} // namespace std + +#endif diff --git a/src/deps/skia/include/private/SkSLSymbol.h b/src/deps/skia/include/private/SkSLSymbol.h new file mode 100644 index 000000000..cca74b819 --- /dev/null +++ b/src/deps/skia/include/private/SkSLSymbol.h @@ -0,0 +1,90 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_SYMBOL +#define SKSL_SYMBOL + +#include "include/private/SkSLIRNode.h" +#include "include/private/SkSLProgramElement.h" + +namespace SkSL { + +/** + * Represents a symboltable entry. + */ +class Symbol : public IRNode { +public: + enum class Kind { + kExternal = (int) ProgramElement::Kind::kLast + 1, + kField, + kFunctionDeclaration, + kType, + kUnresolvedFunction, + kVariable, + + kFirst = kExternal, + kLast = kVariable + }; + + Symbol(int offset, Kind kind, skstd::string_view name, const Type* type = nullptr) + : INHERITED(offset, (int) kind) + , fName(name) + , fType(type) { + SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast); + } + + ~Symbol() override {} + + const Type& type() const { + SkASSERT(fType); + return *fType; + } + + Kind kind() const { + return (Kind) fKind; + } + + skstd::string_view name() const { + return fName; + } + + /** + * Use is<T> to check the type of a symbol. + * e.g. replace `sym.kind() == Symbol::Kind::kVariable` with `sym.is<Variable>()`. + */ + template <typename T> + bool is() const { + return this->kind() == T::kSymbolKind; + } + + /** + * Use as<T> to downcast symbols. e.g. replace `(Variable&) sym` with `sym.as<Variable>()`. + */ + template <typename T> + const T& as() const { + SkASSERT(this->is<T>()); + return static_cast<const T&>(*this); + } + + template <typename T> + T& as() { + SkASSERT(this->is<T>()); + return static_cast<T&>(*this); + } + +private: + skstd::string_view fName; + const Type* fType; + + using INHERITED = IRNode; + + friend class Type; +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/private/SkSafe32.h b/src/deps/skia/include/private/SkSafe32.h new file mode 100644 index 000000000..7e59f2b00 --- /dev/null +++ b/src/deps/skia/include/private/SkSafe32.h @@ -0,0 +1,34 @@ +/* + * Copyright 2018 Google Inc. 
+ * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSafe32_DEFINED +#define SkSafe32_DEFINED + +#include "include/core/SkTypes.h" + +static constexpr int32_t Sk64_pin_to_s32(int64_t x) { + return x < SK_MinS32 ? SK_MinS32 : (x > SK_MaxS32 ? SK_MaxS32 : (int32_t)x); +} + +static constexpr int32_t Sk32_sat_add(int32_t a, int32_t b) { + return Sk64_pin_to_s32((int64_t)a + (int64_t)b); +} + +static constexpr int32_t Sk32_sat_sub(int32_t a, int32_t b) { + return Sk64_pin_to_s32((int64_t)a - (int64_t)b); +} + +// To avoid UBSAN complaints about 2's compliment overflows +// +static constexpr int32_t Sk32_can_overflow_add(int32_t a, int32_t b) { + return (int32_t)((uint32_t)a + (uint32_t)b); +} +static constexpr int32_t Sk32_can_overflow_sub(int32_t a, int32_t b) { + return (int32_t)((uint32_t)a - (uint32_t)b); +} + +#endif diff --git a/src/deps/skia/include/private/SkSafe_math.h b/src/deps/skia/include/private/SkSafe_math.h new file mode 100644 index 000000000..144b28a4a --- /dev/null +++ b/src/deps/skia/include/private/SkSafe_math.h @@ -0,0 +1,52 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSafe_math_DEFINED +#define SkSafe_math_DEFINED + +// This file protects against known bugs in ucrt\math.h. +// Namely, that header defines inline methods without marking them static, +// which makes it very easy to cause ODR violations and ensuing chaos. +// +// TODO: other headers? Here are some potential problem headers: +// $ grep -R __inline * | grep -v static | cut -f 1 -d: | sort | uniq +// corecrt.h +// corecrt_stdio_config.h +// ctype.h +// fenv.h +// locale.h +// malloc.h +// math.h +// tchar.h +// wchar.h +// I took a quick look through other headers outside math.h. +// Nothing looks anywhere near as likely to be used by Skia as math.h. + +#if defined(_MSC_VER) && !defined(_INC_MATH) + // Our strategy here is to simply inject "static" into the headers + // where it should have been written, just before __inline. + // + // Most inline-but-not-static methods in math.h are 32-bit only, + // but not all of them (see frexpf, hypothf, ldexpf...). So to + // be safe, 32- and 64-bit builds both get this treatment. + + #define __inline static __inline + #include <math.h> + #undef __inline + + #if !defined(_INC_MATH) + #error Hmm. Looks like math.h has changed its header guards. + #endif + + #define INC_MATH_IS_SAFE_NOW + +#else + #include <math.h> + +#endif + +#endif//SkSafe_math_DEFINED diff --git a/src/deps/skia/include/private/SkSemaphore.h b/src/deps/skia/include/private/SkSemaphore.h new file mode 100644 index 000000000..d7318be57 --- /dev/null +++ b/src/deps/skia/include/private/SkSemaphore.h @@ -0,0 +1,83 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSemaphore_DEFINED +#define SkSemaphore_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkOnce.h" +#include "include/private/SkThreadAnnotations.h" +#include <algorithm> +#include <atomic> + +class SkSemaphore { +public: + constexpr SkSemaphore(int count = 0) : fCount(count), fOSSemaphore(nullptr) {} + + // Cleanup the underlying OS semaphore. + SK_SPI ~SkSemaphore(); + + // Increment the counter n times. + // Generally it's better to call signal(n) instead of signal() n times. 
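+    // (signal(n) does one atomic add and wakes any sleepers with a single osSignal() call,
+    // rather than n separate atomic operations and OS round trips. A typical pattern is a
+    // producer that pushes k items and then calls signal(k) once, with each consumer calling
+    // wait() before popping an item.)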
+ void signal(int n = 1); + + // Decrement the counter by 1, + // then if the counter is < 0, sleep this thread until the counter is >= 0. + void wait(); + + // If the counter is positive, decrement it by 1 and return true, otherwise return false. + SK_SPI bool try_wait(); + +private: + // This implementation follows the general strategy of + // 'A Lightweight Semaphore with Partial Spinning' + // found here + // http://preshing.com/20150316/semaphores-are-surprisingly-versatile/ + // That article (and entire blog) are very much worth reading. + // + // We wrap an OS-provided semaphore with a user-space atomic counter that + // lets us avoid interacting with the OS semaphore unless strictly required: + // moving the count from >=0 to <0 or vice-versa, i.e. sleeping or waking threads. + struct OSSemaphore; + + SK_SPI void osSignal(int n); + SK_SPI void osWait(); + + std::atomic<int> fCount; + SkOnce fOSSemaphoreOnce; + OSSemaphore* fOSSemaphore; +}; + +inline void SkSemaphore::signal(int n) { + int prev = fCount.fetch_add(n, std::memory_order_release); + + // We only want to call the OS semaphore when our logical count crosses + // from <0 to >=0 (when we need to wake sleeping threads). + // + // This is easiest to think about with specific examples of prev and n. + // If n == 5 and prev == -3, there are 3 threads sleeping and we signal + // std::min(-(-3), 5) == 3 times on the OS semaphore, leaving the count at 2. + // + // If prev >= 0, no threads are waiting, std::min(-prev, n) is always <= 0, + // so we don't call the OS semaphore, leaving the count at (prev + n). + int toSignal = std::min(-prev, n); + if (toSignal > 0) { + this->osSignal(toSignal); + } +} + +inline void SkSemaphore::wait() { + // Since this fetches the value before the subtract, zero and below means that there are no + // resources left, so the thread needs to wait. + if (fCount.fetch_sub(1, std::memory_order_acquire) <= 0) { + SK_POTENTIALLY_BLOCKING_REGION_BEGIN; + this->osWait(); + SK_POTENTIALLY_BLOCKING_REGION_END; + } +} + +#endif//SkSemaphore_DEFINED diff --git a/src/deps/skia/include/private/SkShaderCodeDictionary.h b/src/deps/skia/include/private/SkShaderCodeDictionary.h new file mode 100644 index 000000000..1eb86fb87 --- /dev/null +++ b/src/deps/skia/include/private/SkShaderCodeDictionary.h @@ -0,0 +1,63 @@ +/* + * Copyright 2022 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkShaderCodeDictionary_DEFINED +#define SkShaderCodeDictionary_DEFINED + +#include <unordered_map> +#include "include/private/SkPaintParamsKey.h" +#include "include/private/SkSpinlock.h" +#include "include/private/SkUniquePaintParamsID.h" +#include "src/core/SkArenaAlloc.h" + +class SkShaderCodeDictionary { +public: + SkShaderCodeDictionary(); + + struct Entry { + public: + SkUniquePaintParamsID uniqueID() const { + SkASSERT(fUniqueID.isValid()); + return fUniqueID; + } + const SkPaintParamsKey& paintParamsKey() const { return fPaintParamsKey; } + + private: + friend class SkShaderCodeDictionary; + + Entry(const SkPaintParamsKey& paintParamsKey) : fPaintParamsKey(paintParamsKey) {} + + void setUniqueID(uint32_t newID) { + SkASSERT(!fUniqueID.isValid()); + fUniqueID = SkUniquePaintParamsID(newID); + } + + SkUniquePaintParamsID fUniqueID; // fixed-size (uint32_t) unique ID assigned to a key + SkPaintParamsKey fPaintParamsKey; // variable-length paint key descriptor + }; + + const Entry* findOrCreate(const SkPaintParamsKey&) SK_EXCLUDES(fSpinLock); + + const Entry* lookup(SkUniquePaintParamsID) const SK_EXCLUDES(fSpinLock); + +private: + Entry* makeEntry(const SkPaintParamsKey&); + + struct Hash { + size_t operator()(const SkPaintParamsKey&) const; + }; + + // TODO: can we do something better given this should have write-seldom/read-often behavior? + mutable SkSpinlock fSpinLock; + + std::unordered_map<SkPaintParamsKey, Entry*, Hash> fHash SK_GUARDED_BY(fSpinLock); + std::vector<Entry*> fEntryVector SK_GUARDED_BY(fSpinLock); + + SkArenaAlloc fArena{256}; +}; + +#endif // SkShaderCodeDictionary_DEFINED diff --git a/src/deps/skia/include/private/SkShadowFlags.h b/src/deps/skia/include/private/SkShadowFlags.h new file mode 100644 index 000000000..6438f041a --- /dev/null +++ b/src/deps/skia/include/private/SkShadowFlags.h @@ -0,0 +1,25 @@ +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkShadowFlags_DEFINED +#define SkShadowFlags_DEFINED + +// A set of flags shared between the SkAmbientShadowMaskFilter and the SkSpotShadowMaskFilter +enum SkShadowFlags { + kNone_ShadowFlag = 0x00, + /** The occluding object is not opaque. Knowing that the occluder is opaque allows + * us to cull shadow geometry behind it and improve performance. */ + kTransparentOccluder_ShadowFlag = 0x01, + /** Don't try to use analytic shadows. */ + kGeometricOnly_ShadowFlag = 0x02, + /** Light position represents a direction, light radius is blur radius at elevation 1 */ + kDirectionalLight_ShadowFlag = 0x04, + /** mask for all shadow flags */ + kAll_ShadowFlag = 0x07 +}; + +#endif diff --git a/src/deps/skia/include/private/SkSpinlock.h b/src/deps/skia/include/private/SkSpinlock.h new file mode 100644 index 000000000..e1d501168 --- /dev/null +++ b/src/deps/skia/include/private/SkSpinlock.h @@ -0,0 +1,57 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSpinlock_DEFINED +#define SkSpinlock_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkThreadAnnotations.h" +#include <atomic> + +class SK_CAPABILITY("mutex") SkSpinlock { +public: + constexpr SkSpinlock() = default; + + void acquire() SK_ACQUIRE() { + // To act as a mutex, we need an acquire barrier when we acquire the lock. + if (fLocked.exchange(true, std::memory_order_acquire)) { + // Lock was contended. 
Fall back to an out-of-line spin loop. + this->contendedAcquire(); + } + } + + // Acquire the lock or fail (quickly). Lets the caller decide to do something other than wait. + bool tryAcquire() SK_TRY_ACQUIRE(true) { + // To act as a mutex, we need an acquire barrier when we acquire the lock. + if (fLocked.exchange(true, std::memory_order_acquire)) { + // Lock was contended. Let the caller decide what to do. + return false; + } + return true; + } + + void release() SK_RELEASE_CAPABILITY() { + // To act as a mutex, we need a release barrier when we release the lock. + fLocked.store(false, std::memory_order_release); + } + +private: + SK_API void contendedAcquire(); + + std::atomic<bool> fLocked{false}; +}; + +class SK_SCOPED_CAPABILITY SkAutoSpinlock { +public: + SkAutoSpinlock(SkSpinlock& mutex) SK_ACQUIRE(mutex) : fSpinlock(mutex) { fSpinlock.acquire(); } + ~SkAutoSpinlock() SK_RELEASE_CAPABILITY() { fSpinlock.release(); } + +private: + SkSpinlock& fSpinlock; +}; + +#endif//SkSpinlock_DEFINED diff --git a/src/deps/skia/include/private/SkTArray.h b/src/deps/skia/include/private/SkTArray.h new file mode 100644 index 000000000..9db5fd030 --- /dev/null +++ b/src/deps/skia/include/private/SkTArray.h @@ -0,0 +1,640 @@ +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTArray_DEFINED +#define SkTArray_DEFINED + +#include "include/core/SkMath.h" +#include "include/core/SkTypes.h" +#include "include/private/SkMalloc.h" +#include "include/private/SkSafe32.h" +#include "include/private/SkTLogic.h" +#include "include/private/SkTemplates.h" +#include "include/private/SkTo.h" + +#include <string.h> +#include <initializer_list> +#include <memory> +#include <new> +#include <utility> + +/** SkTArray<T> implements a typical, mostly std::vector-like array. + Each T will be default-initialized on allocation, and ~T will be called on destruction. + + MEM_MOVE controls the behavior when a T needs to be moved (e.g. when the array is resized) + - true: T will be bit-copied via memcpy. + - false: T will be moved via move-constructors. + + Modern implementations of std::vector<T> will generally provide similar performance + characteristics when used with appropriate care. Consider using std::vector<T> in new code. +*/ +template <typename T, bool MEM_MOVE = false> class SkTArray { +private: + enum ReallocType { kExactFit, kGrowing, kShrinking }; + +public: + using value_type = T; + + /** + * Creates an empty array with no initial storage + */ + SkTArray() { this->init(0); } + + /** + * Creates an empty array that will preallocate space for reserveCount + * elements. + */ + explicit SkTArray(int reserveCount) : SkTArray() { this->reserve_back(reserveCount); } + + /** + * Copies one array to another. The new array will be heap allocated. + */ + SkTArray(const SkTArray& that) + : SkTArray(that.fItemArray, that.fCount) {} + + SkTArray(SkTArray&& that) { + if (that.fOwnMemory) { + fItemArray = that.fItemArray; + fCount = that.fCount; + fAllocCount = that.fAllocCount; + fOwnMemory = true; + fReserved = that.fReserved; + + that.fItemArray = nullptr; + that.fCount = 0; + that.fAllocCount = 0; + that.fOwnMemory = true; + that.fReserved = false; + } else { + this->init(that.fCount); + that.move(fItemArray); + that.fCount = 0; + } + } + + /** + * Creates a SkTArray by copying contents of a standard C array. The new + * array will be heap allocated. 
Be careful not to use this constructor + * when you really want the (void*, int) version. + */ + SkTArray(const T* array, int count) { + this->init(count); + this->copy(array); + } + /** + * Creates a SkTArray by copying contents of an initializer list. + */ + SkTArray(std::initializer_list<T> data) + : SkTArray(data.begin(), data.size()) {} + + SkTArray& operator=(const SkTArray& that) { + if (this == &that) { + return *this; + } + for (int i = 0; i < this->count(); ++i) { + fItemArray[i].~T(); + } + fCount = 0; + this->checkRealloc(that.count(), kExactFit); + fCount = that.fCount; + this->copy(that.fItemArray); + return *this; + } + SkTArray& operator=(SkTArray&& that) { + if (this == &that) { + return *this; + } + for (int i = 0; i < this->count(); ++i) { + fItemArray[i].~T(); + } + fCount = 0; + this->checkRealloc(that.count(), kExactFit); + fCount = that.fCount; + that.move(fItemArray); + that.fCount = 0; + return *this; + } + + ~SkTArray() { + for (int i = 0; i < this->count(); ++i) { + fItemArray[i].~T(); + } + if (fOwnMemory) { + sk_free(fItemArray); + } + } + + /** + * Resets to count() == 0 and resets any reserve count. + */ + void reset() { + this->pop_back_n(fCount); + fReserved = false; + } + + /** + * Resets to count() = n newly constructed T objects and resets any reserve count. + */ + void reset(int n) { + SkASSERT(n >= 0); + for (int i = 0; i < this->count(); ++i) { + fItemArray[i].~T(); + } + // Set fCount to 0 before calling checkRealloc so that no elements are moved. + fCount = 0; + this->checkRealloc(n, kExactFit); + fCount = n; + for (int i = 0; i < this->count(); ++i) { + new (fItemArray + i) T; + } + fReserved = false; + } + + /** + * Resets to a copy of a C array and resets any reserve count. + */ + void reset(const T* array, int count) { + for (int i = 0; i < this->count(); ++i) { + fItemArray[i].~T(); + } + fCount = 0; + this->checkRealloc(count, kExactFit); + fCount = count; + this->copy(array); + fReserved = false; + } + + /** + * Ensures there is enough reserved space for n additional elements. The is guaranteed at least + * until the array size grows above n and subsequently shrinks below n, any version of reset() + * is called, or reserve_back() is called again. + */ + void reserve_back(int n) { + SkASSERT(n >= 0); + if (n > 0) { + this->checkRealloc(n, kExactFit); + fReserved = fOwnMemory; + } else { + fReserved = false; + } + } + + void removeShuffle(int n) { + SkASSERT(n < this->count()); + int newCount = fCount - 1; + fCount = newCount; + fItemArray[n].~T(); + if (n != newCount) { + this->move(n, newCount); + } + } + + /** + * Number of elements in the array. + */ + int count() const { return fCount; } + + /** + * Is the array empty. + */ + bool empty() const { return !fCount; } + + /** + * Adds 1 new default-initialized T value and returns it by reference. Note + * the reference only remains valid until the next call that adds or removes + * elements. + */ + T& push_back() { + void* newT = this->push_back_raw(1); + return *new (newT) T; + } + + /** + * Version of above that uses a copy constructor to initialize the new item + */ + T& push_back(const T& t) { + void* newT = this->push_back_raw(1); + return *new (newT) T(t); + } + + /** + * Version of above that uses a move constructor to initialize the new item + */ + T& push_back(T&& t) { + void* newT = this->push_back_raw(1); + return *new (newT) T(std::move(t)); + } + + /** + * Construct a new T at the back of this array. + */ + template<class... Args> T& emplace_back(Args&&... 
args) { + void* newT = this->push_back_raw(1); + return *new (newT) T(std::forward<Args>(args)...); + } + + /** + * Allocates n more default-initialized T values, and returns the address of + * the start of that new range. Note: this address is only valid until the + * next API call made on the array that might add or remove elements. + */ + T* push_back_n(int n) { + SkASSERT(n >= 0); + void* newTs = this->push_back_raw(n); + for (int i = 0; i < n; ++i) { + new (static_cast<char*>(newTs) + i * sizeof(T)) T; + } + return static_cast<T*>(newTs); + } + + /** + * Version of above that uses a copy constructor to initialize all n items + * to the same T. + */ + T* push_back_n(int n, const T& t) { + SkASSERT(n >= 0); + void* newTs = this->push_back_raw(n); + for (int i = 0; i < n; ++i) { + new (static_cast<char*>(newTs) + i * sizeof(T)) T(t); + } + return static_cast<T*>(newTs); + } + + /** + * Version of above that uses a copy constructor to initialize the n items + * to separate T values. + */ + T* push_back_n(int n, const T t[]) { + SkASSERT(n >= 0); + this->checkRealloc(n, kGrowing); + for (int i = 0; i < n; ++i) { + new (fItemArray + fCount + i) T(t[i]); + } + fCount += n; + return fItemArray + fCount - n; + } + + /** + * Version of above that uses the move constructor to set n items. + */ + T* move_back_n(int n, T* t) { + SkASSERT(n >= 0); + this->checkRealloc(n, kGrowing); + for (int i = 0; i < n; ++i) { + new (fItemArray + fCount + i) T(std::move(t[i])); + } + fCount += n; + return fItemArray + fCount - n; + } + + /** + * Removes the last element. Not safe to call when count() == 0. + */ + void pop_back() { + SkASSERT(fCount > 0); + --fCount; + fItemArray[fCount].~T(); + this->checkRealloc(0, kShrinking); + } + + /** + * Removes the last n elements. Not safe to call when count() < n. + */ + void pop_back_n(int n) { + SkASSERT(n >= 0); + SkASSERT(this->count() >= n); + fCount -= n; + for (int i = 0; i < n; ++i) { + fItemArray[fCount + i].~T(); + } + this->checkRealloc(0, kShrinking); + } + + /** + * Pushes or pops from the back to resize. Pushes will be default + * initialized. + */ + void resize_back(int newCount) { + SkASSERT(newCount >= 0); + + if (newCount > this->count()) { + this->push_back_n(newCount - fCount); + } else if (newCount < this->count()) { + this->pop_back_n(fCount - newCount); + } + } + + /** Swaps the contents of this array with that array. Does a pointer swap if possible, + otherwise copies the T values. */ + void swap(SkTArray& that) { + using std::swap; + if (this == &that) { + return; + } + if (fOwnMemory && that.fOwnMemory) { + swap(fItemArray, that.fItemArray); + + auto count = fCount; + fCount = that.fCount; + that.fCount = count; + + auto allocCount = fAllocCount; + fAllocCount = that.fAllocCount; + that.fAllocCount = allocCount; + } else { + // This could be more optimal... + SkTArray copy(std::move(that)); + that = std::move(*this); + *this = std::move(copy); + } + } + + T* begin() { + return fItemArray; + } + const T* begin() const { + return fItemArray; + } + T* end() { + return fItemArray ? fItemArray + fCount : nullptr; + } + const T* end() const { + return fItemArray ? fItemArray + fCount : nullptr; + } + T* data() { return fItemArray; } + const T* data() const { return fItemArray; } + size_t size() const { return (size_t)fCount; } + void resize(size_t count) { this->resize_back((int)count); } + + /** + * Get the i^th element. 
+ */ + T& operator[] (int i) { + SkASSERT(i < this->count()); + SkASSERT(i >= 0); + return fItemArray[i]; + } + + const T& operator[] (int i) const { + SkASSERT(i < this->count()); + SkASSERT(i >= 0); + return fItemArray[i]; + } + + T& at(int i) { return (*this)[i]; } + const T& at(int i) const { return (*this)[i]; } + + /** + * equivalent to operator[](0) + */ + T& front() { SkASSERT(fCount > 0); return fItemArray[0];} + + const T& front() const { SkASSERT(fCount > 0); return fItemArray[0];} + + /** + * equivalent to operator[](count() - 1) + */ + T& back() { SkASSERT(fCount); return fItemArray[fCount - 1];} + + const T& back() const { SkASSERT(fCount > 0); return fItemArray[fCount - 1];} + + /** + * equivalent to operator[](count()-1-i) + */ + T& fromBack(int i) { + SkASSERT(i >= 0); + SkASSERT(i < this->count()); + return fItemArray[fCount - i - 1]; + } + + const T& fromBack(int i) const { + SkASSERT(i >= 0); + SkASSERT(i < this->count()); + return fItemArray[fCount - i - 1]; + } + + bool operator==(const SkTArray<T, MEM_MOVE>& right) const { + int leftCount = this->count(); + if (leftCount != right.count()) { + return false; + } + for (int index = 0; index < leftCount; ++index) { + if (fItemArray[index] != right.fItemArray[index]) { + return false; + } + } + return true; + } + + bool operator!=(const SkTArray<T, MEM_MOVE>& right) const { + return !(*this == right); + } + + int capacity() const { + return fAllocCount; + } + +protected: + /** + * Creates an empty array that will use the passed storage block until it + * is insufficiently large to hold the entire array. + */ + template <int N> + SkTArray(SkAlignedSTStorage<N,T>* storage) { + this->initWithPreallocatedStorage(0, storage->get(), N); + } + + /** + * Copy a C array, using preallocated storage if preAllocCount >= + * count. Otherwise storage will only be used when array shrinks + * to fit. + */ + template <int N> + SkTArray(const T* array, int count, SkAlignedSTStorage<N,T>* storage) { + this->initWithPreallocatedStorage(count, storage->get(), N); + this->copy(array); + } + +private: + void init(int count) { + fCount = SkToU32(count); + if (!count) { + fAllocCount = 0; + fItemArray = nullptr; + } else { + fAllocCount = SkToU32(std::max(count, kMinHeapAllocCount)); + fItemArray = (T*)sk_malloc_throw((size_t)fAllocCount, sizeof(T)); + } + fOwnMemory = true; + fReserved = false; + } + + void initWithPreallocatedStorage(int count, void* preallocStorage, int preallocCount) { + SkASSERT(count >= 0); + SkASSERT(preallocCount > 0); + SkASSERT(preallocStorage); + fCount = count; + fItemArray = nullptr; + fReserved = false; + if (count > preallocCount) { + fAllocCount = std::max(count, kMinHeapAllocCount); + fItemArray = (T*)sk_malloc_throw(fAllocCount, sizeof(T)); + fOwnMemory = true; + } else { + fAllocCount = preallocCount; + fItemArray = (T*)preallocStorage; + fOwnMemory = false; + } + } + + /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage. + * In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage. + */ + void copy(const T* src) { + // Some types may be trivially copyable, in which case we *could* use memcopy; but + // MEM_MOVE == true implies that the type is trivially movable, and not necessarily + // trivially copyable (think sk_sp<>). So short of adding another template arg, we + // must be conservative and use copy construction. 
+ for (int i = 0; i < this->count(); ++i) { + new (fItemArray + i) T(src[i]); + } + } + + template <bool E = MEM_MOVE> std::enable_if_t<E, void> move(int dst, int src) { + memcpy(&fItemArray[dst], &fItemArray[src], sizeof(T)); + } + template <bool E = MEM_MOVE> std::enable_if_t<E, void> move(void* dst) { + sk_careful_memcpy(dst, fItemArray, fCount * sizeof(T)); + } + + template <bool E = MEM_MOVE> std::enable_if_t<!E, void> move(int dst, int src) { + new (&fItemArray[dst]) T(std::move(fItemArray[src])); + fItemArray[src].~T(); + } + template <bool E = MEM_MOVE> std::enable_if_t<!E, void> move(void* dst) { + for (int i = 0; i < this->count(); ++i) { + new (static_cast<char*>(dst) + sizeof(T) * (size_t)i) T(std::move(fItemArray[i])); + fItemArray[i].~T(); + } + } + + static constexpr int kMinHeapAllocCount = 8; + + // Helper function that makes space for n objects, adjusts the count, but does not initialize + // the new objects. + void* push_back_raw(int n) { + this->checkRealloc(n, kGrowing); + void* ptr = fItemArray + fCount; + fCount += n; + return ptr; + } + + void checkRealloc(int delta, ReallocType reallocType) { + SkASSERT(fCount >= 0); + SkASSERT(fAllocCount >= 0); + SkASSERT(-delta <= this->count()); + + // Move into 64bit math temporarily, to avoid local overflows + int64_t newCount = fCount + delta; + + // We allow fAllocCount to be in the range [newCount, 3*newCount]. We also never shrink + // when we're currently using preallocated memory, would allocate less than + // kMinHeapAllocCount, or a reserve count was specified that has yet to be exceeded. + bool mustGrow = newCount > fAllocCount; + bool shouldShrink = fAllocCount > 3 * newCount && fOwnMemory && !fReserved; + if (!mustGrow && !shouldShrink) { + return; + } + + int64_t newAllocCount = newCount; + if (reallocType != kExactFit) { + // Whether we're growing or shrinking, leave at least 50% extra space for future growth. + newAllocCount += ((newCount + 1) >> 1); + // Align the new allocation count to kMinHeapAllocCount. + static_assert(SkIsPow2(kMinHeapAllocCount), "min alloc count not power of two."); + newAllocCount = (newAllocCount + (kMinHeapAllocCount - 1)) & ~(kMinHeapAllocCount - 1); + } + + // At small sizes the old and new alloc count can both be kMinHeapAllocCount. + if (newAllocCount == fAllocCount) { + return; + } + + fAllocCount = SkToU32(Sk64_pin_to_s32(newAllocCount)); + SkASSERT(fAllocCount >= newCount); + T* newItemArray = (T*)sk_malloc_throw((size_t)fAllocCount, sizeof(T)); + this->move(newItemArray); + if (fOwnMemory) { + sk_free(fItemArray); + } + fItemArray = newItemArray; + fOwnMemory = true; + fReserved = false; + } + + T* fItemArray; + uint32_t fOwnMemory : 1; + uint32_t fCount : 31; + uint32_t fReserved : 1; + uint32_t fAllocCount : 31; +}; + +template <typename T, bool M> static inline void swap(SkTArray<T, M>& a, SkTArray<T, M>& b) { + a.swap(b); +} + +template<typename T, bool MEM_MOVE> constexpr int SkTArray<T, MEM_MOVE>::kMinHeapAllocCount; + +/** + * Subclass of SkTArray that contains a preallocated memory block for the array. 
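As a hypothetical illustration of that preallocation (sketch only, names invented): the first N elements live in the inline block, and a heap allocation only happens once the array outgrows it.

    #include "include/private/SkTArray.h"

    void sketch() {
        SkSTArray<4, int> values;        // room for four ints inline; no heap allocation yet
        for (int i = 0; i < 4; i++) {
            values.push_back(i);         // still inside the preallocated block
        }
        values.push_back(4);             // the fifth element spills to a heap allocation
    }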
+ */ +template <int N, typename T, bool MEM_MOVE = false> +class SkSTArray : private SkAlignedSTStorage<N,T>, public SkTArray<T, MEM_MOVE> { +private: + using STORAGE = SkAlignedSTStorage<N,T>; + using INHERITED = SkTArray<T, MEM_MOVE>; + +public: + SkSTArray() + : STORAGE{}, INHERITED(static_cast<STORAGE*>(this)) {} + + SkSTArray(const T* array, int count) + : STORAGE{}, INHERITED(array, count, static_cast<STORAGE*>(this)) {} + + SkSTArray(std::initializer_list<T> data) + : SkSTArray(data.begin(), data.size()) {} + + explicit SkSTArray(int reserveCount) + : SkSTArray() { + this->reserve_back(reserveCount); + } + + SkSTArray (const SkSTArray& that) : SkSTArray() { *this = that; } + explicit SkSTArray(const INHERITED& that) : SkSTArray() { *this = that; } + SkSTArray ( SkSTArray&& that) : SkSTArray() { *this = std::move(that); } + explicit SkSTArray( INHERITED&& that) : SkSTArray() { *this = std::move(that); } + + SkSTArray& operator=(const SkSTArray& that) { + INHERITED::operator=(that); + return *this; + } + SkSTArray& operator=(const INHERITED& that) { + INHERITED::operator=(that); + return *this; + } + + SkSTArray& operator=(SkSTArray&& that) { + INHERITED::operator=(std::move(that)); + return *this; + } + SkSTArray& operator=(INHERITED&& that) { + INHERITED::operator=(std::move(that)); + return *this; + } +}; + +#endif diff --git a/src/deps/skia/include/private/SkTDArray.h b/src/deps/skia/include/private/SkTDArray.h new file mode 100644 index 000000000..d06b46f72 --- /dev/null +++ b/src/deps/skia/include/private/SkTDArray.h @@ -0,0 +1,385 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + + +#ifndef SkTDArray_DEFINED +#define SkTDArray_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkMalloc.h" +#include "include/private/SkTo.h" + +#include <algorithm> +#include <initializer_list> +#include <utility> + +/** SkTDArray<T> implements a std::vector-like array for raw data-only objects that do not require + construction or destruction. The constructor and destructor for T will not be called; T objects + will always be moved via raw memcpy. Newly created T objects will contain uninitialized memory. + + In most cases, std::vector<T> can provide a similar level of performance for POD objects when + used with appropriate care. In new code, consider std::vector<T> instead. 
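A hypothetical sketch of the raw-data behaviour described above (illustration only): elements are treated as plain bytes, and newly appended slots start out uninitialized.

    #include "include/private/SkTDArray.h"

    void sketch() {
        SkTDArray<int> ids;
        ids.push_back(7);                 // raw copy; no constructor runs
        int* extra = ids.append(3);       // grows by three uninitialized slots
        extra[0] = extra[1] = extra[2] = 0;
        SkASSERT(ids.count() == 4);
    }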
+*/ +template <typename T> class SkTDArray { +public: + SkTDArray() : fArray(nullptr), fReserve(0), fCount(0) {} + SkTDArray(const T src[], int count) { + SkASSERT(src || count == 0); + + fReserve = fCount = 0; + fArray = nullptr; + if (count) { + fArray = (T*)sk_malloc_throw(SkToSizeT(count) * sizeof(T)); + memcpy(fArray, src, sizeof(T) * SkToSizeT(count)); + fReserve = fCount = count; + } + } + SkTDArray(const std::initializer_list<T>& list) : SkTDArray(list.begin(), list.size()) {} + SkTDArray(const SkTDArray<T>& src) : fArray(nullptr), fReserve(0), fCount(0) { + SkTDArray<T> tmp(src.fArray, src.fCount); + this->swap(tmp); + } + SkTDArray(SkTDArray<T>&& src) : fArray(nullptr), fReserve(0), fCount(0) { + this->swap(src); + } + ~SkTDArray() { + sk_free(fArray); + } + + SkTDArray<T>& operator=(const SkTDArray<T>& src) { + if (this != &src) { + if (src.fCount > fReserve) { + SkTDArray<T> tmp(src.fArray, src.fCount); + this->swap(tmp); + } else { + sk_careful_memcpy(fArray, src.fArray, sizeof(T) * SkToSizeT(src.fCount)); + fCount = src.fCount; + } + } + return *this; + } + SkTDArray<T>& operator=(SkTDArray<T>&& src) { + if (this != &src) { + this->swap(src); + src.reset(); + } + return *this; + } + + friend bool operator==(const SkTDArray<T>& a, const SkTDArray<T>& b) { + return a.fCount == b.fCount && + (a.fCount == 0 || + !memcmp(a.fArray, b.fArray, SkToSizeT(a.fCount) * sizeof(T))); + } + friend bool operator!=(const SkTDArray<T>& a, const SkTDArray<T>& b) { + return !(a == b); + } + + void swap(SkTDArray<T>& that) { + using std::swap; + swap(fArray, that.fArray); + swap(fReserve, that.fReserve); + swap(fCount, that.fCount); + } + + bool isEmpty() const { return fCount == 0; } + bool empty() const { return this->isEmpty(); } + + /** + * Return the number of elements in the array + */ + int count() const { return fCount; } + size_t size() const { return fCount; } + + /** + * Return the total number of elements allocated. + * reserved() - count() gives you the number of elements you can add + * without causing an allocation. + */ + int reserved() const { return fReserve; } + + /** + * return the number of bytes in the array: count * sizeof(T) + */ + size_t bytes() const { return fCount * sizeof(T); } + + T* begin() { return fArray; } + const T* begin() const { return fArray; } + T* end() { return fArray ? fArray + fCount : nullptr; } + const T* end() const { return fArray ? fArray + fCount : nullptr; } + + T& operator[](int index) { + SkASSERT(index < fCount); + return fArray[index]; + } + const T& operator[](int index) const { + SkASSERT(index < fCount); + return fArray[index]; + } + + T& getAt(int index) { + return (*this)[index]; + } + + const T& back() const { SkASSERT(fCount > 0); return fArray[fCount-1]; } + T& back() { SkASSERT(fCount > 0); return fArray[fCount-1]; } + + void reset() { + if (fArray) { + sk_free(fArray); + fArray = nullptr; + fReserve = fCount = 0; + } else { + SkASSERT(fReserve == 0 && fCount == 0); + } + } + + void rewind() { + // same as setCount(0) + fCount = 0; + } + + /** + * Sets the number of elements in the array. + * If the array does not have space for count elements, it will increase + * the storage allocated to some amount greater than that required. + * It will never shrink the storage. 
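For example (hypothetical sizes, illustration only): shrinking the logical count leaves the larger allocation in place until shrinkToFit() or reset() is called.

    void sketch(SkTDArray<float>& d) {
        d.setCount(100);                  // may grow the allocation to at least 100 slots
        d.setCount(10);                   // logical size shrinks ...
        SkASSERT(d.reserved() >= 100);    // ... but the allocation does not
        d.shrinkToFit();                  // explicitly trims the allocation down to 10
    }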
+ */ + void setCount(int count) { + SkASSERT(count >= 0); + if (count > fReserve) { + this->resizeStorageToAtLeast(count); + } + fCount = count; + } + + void setReserve(int reserve) { + SkASSERT(reserve >= 0); + if (reserve > fReserve) { + this->resizeStorageToAtLeast(reserve); + } + } + void reserve(size_t n) { + SkASSERT_RELEASE(SkTFitsIn<int>(n)); + this->setReserve(SkToInt(n)); + } + + T* prepend() { + this->adjustCount(1); + memmove(fArray + 1, fArray, (fCount - 1) * sizeof(T)); + return fArray; + } + + T* append() { + return this->append(1, nullptr); + } + T* append(int count, const T* src = nullptr) { + int oldCount = fCount; + if (count) { + SkASSERT(src == nullptr || fArray == nullptr || + src + count <= fArray || fArray + oldCount <= src); + + this->adjustCount(count); + if (src) { + memcpy(fArray + oldCount, src, sizeof(T) * count); + } + } + return fArray + oldCount; + } + + T* insert(int index) { + return this->insert(index, 1, nullptr); + } + T* insert(int index, int count, const T* src = nullptr) { + SkASSERT(count); + SkASSERT(index <= fCount); + size_t oldCount = fCount; + this->adjustCount(count); + T* dst = fArray + index; + memmove(dst + count, dst, sizeof(T) * (oldCount - index)); + if (src) { + memcpy(dst, src, sizeof(T) * count); + } + return dst; + } + + void remove(int index, int count = 1) { + SkASSERT(index + count <= fCount); + fCount = fCount - count; + memmove(fArray + index, fArray + index + count, sizeof(T) * (fCount - index)); + } + + void removeShuffle(int index) { + SkASSERT(index < fCount); + int newCount = fCount - 1; + fCount = newCount; + if (index != newCount) { + memcpy(fArray + index, fArray + newCount, sizeof(T)); + } + } + + int find(const T& elem) const { + const T* iter = fArray; + const T* stop = fArray + fCount; + + for (; iter < stop; iter++) { + if (*iter == elem) { + return SkToInt(iter - fArray); + } + } + return -1; + } + + int rfind(const T& elem) const { + const T* iter = fArray + fCount; + const T* stop = fArray; + + while (iter > stop) { + if (*--iter == elem) { + return SkToInt(iter - stop); + } + } + return -1; + } + + /** + * Returns true iff the array contains this element. + */ + bool contains(const T& elem) const { + return (this->find(elem) >= 0); + } + + /** + * Copies up to max elements into dst. The number of items copied is + * capped by count - index. The actual number copied is returned. 
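A concrete, hypothetical example of that capping rule: with 10 elements, a request for 5 starting at index 7 copies only the 3 that exist.

    void sketch(const SkTDArray<int>& src) {    // assume src.count() == 10
        int dst[5];
        int copied = src.copyRange(dst, 7, 5);  // std::min(5, 10 - 7)
        SkASSERT(copied == 3);
    }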
+ */ + int copyRange(T* dst, int index, int max) const { + SkASSERT(max >= 0); + SkASSERT(!max || dst); + if (index >= fCount) { + return 0; + } + int count = std::min(max, fCount - index); + memcpy(dst, fArray + index, sizeof(T) * count); + return count; + } + + void copy(T* dst) const { + this->copyRange(dst, 0, fCount); + } + + // routines to treat the array like a stack + void push_back(const T& v) { *this->append() = v; } + T* push() { return this->append(); } + const T& top() const { return (*this)[fCount - 1]; } + T& top() { return (*this)[fCount - 1]; } + void pop(T* elem) { SkASSERT(fCount > 0); if (elem) *elem = (*this)[fCount - 1]; --fCount; } + void pop() { SkASSERT(fCount > 0); --fCount; } + + void deleteAll() { + T* iter = fArray; + T* stop = fArray + fCount; + while (iter < stop) { + delete *iter; + iter += 1; + } + this->reset(); + } + + void freeAll() { + T* iter = fArray; + T* stop = fArray + fCount; + while (iter < stop) { + sk_free(*iter); + iter += 1; + } + this->reset(); + } + + void unrefAll() { + T* iter = fArray; + T* stop = fArray + fCount; + while (iter < stop) { + (*iter)->unref(); + iter += 1; + } + this->reset(); + } + + void safeUnrefAll() { + T* iter = fArray; + T* stop = fArray + fCount; + while (iter < stop) { + SkSafeUnref(*iter); + iter += 1; + } + this->reset(); + } + +#ifdef SK_DEBUG + void validate() const { + SkASSERT((fReserve == 0 && fArray == nullptr) || + (fReserve > 0 && fArray != nullptr)); + SkASSERT(fCount <= fReserve); + } +#endif + + void shrinkToFit() { + if (fReserve != fCount) { + SkASSERT(fReserve > fCount); + fReserve = fCount; + fArray = (T*)sk_realloc_throw(fArray, fReserve * sizeof(T)); + } + } + +private: + T* fArray; + int fReserve; // size of the allocation in fArray (#elements) + int fCount; // logical number of elements (fCount <= fReserve) + + /** + * Adjusts the number of elements in the array. + * This is the same as calling setCount(count() + delta). + */ + void adjustCount(int delta) { + SkASSERT(delta > 0); + + // We take care to avoid overflow here. + // The sum of fCount and delta is at most 4294967294, which fits fine in uint32_t. + uint32_t count = (uint32_t)fCount + (uint32_t)delta; + SkASSERT_RELEASE( SkTFitsIn<int>(count) ); + + this->setCount(SkTo<int>(count)); + } + + /** + * Increase the storage allocation such that it can hold (fCount + extra) + * elements. + * It never shrinks the allocation, and it may increase the allocation by + * more than is strictly required, based on a private growth heuristic. + * + * note: does NOT modify fCount + */ + void resizeStorageToAtLeast(int count) { + SkASSERT(count > fReserve); + + // We take care to avoid overflow here. + // The maximum value we can get for reserve here is 2684354563, which fits in uint32_t. + uint32_t reserve = (uint32_t)count + 4; + reserve += reserve / 4; + SkASSERT_RELEASE( SkTFitsIn<int>(reserve) ); + + fReserve = SkTo<int>(reserve); + fArray = (T*)sk_realloc_throw(fArray, (size_t)fReserve * sizeof(T)); + } +}; + +template <typename T> static inline void swap(SkTDArray<T>& a, SkTDArray<T>& b) { + a.swap(b); +} + +#endif diff --git a/src/deps/skia/include/private/SkTFitsIn.h b/src/deps/skia/include/private/SkTFitsIn.h new file mode 100644 index 000000000..a912f13e0 --- /dev/null +++ b/src/deps/skia/include/private/SkTFitsIn.h @@ -0,0 +1,99 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkTFitsIn_DEFINED +#define SkTFitsIn_DEFINED + +#include <limits> +#include <stdint.h> +#include <type_traits> + +/** + * std::underlying_type is only defined for enums. For integral types, we just want the type. + */ +template <typename T, class Enable = void> +struct sk_strip_enum { + typedef T type; +}; + +template <typename T> +struct sk_strip_enum<T, typename std::enable_if<std::is_enum<T>::value>::type> { + typedef typename std::underlying_type<T>::type type; +}; + + +/** + * In C++ an unsigned to signed cast where the source value cannot be represented in the destination + * type results in an implementation defined destination value. Unlike C, C++ does not allow a trap. + * This makes "(S)(D)s == s" a possibly useful test. However, there are two cases where this is + * incorrect: + * + * when testing if a value of a smaller signed type can be represented in a larger unsigned type + * (int8_t)(uint16_t)-1 == -1 => (int8_t)0xFFFF == -1 => [implementation defined] == -1 + * + * when testing if a value of a larger unsigned type can be represented in a smaller signed type + * (uint16_t)(int8_t)0xFFFF == 0xFFFF => (uint16_t)-1 == 0xFFFF => 0xFFFF == 0xFFFF => true. + * + * Consider the cases: + * u = unsigned, less digits + * U = unsigned, more digits + * s = signed, less digits + * S = signed, more digits + * v is the value we're considering. + * + * u -> U: (u)(U)v == v, trivially true + * U -> u: (U)(u)v == v, both casts well defined, test works + * s -> S: (s)(S)v == v, trivially true + * S -> s: (S)(s)v == v, first cast implementation value, second cast defined, test works + * s -> U: (s)(U)v == v, *this is bad*, the second cast results in implementation defined value + * S -> u: (S)(u)v == v, the second cast is required to prevent promotion of rhs to unsigned + * u -> S: (u)(S)v == v, trivially true + * U -> s: (U)(s)v == v, *this is bad*, + * first cast results in implementation defined value, + * second cast is defined. However, this creates false positives + * uint16_t x = 0xFFFF + * (uint16_t)(int8_t)x == x + * => (uint16_t)-1 == x + * => 0xFFFF == x + * => true + * + * So for the eight cases three are trivially true, three more are valid casts, and two are special. + * The two 'full' checks which otherwise require two comparisons are valid cast checks. + * The two remaining checks s -> U [v >= 0] and U -> s [v <= max(s)] can be done with one op. + */ + +template <typename D, typename S> +static constexpr inline +typename std::enable_if<(std::is_integral<S>::value || std::is_enum<S>::value) && + (std::is_integral<D>::value || std::is_enum<D>::value), bool>::type +/*bool*/ SkTFitsIn(S src) { + // SkTFitsIn() is used in public headers, so needs to be written targeting at most C++11. + return + + // E.g. (int8_t)(uint8_t) int8_t(-1) == -1, but the uint8_t == 255, not -1. + (std::is_signed<S>::value && std::is_unsigned<D>::value && sizeof(S) <= sizeof(D)) ? + (S)0 <= src : + + // E.g. (uint8_t)(int8_t) uint8_t(255) == 255, but the int8_t == -1. + (std::is_signed<D>::value && std::is_unsigned<S>::value && sizeof(D) <= sizeof(S)) ? + src <= (S)std::numeric_limits<typename sk_strip_enum<D>::type>::max() : + +#if !defined(SK_DEBUG) && !defined(__MSVC_RUNTIME_CHECKS ) + // Correct (simple) version. This trips up MSVC's /RTCc run-time checking. + (S)(D)src == src; +#else + // More complex version that's safe with /RTCc. Used in all debug builds, for coverage. + (std::is_signed<S>::value) ? 
+ (intmax_t)src >= (intmax_t)std::numeric_limits<typename sk_strip_enum<D>::type>::min() && + (intmax_t)src <= (intmax_t)std::numeric_limits<typename sk_strip_enum<D>::type>::max() : + + // std::is_unsigned<S> ? + (uintmax_t)src <= (uintmax_t)std::numeric_limits<typename sk_strip_enum<D>::type>::max(); +#endif +} + +#endif diff --git a/src/deps/skia/include/private/SkTHash.h b/src/deps/skia/include/private/SkTHash.h new file mode 100644 index 000000000..9ed039748 --- /dev/null +++ b/src/deps/skia/include/private/SkTHash.h @@ -0,0 +1,548 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTHash_DEFINED +#define SkTHash_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkChecksum.h" +#include "include/private/SkTemplates.h" +#include <new> +#include <utility> + +// Before trying to use SkTHashTable, look below to see if SkTHashMap or SkTHashSet works for you. +// They're easier to use, usually perform the same, and have fewer sharp edges. + +// T and K are treated as ordinary copyable C++ types. +// Traits must have: +// - static K GetKey(T) +// - static uint32_t Hash(K) +// If the key is large and stored inside T, you may want to make K a const&. +// Similarly, if T is large you might want it to be a pointer. +template <typename T, typename K, typename Traits = T> +class SkTHashTable { +public: + SkTHashTable() = default; + ~SkTHashTable() = default; + + SkTHashTable(const SkTHashTable& that) { *this = that; } + SkTHashTable( SkTHashTable&& that) { *this = std::move(that); } + + SkTHashTable& operator=(const SkTHashTable& that) { + if (this != &that) { + fCount = that.fCount; + fCapacity = that.fCapacity; + fSlots.reset(that.fCapacity); + for (int i = 0; i < fCapacity; i++) { + fSlots[i] = that.fSlots[i]; + } + } + return *this; + } + + SkTHashTable& operator=(SkTHashTable&& that) { + if (this != &that) { + fCount = that.fCount; + fCapacity = that.fCapacity; + fSlots = std::move(that.fSlots); + + that.fCount = that.fCapacity = 0; + } + return *this; + } + + // Clear the table. + void reset() { *this = SkTHashTable(); } + + // How many entries are in the table? + int count() const { return fCount; } + + // How many slots does the table contain? (Note that unlike an array, hash tables can grow + // before reaching 100% capacity.) + int capacity() const { return fCapacity; } + + // Approximately how many bytes of memory do we use beyond sizeof(*this)? + size_t approxBytesUsed() const { return fCapacity * sizeof(Slot); } + + // !!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!! + // set(), find() and foreach() all allow mutable access to table entries. + // If you change an entry so that it no longer has the same key, all hell + // will break loose. Do not do that! + // + // Please prefer to use SkTHashMap or SkTHashSet, which do not have this danger. + + // The pointers returned by set() and find() are valid only until the next call to set(). + // The pointers you receive in foreach() are only valid for its duration. + + // Copy val into the hash table, returning a pointer to the copy now in the table. + // If there already is an entry in the table with the same key, we overwrite it. + T* set(T val) { + if (4 * fCount >= 3 * fCapacity) { + this->resize(fCapacity > 0 ? fCapacity * 2 : 4); + } + return this->uncheckedSet(std::move(val)); + } + + // If there is an entry in the table with this key, return a pointer to it. If not, null. 
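To make the Traits requirement concrete, a hypothetical sketch (the names and the deliberately trivial hash are invented for illustration; real code would use a proper mixer such as SkGoodHash):

    #include "include/private/SkTHash.h"

    struct Entry { uint32_t id; float weight; };
    struct EntryTraits {
        static uint32_t GetKey(const Entry& e) { return e.id; }
        static uint32_t Hash(uint32_t id) { return id; }  // toy hash; 0 is remapped to 1 internally
    };

    void sketch() {
        SkTHashTable<Entry, uint32_t, EntryTraits> table;
        table.set(Entry{7, 1.5f});
        if (Entry* found = table.find(7)) {
            found->weight = 2.0f;          // mutable access is fine; never change found->id
        }
    }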
+ T* find(const K& key) const { + uint32_t hash = Hash(key); + int index = hash & (fCapacity-1); + for (int n = 0; n < fCapacity; n++) { + Slot& s = fSlots[index]; + if (s.empty()) { + return nullptr; + } + if (hash == s.hash && key == Traits::GetKey(*s)) { + return &*s; + } + index = this->next(index); + } + SkASSERT(fCapacity == 0); + return nullptr; + } + + // If there is an entry in the table with this key, return it. If not, null. + // This only works for pointer type T, and cannot be used to find an nullptr entry. + T findOrNull(const K& key) const { + if (T* p = this->find(key)) { + return *p; + } + return nullptr; + } + + // Remove the value with this key from the hash table. + void remove(const K& key) { + SkASSERT(this->find(key)); + + uint32_t hash = Hash(key); + int index = hash & (fCapacity-1); + for (int n = 0; n < fCapacity; n++) { + Slot& s = fSlots[index]; + SkASSERT(s.has_value()); + if (hash == s.hash && key == Traits::GetKey(*s)) { + this->removeSlot(index); + if (4 * fCount <= fCapacity && fCapacity > 4) { + this->resize(fCapacity / 2); + } + return; + } + index = this->next(index); + } + } + + // Call fn on every entry in the table. You may mutate the entries, but be very careful. + template <typename Fn> // f(T*) + void foreach(Fn&& fn) { + for (int i = 0; i < fCapacity; i++) { + if (fSlots[i].has_value()) { + fn(&*fSlots[i]); + } + } + } + + // Call fn on every entry in the table. You may not mutate anything. + template <typename Fn> // f(T) or f(const T&) + void foreach(Fn&& fn) const { + for (int i = 0; i < fCapacity; i++) { + if (fSlots[i].has_value()) { + fn(*fSlots[i]); + } + } + } + + // A basic iterator-like class which disallows mutation; sufficient for range-based for loops. + // Intended for use by SkTHashMap and SkTHashSet via begin() and end(). + // Adding or removing elements may invalidate all iterators. + template <typename SlotVal> + class Iter { + public: + using TTable = SkTHashTable<T, K, Traits>; + + Iter(const TTable* table, int slot) : fTable(table), fSlot(slot) {} + + static Iter MakeBegin(const TTable* table) { + return Iter{table, table->firstPopulatedSlot()}; + } + + static Iter MakeEnd(const TTable* table) { + return Iter{table, table->capacity()}; + } + + const SlotVal& operator*() const { + return *fTable->slot(fSlot); + } + + const SlotVal* operator->() const { + return fTable->slot(fSlot); + } + + bool operator==(const Iter& that) const { + // Iterators from different tables shouldn't be compared against each other. + SkASSERT(fTable == that.fTable); + return fSlot == that.fSlot; + } + + bool operator!=(const Iter& that) const { + return !(*this == that); + } + + Iter& operator++() { + fSlot = fTable->nextPopulatedSlot(fSlot); + return *this; + } + + Iter operator++(int) { + Iter old = *this; + this->operator++(); + return old; + } + + protected: + const TTable* fTable; + int fSlot; + }; + +private: + // Finds the first non-empty slot for an iterator. + int firstPopulatedSlot() const { + for (int i = 0; i < fCapacity; i++) { + if (fSlots[i].has_value()) { + return i; + } + } + return fCapacity; + } + + // Increments an iterator's slot. + int nextPopulatedSlot(int currentSlot) const { + for (int i = currentSlot + 1; i < fCapacity; i++) { + if (fSlots[i].has_value()) { + return i; + } + } + return fCapacity; + } + + // Reads from an iterator's slot. 
+ const T* slot(int i) const { + SkASSERT(fSlots[i].has_value()); + return &*fSlots[i]; + } + + T* uncheckedSet(T&& val) { + const K& key = Traits::GetKey(val); + SkASSERT(key == key); + uint32_t hash = Hash(key); + int index = hash & (fCapacity-1); + for (int n = 0; n < fCapacity; n++) { + Slot& s = fSlots[index]; + if (s.empty()) { + // New entry. + s.emplace(std::move(val), hash); + fCount++; + return &*s; + } + if (hash == s.hash && key == Traits::GetKey(*s)) { + // Overwrite previous entry. + // Note: this triggers extra copies when adding the same value repeatedly. + s.emplace(std::move(val), hash); + return &*s; + } + + index = this->next(index); + } + SkASSERT(false); + return nullptr; + } + + void resize(int capacity) { + int oldCapacity = fCapacity; + SkDEBUGCODE(int oldCount = fCount); + + fCount = 0; + fCapacity = capacity; + SkAutoTArray<Slot> oldSlots = std::move(fSlots); + fSlots = SkAutoTArray<Slot>(capacity); + + for (int i = 0; i < oldCapacity; i++) { + Slot& s = oldSlots[i]; + if (s.has_value()) { + this->uncheckedSet(*std::move(s)); + } + } + SkASSERT(fCount == oldCount); + } + + void removeSlot(int index) { + fCount--; + + // Rearrange elements to restore the invariants for linear probing. + for (;;) { + Slot& emptySlot = fSlots[index]; + int emptyIndex = index; + int originalIndex; + // Look for an element that can be moved into the empty slot. + // If the empty slot is in between where an element landed, and its native slot, then + // move it to the empty slot. Don't move it if its native slot is in between where + // the element landed and the empty slot. + // [native] <= [empty] < [candidate] == GOOD, can move candidate to empty slot + // [empty] < [native] < [candidate] == BAD, need to leave candidate where it is + do { + index = this->next(index); + Slot& s = fSlots[index]; + if (s.empty()) { + // We're done shuffling elements around. Clear the last empty slot. + emptySlot.reset(); + return; + } + originalIndex = s.hash & (fCapacity - 1); + } while ((index <= originalIndex && originalIndex < emptyIndex) + || (originalIndex < emptyIndex && emptyIndex < index) + || (emptyIndex < index && index <= originalIndex)); + // Move the element to the empty slot. + Slot& moveFrom = fSlots[index]; + emptySlot = std::move(moveFrom); + } + } + + int next(int index) const { + index--; + if (index < 0) { index += fCapacity; } + return index; + } + + static uint32_t Hash(const K& key) { + uint32_t hash = Traits::Hash(key) & 0xffffffff; + return hash ? hash : 1; // We reserve hash 0 to mark empty. 
+ } + + struct Slot { + Slot() = default; + ~Slot() { this->reset(); } + + Slot(const Slot& that) { *this = that; } + Slot& operator=(const Slot& that) { + if (this == &that) { + return *this; + } + if (hash) { + if (that.hash) { + val.storage = that.val.storage; + hash = that.hash; + } else { + this->reset(); + } + } else { + if (that.hash) { + new (&val.storage) T(that.val.storage); + hash = that.hash; + } else { + // do nothing, no value on either side + } + } + return *this; + } + + Slot(Slot&& that) { *this = std::move(that); } + Slot& operator=(Slot&& that) { + if (this == &that) { + return *this; + } + if (hash) { + if (that.hash) { + val.storage = std::move(that.val.storage); + hash = that.hash; + } else { + this->reset(); + } + } else { + if (that.hash) { + new (&val.storage) T(std::move(that.val.storage)); + hash = that.hash; + } else { + // do nothing, no value on either side + } + } + return *this; + } + + T& operator*() & { return val.storage; } + const T& operator*() const& { return val.storage; } + T&& operator*() && { return std::move(val.storage); } + const T&& operator*() const&& { return std::move(val.storage); } + + Slot& emplace(T&& v, uint32_t h) { + this->reset(); + new (&val.storage) T(std::move(v)); + hash = h; + return *this; + } + + bool has_value() const { return hash != 0; } + explicit operator bool() const { return this->has_value(); } + bool empty() const { return !this->has_value(); } + + void reset() { + if (hash) { + val.storage.~T(); + hash = 0; + } + } + + uint32_t hash = 0; + + private: + union Storage { + T storage; + Storage() {} + ~Storage() {} + } val; + }; + + int fCount = 0, + fCapacity = 0; + SkAutoTArray<Slot> fSlots; +}; + +// Maps K->V. A more user-friendly wrapper around SkTHashTable, suitable for most use cases. +// K and V are treated as ordinary copyable C++ types, with no assumed relationship between the two. +template <typename K, typename V, typename HashK = SkGoodHash> +class SkTHashMap { +public: + // Clear the map. + void reset() { fTable.reset(); } + + // How many key/value pairs are in the table? + int count() const { return fTable.count(); } + + // Approximately how many bytes of memory do we use beyond sizeof(*this)? + size_t approxBytesUsed() const { return fTable.approxBytesUsed(); } + + // N.B. The pointers returned by set() and find() are valid only until the next call to set(). + + // Set key to val in the table, replacing any previous value with the same key. + // We copy both key and val, and return a pointer to the value copy now in the table. + V* set(K key, V val) { + Pair* out = fTable.set({std::move(key), std::move(val)}); + return &out->second; + } + + // If there is key/value entry in the table with this key, return a pointer to the value. + // If not, return null. + V* find(const K& key) const { + if (Pair* p = fTable.find(key)) { + return &p->second; + } + return nullptr; + } + + V& operator[](const K& key) { + if (V* val = this->find(key)) { + return *val; + } + return *this->set(key, V{}); + } + + // Remove the key/value entry in the table with this key. + void remove(const K& key) { + SkASSERT(this->find(key)); + fTable.remove(key); + } + + // Call fn on every key/value pair in the table. You may mutate the value but not the key. + template <typename Fn> // f(K, V*) or f(const K&, V*) + void foreach(Fn&& fn) { + fTable.foreach([&fn](Pair* p){ fn(p->first, &p->second); }); + } + + // Call fn on every key/value pair in the table. You may not mutate anything. 
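A hypothetical usage sketch of the map as a whole (illustration only), relying on the default SkGoodHash:

    #include "include/private/SkTHash.h"

    void sketch() {
        SkTHashMap<uint32_t, float> weights;
        weights.set(7, 1.5f);                 // insert or overwrite
        if (float* w = weights.find(7)) {
            *w += 0.5f;                       // pointer is valid only until the next set()
        }
        weights[9] += 1.0f;                   // operator[] default-constructs missing values
        for (const auto& kv : weights) {      // iterators yield std::pair<K, V>
            SkDebugf("%u -> %g\n", kv.first, kv.second);
        }
    }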
+ template <typename Fn> // f(K, V), f(const K&, V), f(K, const V&) or f(const K&, const V&). + void foreach(Fn&& fn) const { + fTable.foreach([&fn](const Pair& p){ fn(p.first, p.second); }); + } + + // Dereferencing an iterator gives back a key-value pair, suitable for structured binding. + struct Pair : public std::pair<K, V> { + using std::pair<K, V>::pair; + static const K& GetKey(const Pair& p) { return p.first; } + static auto Hash(const K& key) { return HashK()(key); } + }; + + using Iter = typename SkTHashTable<Pair, K>::template Iter<std::pair<K, V>>; + + Iter begin() const { + return Iter::MakeBegin(&fTable); + } + + Iter end() const { + return Iter::MakeEnd(&fTable); + } + +private: + SkTHashTable<Pair, K> fTable; +}; + +// A set of T. T is treated as an ordinary copyable C++ type. +template <typename T, typename HashT = SkGoodHash> +class SkTHashSet { +public: + // Clear the set. + void reset() { fTable.reset(); } + + // How many items are in the set? + int count() const { return fTable.count(); } + + // Is empty? + bool empty() const { return fTable.count() == 0; } + + // Approximately how many bytes of memory do we use beyond sizeof(*this)? + size_t approxBytesUsed() const { return fTable.approxBytesUsed(); } + + // Copy an item into the set. + void add(T item) { fTable.set(std::move(item)); } + + // Is this item in the set? + bool contains(const T& item) const { return SkToBool(this->find(item)); } + + // If an item equal to this is in the set, return a pointer to it, otherwise null. + // This pointer remains valid until the next call to add(). + const T* find(const T& item) const { return fTable.find(item); } + + // Remove the item in the set equal to this. + void remove(const T& item) { + SkASSERT(this->contains(item)); + fTable.remove(item); + } + + // Call fn on every item in the set. You may not mutate anything. + template <typename Fn> // f(T), f(const T&) + void foreach (Fn&& fn) const { + fTable.foreach(fn); + } + +private: + struct Traits { + static const T& GetKey(const T& item) { return item; } + static auto Hash(const T& item) { return HashT()(item); } + }; + +public: + using Iter = typename SkTHashTable<T, T, Traits>::template Iter<T>; + + Iter begin() const { + return Iter::MakeBegin(&fTable); + } + + Iter end() const { + return Iter::MakeEnd(&fTable); + } + +private: + SkTHashTable<T, T, Traits> fTable; +}; + +#endif//SkTHash_DEFINED diff --git a/src/deps/skia/include/private/SkTLogic.h b/src/deps/skia/include/private/SkTLogic.h new file mode 100644 index 000000000..a2c2f4cfd --- /dev/null +++ b/src/deps/skia/include/private/SkTLogic.h @@ -0,0 +1,86 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * + * This header provides some std:: features early in the skstd namespace + * and several Skia-specific additions in the sknonstd namespace. + */ + +#ifndef SkTLogic_DEFINED +#define SkTLogic_DEFINED + +#include <cstddef> +#include <type_traits> +#include <utility> +#include "include/private/SkTo.h" + +namespace skstd { + +// C++17, <variant> +struct monostate {}; + +// C++17, <type_traits> +template<typename...> struct conjunction : std::true_type { }; +template<typename T> struct conjunction<T> : T { }; +template<typename T, typename... 
Ts> +struct conjunction<T, Ts...> : std::conditional<bool(T::value), conjunction<Ts...>, T>::type { }; + +// C++17, std::data, std::size +template<typename Container> +constexpr auto data(Container& c) -> decltype(c.data()) { return c.data(); } +template<typename Container> +constexpr auto data(const Container& c) -> decltype(c.data()) { return c.data(); } +template<typename Array, size_t N> +constexpr auto data(Array(&a)[N]) -> decltype(a) { return a; } +template<typename T> +constexpr const T* data(std::initializer_list<T> i) { return i.begin(); } + +template<typename Container> +constexpr auto size(Container& c) -> decltype(c.size()) { return c.size(); } +template<typename Array, size_t N> +constexpr size_t size(Array(&)[N]) { return N; } +template<typename T> +constexpr const T* size(std::initializer_list<T> i) { return i.end() - i.begin(); } +} // namespace skstd + +// The sknonstd namespace contains things we would like to be proposed and feel std-ish. +namespace sknonstd { + +// The name 'copy' here is fraught with peril. In this case it means 'append', not 'overwrite'. +// Alternate proposed names are 'propagate', 'augment', or 'append' (and 'add', but already taken). +// std::experimental::propagate_const already exists for other purposes in TSv2. +// These also follow the <dest, source> pattern used by boost. +template <typename D, typename S> struct copy_const { + using type = std::conditional_t<std::is_const<S>::value, std::add_const_t<D>, D>; +}; +template <typename D, typename S> using copy_const_t = typename copy_const<D, S>::type; + +template <typename D, typename S> struct copy_volatile { + using type = std::conditional_t<std::is_volatile<S>::value, std::add_volatile_t<D>, D>; +}; +template <typename D, typename S> using copy_volatile_t = typename copy_volatile<D, S>::type; + +template <typename D, typename S> struct copy_cv { + using type = copy_volatile_t<copy_const_t<D, S>, S>; +}; +template <typename D, typename S> using copy_cv_t = typename copy_cv<D, S>::type; + +// The name 'same' here means 'overwrite'. +// Alternate proposed names are 'replace', 'transfer', or 'qualify_from'. +// same_xxx<D, S> can be written as copy_xxx<remove_xxx_t<D>, S> +template <typename D, typename S> using same_const = copy_const<std::remove_const_t<D>, S>; +template <typename D, typename S> using same_const_t = typename same_const<D, S>::type; +template <typename D, typename S> using same_volatile =copy_volatile<std::remove_volatile_t<D>,S>; +template <typename D, typename S> using same_volatile_t = typename same_volatile<D, S>::type; +template <typename D, typename S> using same_cv = copy_cv<std::remove_cv_t<D>, S>; +template <typename D, typename S> using same_cv_t = typename same_cv<D, S>::type; + +} // namespace sknonstd + +template <typename Container> +constexpr int SkCount(const Container& c) { return SkTo<int>(skstd::size(c)); } + +#endif diff --git a/src/deps/skia/include/private/SkTOptional.h b/src/deps/skia/include/private/SkTOptional.h new file mode 100644 index 000000000..f610493b0 --- /dev/null +++ b/src/deps/skia/include/private/SkTOptional.h @@ -0,0 +1,362 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTOptional_DEFINED +#define SkTOptional_DEFINED + +#include "include/core/SkTypes.h" + +#include <utility> + +namespace skstd { + +/** + * An empty optional is represented with `nullopt`. 
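A hypothetical sketch of the drop-in usage (illustration only; it mirrors the std::optional idioms this class stands in for):

    #include "include/private/SkTOptional.h"

    skstd::optional<int> findSetting(bool present) {
        if (!present) {
            return skstd::nullopt;            // empty optional
        }
        return 42;
    }

    void sketch() {
        auto setting = findSetting(false);
        SkASSERT(!setting.has_value());
        int value = setting.value_or(7);      // falls back to 7 when empty
        SkASSERT(value == 7);
    }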
+ */ +struct nullopt_t { + struct tag {}; + + // nullopt_t must not be default-constructible. + explicit constexpr nullopt_t(tag) {} +}; + +static constexpr nullopt_t nullopt{nullopt_t::tag{}}; + +/** + * Simple drop-in replacement for std::optional until we move to C++17. This does not have all of + * std::optional's capabilities, but it covers our needs for the time being. + */ +template<typename T> +class optional { +public: + optional(const T& value) + : fHasValue(true) { + new(&fPayload.fValue) T(value); + } + + optional(T&& value) + : fHasValue(true) { + new(&fPayload.fValue) T(std::move(value)); + } + + optional() {} + + optional(const optional& other) { + *this = other; + } + + // Construction with nullopt is the same as default construction. + optional(nullopt_t) : optional() {} + + // We need a non-const copy constructor because otherwise optional(nonConstSrc) isn't an exact + // match for the copy constructor, and we'd end up invoking the Args&&... template by mistake. + optional(optional& other) { + *this = other; + } + + optional(optional&& other) { + *this = std::move(other); + } + + template<typename... Args> + optional(Args&&... args) { + fHasValue = true; + new(&fPayload.fValue) T(std::forward<Args>(args)...); + } + + ~optional() { + this->reset(); + } + + optional& operator=(const optional& other) { + if (this != &other) { + if (fHasValue) { + if (other.fHasValue) { + fPayload.fValue = other.fPayload.fValue; + } else { + this->reset(); + } + } else { + if (other.fHasValue) { + fHasValue = true; + new (&fPayload.fValue) T(other.fPayload.fValue); + } else { + // do nothing, no value on either side + } + } + } + return *this; + } + + optional& operator=(optional&& other) { + if (this != &other) { + if (fHasValue) { + if (other.fHasValue) { + fPayload.fValue = std::move(other.fPayload.fValue); + } else { + this->reset(); + } + } else { + if (other.fHasValue) { + fHasValue = true; + new (&fPayload.fValue) T(std::move(other.fPayload.fValue)); + } else { + // do nothing, no value on either side + } + } + } + return *this; + } + + template<typename... Args> + optional& emplace(Args&&... args) { + this->reset(); + fHasValue = true; + new(&fPayload.fValue) T(std::forward<Args>(args)...); + return *this; + } + + template<typename U, typename... Args> + optional& emplace(std::initializer_list<U> il, Args&&... args) { + this->reset(); + fHasValue = true; + new(&fPayload.fValue) T(il, std::forward<Args>(args)...); + return *this; + } + + // Assignment to nullopt is the same as reset(). + optional& operator=(nullopt_t) { + this->reset(); + return *this; + } + + T& operator*() & { + SkASSERT(fHasValue); + return fPayload.fValue; + } + + const T& operator*() const& { + SkASSERT(fHasValue); + return fPayload.fValue; + } + + T&& operator*() && { + SkASSERT(fHasValue); + return std::move(fPayload.fValue); + } + + const T&& operator*() const&& { + SkASSERT(fHasValue); + return std::move(fPayload.fValue); + } + + const T& value() const& { + SkASSERT_RELEASE(fHasValue); + return **this; + } + + T& value() & { + SkASSERT_RELEASE(fHasValue); + return **this; + } + + const T&& value() const&& { + SkASSERT_RELEASE(fHasValue); + return std::move(**this); + } + + T&& value() && { + SkASSERT_RELEASE(fHasValue); + return std::move(**this); + } + + T* operator->() { + return &**this; + } + + const T* operator->() const { + return &**this; + } + + template<typename U> + T value_or(U&& value) const& { + return this->has_value() ? 
**this : static_cast<T>(std::forward<U>(value)); + } + + template<typename U> + T value_or(U&& value) && { + return this->has_value() ? std::move(**this) : static_cast<T>(std::forward<U>(value)); + } + + bool has_value() const { + return fHasValue; + } + + explicit operator bool() const { + return this->has_value(); + } + + void reset() { + if (fHasValue) { + fPayload.fValue.~T(); + fHasValue = false; + } + } + +private: + union Payload { + T fValue; + + Payload() {} + + ~Payload() {} + } fPayload; + + bool fHasValue = false; +}; + +// Comparison operators for optional x optional +template <typename T, typename U> bool operator==(const optional<T>& a, const optional<U>& b) { + return (a.has_value() != b.has_value()) ? false : + !a.has_value() ? true : + (*a == *b); +} + +template <typename T, typename U> bool operator!=(const optional<T>& a, const optional<U>& b) { + return (a.has_value() != b.has_value()) ? true : + !a.has_value() ? false : + (*a != *b); +} + +template <typename T, typename U> bool operator<(const optional<T>& a, const optional<U>& b) { + return !b.has_value() ? false : + !a.has_value() ? true : + (*a < *b); +} + +template <typename T, typename U> bool operator<=(const optional<T>& a, const optional<U>& b) { + return !a.has_value() ? true : + !b.has_value() ? false : + (*a <= *b); +} + +template <typename T, typename U> bool operator>(const optional<T>& a, const optional<U>& b) { + return !a.has_value() ? false : + !b.has_value() ? true : + (*a > *b); +} + +template <typename T, typename U> bool operator>=(const optional<T>& a, const optional<U>& b) { + return !b.has_value() ? true : + !a.has_value() ? false : + (*a >= *b); +} + +// Comparison operators for optional x nullopt +template <typename T> bool operator==(const optional<T>& a, nullopt_t) { + return !a.has_value(); +} + +template <typename T> bool operator!=(const optional<T>& a, nullopt_t) { + return a.has_value(); +} + +template <typename T> bool operator<(const optional<T>&, nullopt_t) { + return false; +} + +template <typename T> bool operator<=(const optional<T>& a, nullopt_t) { + return !a.has_value(); +} + +template <typename T> bool operator>(const optional<T>& a, nullopt_t) { + return a.has_value(); +} + +template <typename T> +bool operator>=(const optional<T>&, nullopt_t) { + return true; +} + +// Comparison operators for nullopt x optional +template <typename U> bool operator==(nullopt_t, const optional<U>& b) { + return !b.has_value(); +} + +template <typename U> bool operator!=(nullopt_t, const optional<U>& b) { + return b.has_value(); +} + +template <typename U> bool operator<(nullopt_t, const optional<U>& b) { + return b.has_value(); +} + +template <typename U> bool operator<=(nullopt_t, const optional<U>&) { + return true; +} + +template <typename U> bool operator>(nullopt_t, const optional<U>&) { + return false; +} + +template <typename U> bool operator>=(nullopt_t, const optional<U>& b) { + return !b.has_value(); +} + +// Comparison operators for optional x value +template <typename T, typename U> bool operator==(const optional<T>& a, const U& b) { + return a.has_value() && (*a == b); +} + +template <typename T, typename U> bool operator!=(const optional<T>& a, const U& b) { + return !a.has_value() || (*a != b); +} + +template <typename T, typename U> bool operator<(const optional<T>& a, const U& b) { + return !a.has_value() || (*a < b); +} + +template <typename T, typename U> bool operator<=(const optional<T>& a, const U& b) { + return !a.has_value() || (*a <= b); +} + +template <typename T, 
typename U> bool operator>(const optional<T>& a, const U& b) { + return a.has_value() && (*a > b); +} + +template <typename T, typename U> bool operator>=(const optional<T>& a, const U& b) { + return a.has_value() && (*a >= b); +} + +// Comparison operators for value x optional +template <typename T, typename U> bool operator==(const T& a, const optional<U>& b) { + return b.has_value() && (a == *b); +} + +template <typename T, typename U> bool operator!=(const T& a, const optional<U>& b) { + return !b.has_value() || (a != *b); +} + +template <typename T, typename U> bool operator<(const T& a, const optional<U>& b) { + return b.has_value() && (a < *b); +} + +template <typename T, typename U> bool operator<=(const T& a, const optional<U>& b) { + return b.has_value() && (a <= *b); +} + +template <typename T, typename U> bool operator>(const T& a, const optional<U>& b) { + return !b.has_value() || (a > *b); +} + +template <typename T, typename U> bool operator>=(const T& a, const optional<U>& b) { + return !b.has_value() || (a >= *b); +} + +} // namespace skstd + +#endif diff --git a/src/deps/skia/include/private/SkTPin.h b/src/deps/skia/include/private/SkTPin.h new file mode 100644 index 000000000..c824c4464 --- /dev/null +++ b/src/deps/skia/include/private/SkTPin.h @@ -0,0 +1,23 @@ +/* + * Copyright 2020 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTPin_DEFINED +#define SkTPin_DEFINED + +#include <algorithm> + +/** @return x pinned (clamped) between lo and hi, inclusively. + + Unlike std::clamp(), SkTPin() always returns a value between lo and hi. + If x is NaN, SkTPin() returns lo but std::clamp() returns NaN. +*/ +template <typename T> +static constexpr const T& SkTPin(const T& x, const T& lo, const T& hi) { + return std::max(lo, std::min(x, hi)); +} + +#endif diff --git a/src/deps/skia/include/private/SkTemplates.h b/src/deps/skia/include/private/SkTemplates.h new file mode 100644 index 000000000..4221ee14d --- /dev/null +++ b/src/deps/skia/include/private/SkTemplates.h @@ -0,0 +1,453 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTemplates_DEFINED +#define SkTemplates_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkMalloc.h" +#include "include/private/SkTLogic.h" + +#include <string.h> +#include <array> +#include <cstddef> +#include <memory> +#include <new> +#include <type_traits> +#include <utility> + +/** \file SkTemplates.h + + This file contains light-weight template classes for type-safe and exception-safe + resource management. +*/ + +/** + * Marks a local variable as known to be unused (to avoid warnings). + * Note that this does *not* prevent the local variable from being optimized away. + */ +template<typename T> inline void sk_ignore_unused_variable(const T&) { } + +/** + * Returns a pointer to a D which comes immediately after S[count]. + */ +template <typename D, typename S> static D* SkTAfter(S* ptr, size_t count = 1) { + return reinterpret_cast<D*>(ptr + count); +} + +/** + * Returns a pointer to a D which comes byteOffset bytes after S. + */ +template <typename D, typename S> static D* SkTAddOffset(S* ptr, ptrdiff_t byteOffset) { + // The intermediate char* has the same cv-ness as D as this produces better error messages. + // This relies on the fact that reinterpret_cast can add constness, but cannot remove it. 
+ return reinterpret_cast<D*>(reinterpret_cast<sknonstd::same_cv_t<char, D>*>(ptr) + byteOffset); +} + +// TODO: when C++17 the language is available, use template <auto P> +template <typename T, T* P> struct SkFunctionWrapper { + template <typename... Args> + auto operator()(Args&&... args) const -> decltype(P(std::forward<Args>(args)...)) { + return P(std::forward<Args>(args)...); + } +}; + +/** \class SkAutoTCallVProc + + Call a function when this goes out of scope. The template uses two + parameters, the object, and a function that is to be called in the destructor. + If release() is called, the object reference is set to null. If the object + reference is null when the destructor is called, we do not call the + function. +*/ +template <typename T, void (*P)(T*)> class SkAutoTCallVProc + : public std::unique_ptr<T, SkFunctionWrapper<std::remove_pointer_t<decltype(P)>, P>> { + using inherited = std::unique_ptr<T, SkFunctionWrapper<std::remove_pointer_t<decltype(P)>, P>>; +public: + using inherited::inherited; + SkAutoTCallVProc(const SkAutoTCallVProc&) = delete; + SkAutoTCallVProc(SkAutoTCallVProc&& that) : inherited(std::move(that)) {} + + operator T*() const { return this->get(); } +}; + +/** Allocate an array of T elements, and free the array in the destructor + */ +template <typename T> class SkAutoTArray { +public: + SkAutoTArray() {} + /** Allocate count number of T elements + */ + explicit SkAutoTArray(int count) { + SkASSERT(count >= 0); + if (count) { + fArray.reset(new T[count]); + } + SkDEBUGCODE(fCount = count;) + } + + SkAutoTArray(SkAutoTArray&& other) : fArray(std::move(other.fArray)) { + SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;) + } + SkAutoTArray& operator=(SkAutoTArray&& other) { + if (this != &other) { + fArray = std::move(other.fArray); + SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;) + } + return *this; + } + + /** Reallocates given a new count. Reallocation occurs even if new count equals old count. + */ + void reset(int count = 0) { *this = SkAutoTArray(count); } + + /** Return the array of T elements. Will be NULL if count == 0 + */ + T* get() const { return fArray.get(); } + + /** Return the nth element in the array + */ + T& operator[](int index) const { + SkASSERT((unsigned)index < (unsigned)fCount); + return fArray[index]; + } + + /** Aliases matching other types, like std::vector. */ + const T* data() const { return fArray.get(); } + T* data() { return fArray.get(); } + +private: + std::unique_ptr<T[]> fArray; + SkDEBUGCODE(int fCount = 0;) +}; + +/** Wraps SkAutoTArray, with room for kCountRequested elements preallocated. 
+ */ +template <int kCountRequested, typename T> class SkAutoSTArray { +public: + SkAutoSTArray(SkAutoSTArray&&) = delete; + SkAutoSTArray(const SkAutoSTArray&) = delete; + SkAutoSTArray& operator=(SkAutoSTArray&&) = delete; + SkAutoSTArray& operator=(const SkAutoSTArray&) = delete; + + /** Initialize with no objects */ + SkAutoSTArray() { + fArray = nullptr; + fCount = 0; + } + + /** Allocate count number of T elements + */ + SkAutoSTArray(int count) { + fArray = nullptr; + fCount = 0; + this->reset(count); + } + + ~SkAutoSTArray() { + this->reset(0); + } + + /** Destroys previous objects in the array and default constructs count number of objects */ + void reset(int count) { + T* start = fArray; + T* iter = start + fCount; + while (iter > start) { + (--iter)->~T(); + } + + SkASSERT(count >= 0); + if (fCount != count) { + if (fCount > kCount) { + // 'fArray' was allocated last time so free it now + SkASSERT((T*) fStorage != fArray); + sk_free(fArray); + } + + if (count > kCount) { + fArray = (T*) sk_malloc_throw(count, sizeof(T)); + } else if (count > 0) { + fArray = (T*) fStorage; + } else { + fArray = nullptr; + } + + fCount = count; + } + + iter = fArray; + T* stop = fArray + count; + while (iter < stop) { + new (iter++) T; + } + } + + /** Return the number of T elements in the array + */ + int count() const { return fCount; } + + /** Return the array of T elements. Will be NULL if count == 0 + */ + T* get() const { return fArray; } + + T* begin() { return fArray; } + + const T* begin() const { return fArray; } + + T* end() { return fArray + fCount; } + + const T* end() const { return fArray + fCount; } + + /** Return the nth element in the array + */ + T& operator[](int index) const { + SkASSERT(index < fCount); + return fArray[index]; + } + + /** Aliases matching other types, like std::vector. */ + const T* data() const { return fArray; } + T* data() { return fArray; } + size_t size() const { return fCount; } + +private: +#if defined(SK_BUILD_FOR_GOOGLE3) + // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions + // have multiple large stack allocations. + static const int kMaxBytes = 4 * 1024; + static const int kCount = kCountRequested * sizeof(T) > kMaxBytes + ? kMaxBytes / sizeof(T) + : kCountRequested; +#else + static const int kCount = kCountRequested; +#endif + + int fCount; + T* fArray; + // since we come right after fArray, fStorage should be properly aligned + char fStorage[kCount * sizeof(T)]; +}; + +/** Manages an array of T elements, freeing the array in the destructor. + * Does NOT call any constructors/destructors on T (T must be POD). + */ +template <typename T, + typename = std::enable_if_t<std::is_trivially_default_constructible<T>::value && + std::is_trivially_destructible<T>::value>> +class SkAutoTMalloc { +public: + /** Takes ownership of the ptr. The ptr must be a value which can be passed to sk_free. */ + explicit SkAutoTMalloc(T* ptr = nullptr) : fPtr(ptr) {} + + /** Allocates space for 'count' Ts. */ + explicit SkAutoTMalloc(size_t count) + : fPtr(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr) {} + + SkAutoTMalloc(SkAutoTMalloc&&) = default; + SkAutoTMalloc& operator=(SkAutoTMalloc&&) = default; + + /** Resize the memory area pointed to by the current ptr preserving contents. */ + void realloc(size_t count) { + fPtr.reset(count ? (T*)sk_realloc_throw(fPtr.release(), count * sizeof(T)) : nullptr); + } + + /** Resize the memory area pointed to by the current ptr without preserving contents. 
*/ + T* reset(size_t count = 0) { + fPtr.reset(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr); + return this->get(); + } + + T* get() const { return fPtr.get(); } + + operator T*() { return fPtr.get(); } + + operator const T*() const { return fPtr.get(); } + + T& operator[](int index) { return fPtr.get()[index]; } + + const T& operator[](int index) const { return fPtr.get()[index]; } + + /** Aliases matching other types, like std::vector. */ + const T* data() const { return fPtr.get(); } + T* data() { return fPtr.get(); } + + /** + * Transfer ownership of the ptr to the caller, setting the internal + * pointer to NULL. Note that this differs from get(), which also returns + * the pointer, but it does not transfer ownership. + */ + T* release() { return fPtr.release(); } + +private: + std::unique_ptr<T, SkFunctionWrapper<void(void*), sk_free>> fPtr; +}; + +template <size_t kCountRequested, + typename T, + typename = std::enable_if_t<std::is_trivially_default_constructible<T>::value && + std::is_trivially_destructible<T>::value>> +class SkAutoSTMalloc { +public: + SkAutoSTMalloc() : fPtr(fTStorage) {} + + SkAutoSTMalloc(size_t count) { + if (count > kCount) { + fPtr = (T*)sk_malloc_throw(count, sizeof(T)); + } else if (count) { + fPtr = fTStorage; + } else { + fPtr = nullptr; + } + } + + SkAutoSTMalloc(SkAutoSTMalloc&&) = delete; + SkAutoSTMalloc(const SkAutoSTMalloc&) = delete; + SkAutoSTMalloc& operator=(SkAutoSTMalloc&&) = delete; + SkAutoSTMalloc& operator=(const SkAutoSTMalloc&) = delete; + + ~SkAutoSTMalloc() { + if (fPtr != fTStorage) { + sk_free(fPtr); + } + } + + // doesn't preserve contents + T* reset(size_t count) { + if (fPtr != fTStorage) { + sk_free(fPtr); + } + if (count > kCount) { + fPtr = (T*)sk_malloc_throw(count, sizeof(T)); + } else if (count) { + fPtr = fTStorage; + } else { + fPtr = nullptr; + } + return fPtr; + } + + T* get() const { return fPtr; } + + operator T*() { + return fPtr; + } + + operator const T*() const { + return fPtr; + } + + T& operator[](int index) { + return fPtr[index]; + } + + const T& operator[](int index) const { + return fPtr[index]; + } + + /** Aliases matching other types, like std::vector. */ + const T* data() const { return fPtr; } + T* data() { return fPtr; } + + // Reallocs the array, can be used to shrink the allocation. Makes no attempt to be intelligent + void realloc(size_t count) { + if (count > kCount) { + if (fPtr == fTStorage) { + fPtr = (T*)sk_malloc_throw(count, sizeof(T)); + memcpy((void*)fPtr, fTStorage, kCount * sizeof(T)); + } else { + fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T)); + } + } else if (count) { + if (fPtr != fTStorage) { + fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T)); + } + } else { + this->reset(0); + } + } + +private: + // Since we use uint32_t storage, we might be able to get more elements for free. + static const size_t kCountWithPadding = SkAlign4(kCountRequested*sizeof(T)) / sizeof(T); +#if defined(SK_BUILD_FOR_GOOGLE3) + // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions + // have multiple large stack allocations. + static const size_t kMaxBytes = 4 * 1024; + static const size_t kCount = kCountRequested * sizeof(T) > kMaxBytes + ? 
kMaxBytes / sizeof(T) + : kCountWithPadding; +#else + static const size_t kCount = kCountWithPadding; +#endif + + T* fPtr; + union { + uint32_t fStorage32[SkAlign4(kCount*sizeof(T)) >> 2]; + T fTStorage[1]; // do NOT want to invoke T::T() + }; +}; + +////////////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Pass the object and the storage that was offered during SkInPlaceNewCheck, and this will + * safely destroy (and free if it was dynamically allocated) the object. + */ +template <typename T> void SkInPlaceDeleteCheck(T* obj, void* storage) { + if (storage == obj) { + obj->~T(); + } else { + delete obj; + } +} + +/** + * Allocates T, using storage if it is large enough, and allocating on the heap (via new) if + * storage is not large enough. + * + * obj = SkInPlaceNewCheck<Type>(storage, size); + * ... + * SkInPlaceDeleteCheck(obj, storage); + */ +template<typename T, typename... Args> +T* SkInPlaceNewCheck(void* storage, size_t size, Args&&... args) { + return (sizeof(T) <= size) ? new (storage) T(std::forward<Args>(args)...) + : new T(std::forward<Args>(args)...); +} + +template <int N, typename T> class SkAlignedSTStorage { +public: + SkAlignedSTStorage() {} + SkAlignedSTStorage(SkAlignedSTStorage&&) = delete; + SkAlignedSTStorage(const SkAlignedSTStorage&) = delete; + SkAlignedSTStorage& operator=(SkAlignedSTStorage&&) = delete; + SkAlignedSTStorage& operator=(const SkAlignedSTStorage&) = delete; + + /** + * Returns void* because this object does not initialize the + * memory. Use placement new for types that require a constructor. + */ + void* get() { return fStorage; } + const void* get() const { return fStorage; } +private: + alignas(T) char fStorage[sizeof(T)*N]; +}; + +using SkAutoFree = std::unique_ptr<void, SkFunctionWrapper<void(void*), sk_free>>; + +template<typename C, std::size_t... Is> +constexpr auto SkMakeArrayFromIndexSequence(C c, std::index_sequence<Is...> is) +-> std::array<decltype(c(std::declval<typename decltype(is)::value_type>())), sizeof...(Is)> { + return {{ c(Is)... }}; +} + +template<size_t N, typename C> constexpr auto SkMakeArray(C c) +-> std::array<decltype(c(std::declval<typename std::index_sequence<N>::value_type>())), N> { + return SkMakeArrayFromIndexSequence(c, std::make_index_sequence<N>{}); +} + +#endif diff --git a/src/deps/skia/include/private/SkThreadAnnotations.h b/src/deps/skia/include/private/SkThreadAnnotations.h new file mode 100644 index 000000000..07652a3fb --- /dev/null +++ b/src/deps/skia/include/private/SkThreadAnnotations.h @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkThreadAnnotations_DEFINED +#define SkThreadAnnotations_DEFINED + +// The bulk of this code is cribbed from: +// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html + +#if defined(__clang__) && (!defined(SWIG)) +#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x)) +#else +#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op +#endif + +#define SK_CAPABILITY(x) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(capability(x)) + +#define SK_SCOPED_CAPABILITY \ + SK_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable) + +#define SK_GUARDED_BY(x) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x)) + +#define SK_PT_GUARDED_BY(x) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x)) + +#define SK_ACQUIRED_BEFORE(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__)) + +#define SK_ACQUIRED_AFTER(...) 
\ + SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__)) + +#define SK_REQUIRES(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(requires_capability(__VA_ARGS__)) + +#define SK_REQUIRES_SHARED(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(requires_shared_capability(__VA_ARGS__)) + +#define SK_ACQUIRE(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_capability(__VA_ARGS__)) + +#define SK_ACQUIRE_SHARED(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_shared_capability(__VA_ARGS__)) + +// Would be SK_RELEASE, but that is already in use as SK_DEBUG vs. SK_RELEASE. +#define SK_RELEASE_CAPABILITY(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(release_capability(__VA_ARGS__)) + +// For symmetry with SK_RELEASE_CAPABILITY. +#define SK_RELEASE_SHARED_CAPABILITY(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(release_shared_capability(__VA_ARGS__)) + +#define SK_TRY_ACQUIRE(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_capability(__VA_ARGS__)) + +#define SK_TRY_ACQUIRE_SHARED(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_shared_capability(__VA_ARGS__)) + +#define SK_EXCLUDES(...) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__)) + +#define SK_ASSERT_CAPABILITY(x) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(assert_capability(x)) + +#define SK_ASSERT_SHARED_CAPABILITY(x) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_capability(x)) + +#define SK_RETURN_CAPABILITY(x) \ + SK_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x)) + +#define SK_NO_THREAD_SAFETY_ANALYSIS \ + SK_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis) + +#if defined(SK_BUILD_FOR_GOOGLE3) && !defined(SK_BUILD_FOR_WASM_IN_GOOGLE3) + extern "C" { + void __google_potentially_blocking_region_begin(void); + void __google_potentially_blocking_region_end (void); + } + #define SK_POTENTIALLY_BLOCKING_REGION_BEGIN __google_potentially_blocking_region_begin() + #define SK_POTENTIALLY_BLOCKING_REGION_END __google_potentially_blocking_region_end() +#else + #define SK_POTENTIALLY_BLOCKING_REGION_BEGIN + #define SK_POTENTIALLY_BLOCKING_REGION_END +#endif + +#endif // SkThreadAnnotations_DEFINED diff --git a/src/deps/skia/include/private/SkThreadID.h b/src/deps/skia/include/private/SkThreadID.h new file mode 100644 index 000000000..e14388b3d --- /dev/null +++ b/src/deps/skia/include/private/SkThreadID.h @@ -0,0 +1,20 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkThreadID_DEFINED +#define SkThreadID_DEFINED + +#include "include/core/SkTypes.h" + +typedef int64_t SkThreadID; + +// SkMutex.h uses SkGetThredID in debug only code. +SkDEBUGCODE(SK_SPI) SkThreadID SkGetThreadID(); + +const SkThreadID kIllegalThreadID = 0; + +#endif // SkThreadID_DEFINED diff --git a/src/deps/skia/include/private/SkTo.h b/src/deps/skia/include/private/SkTo.h new file mode 100644 index 000000000..d788f7b26 --- /dev/null +++ b/src/deps/skia/include/private/SkTo.h @@ -0,0 +1,28 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ +#ifndef SkTo_DEFINED +#define SkTo_DEFINED + +#include "include/core/SkTypes.h" +#include "include/private/SkTFitsIn.h" + +template <typename D, typename S> constexpr D SkTo(S s) { + return SkASSERT(SkTFitsIn<D>(s)), + static_cast<D>(s); +} + +template <typename S> constexpr int8_t SkToS8(S x) { return SkTo<int8_t>(x); } +template <typename S> constexpr uint8_t SkToU8(S x) { return SkTo<uint8_t>(x); } +template <typename S> constexpr int16_t SkToS16(S x) { return SkTo<int16_t>(x); } +template <typename S> constexpr uint16_t SkToU16(S x) { return SkTo<uint16_t>(x); } +template <typename S> constexpr int32_t SkToS32(S x) { return SkTo<int32_t>(x); } +template <typename S> constexpr uint32_t SkToU32(S x) { return SkTo<uint32_t>(x); } +template <typename S> constexpr int SkToInt(S x) { return SkTo<int>(x); } +template <typename S> constexpr unsigned SkToUInt(S x) { return SkTo<unsigned>(x); } +template <typename S> constexpr size_t SkToSizeT(S x) { return SkTo<size_t>(x); } + +#endif // SkTo_DEFINED diff --git a/src/deps/skia/include/private/SkUniquePaintParamsID.h b/src/deps/skia/include/private/SkUniquePaintParamsID.h new file mode 100644 index 000000000..2cd89fd2f --- /dev/null +++ b/src/deps/skia/include/private/SkUniquePaintParamsID.h @@ -0,0 +1,35 @@ +/* + * Copyright 2022 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkUniquePaintParamsID_DEFINED +#define SkUniquePaintParamsID_DEFINED + +#include "include/core/SkTypes.h" + +// This class boils down to a unique uint that can be used instead of a variable length +// key derived from a PaintParams. +class SkUniquePaintParamsID { +public: + explicit SkUniquePaintParamsID(uint32_t id) : fID(id) { + SkASSERT(id != SK_InvalidUniqueID); + } + + static SkUniquePaintParamsID InvalidID() { return SkUniquePaintParamsID(); } + + SkUniquePaintParamsID() : fID(SK_InvalidUniqueID) {} + + bool operator==(const SkUniquePaintParamsID &that) const { return fID == that.fID; } + bool operator!=(const SkUniquePaintParamsID &that) const { return !(*this == that); } + + bool isValid() const { return fID != SK_InvalidUniqueID; } + uint32_t asUInt() const { return fID; } + +private: + uint32_t fID; +}; + +#endif // SkUniquePaintParamsID_DEFINED diff --git a/src/deps/skia/include/private/SkVx.h b/src/deps/skia/include/private/SkVx.h new file mode 100644 index 000000000..4f0f4ace0 --- /dev/null +++ b/src/deps/skia/include/private/SkVx.h @@ -0,0 +1,943 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKVX_DEFINED +#define SKVX_DEFINED + +// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>. +// +// This time we're leaning a bit less on platform-specific intrinsics and a bit +// more on Clang/GCC vector extensions, but still keeping the option open to +// drop in platform-specific intrinsics, actually more easily than before. +// +// We've also fixed a few of the caveats that used to make SkNx awkward to work +// with across translation units. skvx::Vec<N,T> always has N*sizeof(T) size +// and alignment and is safe to use across translation units freely. +// (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.) + +// Please try to keep this file independent of Skia headers. +#include <algorithm> // std::min, std::max +#include <cassert> // assert() +#include <cmath> // ceilf, floorf, truncf, roundf, sqrtf, etc. 
+#include <cstdint> // intXX_t +#include <cstring> // memcpy() +#include <initializer_list> // std::initializer_list +#include <utility> // std::index_sequence + +// Users may disable SIMD with SKNX_NO_SIMD, which may be set via compiler flags. +// The gn build has no option which sets SKNX_NO_SIMD. +// Use SKVX_USE_SIMD internally to avoid confusing double negation. +// Do not use 'defined' in a macro expansion. +#if !defined(SKNX_NO_SIMD) + #define SKVX_USE_SIMD 1 +#else + #define SKVX_USE_SIMD 0 +#endif + +#if SKVX_USE_SIMD + #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) + #include <immintrin.h> + #elif defined(__ARM_NEON) + #include <arm_neon.h> + #elif defined(__wasm_simd128__) + #include <wasm_simd128.h> + #endif +#endif + +// To avoid ODR violations, all methods must be force-inlined... +#if defined(_MSC_VER) + #define SKVX_ALWAYS_INLINE __forceinline +#else + #define SKVX_ALWAYS_INLINE __attribute__((always_inline)) +#endif + +// ... and all standalone functions must be static. Please use these helpers: +#define SI static inline +#define SIT template < typename T> SI +#define SIN template <int N > SI +#define SINT template <int N, typename T> SI +#define SINTU template <int N, typename T, typename U, \ + typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI + +namespace skvx { + +template <int N, typename T> +struct alignas(N*sizeof(T)) Vec; + +template <int... Ix, int N, typename T> +SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&); + +template <typename D, typename S> +SI D bit_pun(const S&); + +// All Vec have the same simple memory layout, the same as `T vec[N]`. +template <int N, typename T> +struct alignas(N*sizeof(T)) VecStorage { + SKVX_ALWAYS_INLINE VecStorage() = default; + SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {} + + Vec<N/2,T> lo, hi; +}; + +template <typename T> +struct VecStorage<4,T> { + SKVX_ALWAYS_INLINE VecStorage() = default; + SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {} + SKVX_ALWAYS_INLINE VecStorage(T x, T y, T z, T w) : lo(x,y), hi(z, w) {} + SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, T z, T w) : lo(xy), hi(z,w) {} + SKVX_ALWAYS_INLINE VecStorage(T x, T y, Vec<2,T> zw) : lo(x,y), hi(zw) {} + SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, Vec<2,T> zw) : lo(xy), hi(zw) {} + + SKVX_ALWAYS_INLINE Vec<2,T>& xy() { return lo; } + SKVX_ALWAYS_INLINE Vec<2,T>& zw() { return hi; } + SKVX_ALWAYS_INLINE T& x() { return lo.lo.val; } + SKVX_ALWAYS_INLINE T& y() { return lo.hi.val; } + SKVX_ALWAYS_INLINE T& z() { return hi.lo.val; } + SKVX_ALWAYS_INLINE T& w() { return hi.hi.val; } + + SKVX_ALWAYS_INLINE Vec<2,T> xy() const { return lo; } + SKVX_ALWAYS_INLINE Vec<2,T> zw() const { return hi; } + SKVX_ALWAYS_INLINE T x() const { return lo.lo.val; } + SKVX_ALWAYS_INLINE T y() const { return lo.hi.val; } + SKVX_ALWAYS_INLINE T z() const { return hi.lo.val; } + SKVX_ALWAYS_INLINE T w() const { return hi.hi.val; } + + // Exchange-based swizzles. These should take 1 cycle on NEON and 3 (pipelined) cycles on SSE. 
+ SKVX_ALWAYS_INLINE Vec<4,T> yxwz() const { return shuffle<1,0,3,2>(bit_pun<Vec<4,T>>(*this)); } + SKVX_ALWAYS_INLINE Vec<4,T> zwxy() const { return shuffle<2,3,0,1>(bit_pun<Vec<4,T>>(*this)); } + + Vec<2,T> lo, hi; +}; + +template <typename T> +struct VecStorage<2,T> { + SKVX_ALWAYS_INLINE VecStorage() = default; + SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {} + SKVX_ALWAYS_INLINE VecStorage(T x, T y) : lo(x), hi(y) {} + + SKVX_ALWAYS_INLINE T& x() { return lo.val; } + SKVX_ALWAYS_INLINE T& y() { return hi.val; } + + SKVX_ALWAYS_INLINE T x() const { return lo.val; } + SKVX_ALWAYS_INLINE T y() const { return hi.val; } + + // This exchange-based swizzle should take 1 cycle on NEON and 3 (pipelined) cycles on SSE. + SKVX_ALWAYS_INLINE Vec<2,T> yx() const { return shuffle<1,0>(bit_pun<Vec<2,T>>(*this)); } + + SKVX_ALWAYS_INLINE Vec<4,T> xyxy() const { + return Vec<4,T>(bit_pun<Vec<2,T>>(*this), bit_pun<Vec<2,T>>(*this)); + } + + Vec<1,T> lo, hi; +}; + +template <int N, typename T> +struct alignas(N*sizeof(T)) Vec : public VecStorage<N,T> { + static_assert((N & (N-1)) == 0, "N must be a power of 2."); + static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?"); + + // Methods belong here in the class declaration of Vec only if: + // - they must be here, like constructors or operator[]; + // - they'll definitely never want a specialized implementation. + // Other operations on Vec should be defined outside the type. + + SKVX_ALWAYS_INLINE Vec() = default; + + using VecStorage<N,T>::VecStorage; + + SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) { + T vals[N] = {0}; + memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T)); + + this->lo = Vec<N/2,T>::Load(vals + 0); + this->hi = Vec<N/2,T>::Load(vals + N/2); + } + + SKVX_ALWAYS_INLINE T operator[](int i) const { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; } + SKVX_ALWAYS_INLINE T& operator[](int i) { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; } + + SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) { + Vec v; + memcpy(&v, ptr, sizeof(Vec)); + return v; + } + SKVX_ALWAYS_INLINE void store(void* ptr) const { + memcpy(ptr, this, sizeof(Vec)); + } +}; + +template <typename T> +struct Vec<1,T> { + T val; + + SKVX_ALWAYS_INLINE Vec() = default; + + Vec(T s) : val(s) {} + + SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {} + + SKVX_ALWAYS_INLINE T operator[](int) const { return val; } + SKVX_ALWAYS_INLINE T& operator[](int) { return val; } + + SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) { + Vec v; + memcpy(&v, ptr, sizeof(Vec)); + return v; + } + SKVX_ALWAYS_INLINE void store(void* ptr) const { + memcpy(ptr, this, sizeof(Vec)); + } +}; + +// Ideally we'd only use bit_pun(), but until this file is always built as C++17 with constexpr if, +// we'll sometimes find need to use unchecked_bit_pun(). Please do check the call sites yourself! +template <typename D, typename S> +SI D unchecked_bit_pun(const S& s) { + D d; + memcpy(&d, &s, sizeof(D)); + return d; +} + +template <typename D, typename S> +SI D bit_pun(const S& s) { + static_assert(sizeof(D) == sizeof(S), ""); + return unchecked_bit_pun<D>(s); +} + +// Translate from a value type T to its corresponding Mask, the result of a comparison. +template <typename T> struct Mask { using type = T; }; +template <> struct Mask<float > { using type = int32_t; }; +template <> struct Mask<double> { using type = int64_t; }; +template <typename T> using M = typename Mask<T>::type; + +// Join two Vec<N,T> into one Vec<2N,T>. 
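As a quick illustration of the Vec plumbing defined above (brace initialization, Load()/store(), operator[], and the xy()/zw() views), here is a minimal hedged sketch; the values and the include path are assumptions, and the join() helper announced in the preceding comment follows right after it.

#include "include/private/SkVx.h"

static void vec_basics_sketch() {
    // Brace-init zero-pads missing lanes: {1,2} would become {1,2,0,0}.
    skvx::Vec<4,float> v = {1.0f, 2.0f, 3.0f, 4.0f};

    float buf[4];
    v.store(buf);                                          // plain memcpy out...
    skvx::Vec<4,float> w = skvx::Vec<4,float>::Load(buf);  // ...and back in

    float y = w[1];                    // per-lane access, y == 2.0f
    skvx::Vec<2,float> lo = w.xy();    // first two lanes
    skvx::Vec<2,float> hi = w.zw();    // last two lanes
    (void)y; (void)lo; (void)hi;
}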
+SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) { + Vec<2*N,T> v; + v.lo = lo; + v.hi = hi; + return v; +} + +// We have three strategies for implementing Vec operations: +// 1) lean on Clang/GCC vector extensions when available; +// 2) use map() to apply a scalar function lane-wise; +// 3) recurse on lo/hi to scalar portable implementations. +// We can slot in platform-specific implementations as overloads for particular Vec<N,T>, +// or often integrate them directly into the recursion of style 3), allowing fine control. + +#if SKVX_USE_SIMD && (defined(__clang__) || defined(__GNUC__)) + + // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly. + #if defined(__clang__) + template <int N, typename T> + using VExt = T __attribute__((ext_vector_type(N))); + + #elif defined(__GNUC__) + template <int N, typename T> + struct VExtHelper { + typedef T __attribute__((vector_size(N*sizeof(T)))) type; + }; + + template <int N, typename T> + using VExt = typename VExtHelper<N,T>::type; + + // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic + // to_vec<N,T>() below for N=4 and T=float. This workaround seems to help... + SI Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); } + #endif + + SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); } + SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); } + + SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) { + return to_vec<N,T>(to_vext(x) + to_vext(y)); + } + SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) { + return to_vec<N,T>(to_vext(x) - to_vext(y)); + } + SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) { + return to_vec<N,T>(to_vext(x) * to_vext(y)); + } + SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) { + return to_vec<N,T>(to_vext(x) / to_vext(y)); + } + + SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) { + return to_vec<N,T>(to_vext(x) ^ to_vext(y)); + } + SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) { + return to_vec<N,T>(to_vext(x) & to_vext(y)); + } + SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) { + return to_vec<N,T>(to_vext(x) | to_vext(y)); + } + + SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); } + SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); } + SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); } + + SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); } + SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); } + + SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) { + return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y)); + } + SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) { + return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y)); + } + SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) { + return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y)); + } + SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) { + return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y)); + } + SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) { + return bit_pun<Vec<N,M<T>>>(to_vext(x) < to_vext(y)); + } + SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) { + return bit_pun<Vec<N,M<T>>>(to_vext(x) > to_vext(y)); + } + +#else + + // Either SKNX_NO_SIMD is defined, or 
Clang/GCC vector extensions are not available. + // We'll implement things portably with N==1 scalar implementations and recursion onto them. + + // N == 1 scalar implementations. + SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; } + SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; } + SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; } + SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; } + + SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; } + SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; } + SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; } + + SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; } + SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; } + SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; } + + SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; } + SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; } + + SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) { + return x.val == y.val ? ~0 : 0; + } + SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) { + return x.val != y.val ? ~0 : 0; + } + SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) { + return x.val <= y.val ? ~0 : 0; + } + SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) { + return x.val >= y.val ? ~0 : 0; + } + SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) { + return x.val < y.val ? ~0 : 0; + } + SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) { + return x.val > y.val ? ~0 : 0; + } + + // Recurse on lo/hi down to N==1 scalar implementations. 
+ SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo + y.lo, x.hi + y.hi); + } + SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo - y.lo, x.hi - y.hi); + } + SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo * y.lo, x.hi * y.hi); + } + SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo / y.lo, x.hi / y.hi); + } + + SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo ^ y.lo, x.hi ^ y.hi); + } + SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo & y.lo, x.hi & y.hi); + } + SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo | y.lo, x.hi | y.hi); + } + + SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); } + SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); } + SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); } + + SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); } + SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); } + + SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo == y.lo, x.hi == y.hi); + } + SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo != y.lo, x.hi != y.hi); + } + SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo <= y.lo, x.hi <= y.hi); + } + SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo >= y.lo, x.hi >= y.hi); + } + SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo < y.lo, x.hi < y.hi); + } + SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) { + return join(x.lo > y.lo, x.hi > y.hi); + } +#endif + +// Scalar/vector operations splat the scalar to a vector. 
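To make the operators above concrete, a small hedged sketch (values invented): arithmetic is lane-wise, and comparisons yield a vector of integer masks (all bits set where true, zero where false) rather than bools; the scalar-splat overloads mentioned in the preceding comment come next.

#include "include/private/SkVx.h"

static void lanewise_ops_sketch() {
    skvx::Vec<4,int> a = {1, 2, 3, 4},
                     b = {4, 3, 2, 1};

    skvx::Vec<4,int> sum  = a + b;   // {5, 5, 5, 5}
    skvx::Vec<4,int> prod = a * b;   // {4, 6, 6, 4}

    // Comparisons return Vec<N,M<T>> masks: ~0 where true, 0 where false.
    skvx::Vec<4,int> gt = a > b;     // {0, 0, ~0, ~0}
    (void)sum; (void)prod; (void)gt;
}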
+SINTU Vec<N,T> operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) + y; } +SINTU Vec<N,T> operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) - y; } +SINTU Vec<N,T> operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) * y; } +SINTU Vec<N,T> operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) / y; } +SINTU Vec<N,T> operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^ y; } +SINTU Vec<N,T> operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) & y; } +SINTU Vec<N,T> operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) | y; } +SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; } +SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; } +SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; } +SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; } +SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) < y; } +SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) > y; } + +SINTU Vec<N,T> operator+ (const Vec<N,T>& x, U y) { return x + Vec<N,T>(y); } +SINTU Vec<N,T> operator- (const Vec<N,T>& x, U y) { return x - Vec<N,T>(y); } +SINTU Vec<N,T> operator* (const Vec<N,T>& x, U y) { return x * Vec<N,T>(y); } +SINTU Vec<N,T> operator/ (const Vec<N,T>& x, U y) { return x / Vec<N,T>(y); } +SINTU Vec<N,T> operator^ (const Vec<N,T>& x, U y) { return x ^ Vec<N,T>(y); } +SINTU Vec<N,T> operator& (const Vec<N,T>& x, U y) { return x & Vec<N,T>(y); } +SINTU Vec<N,T> operator| (const Vec<N,T>& x, U y) { return x | Vec<N,T>(y); } +SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); } +SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); } +SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); } +SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); } +SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x < Vec<N,T>(y); } +SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x > Vec<N,T>(y); } + +SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); } +SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); } +SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); } +SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); } +SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); } +SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); } +SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); } + +SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); } +SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); } +SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); } +SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); } +SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); } +SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); } +SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); } + +SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); } +SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); } + +// Some operations we want are not expressible with Clang/GCC vector extensions. 
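A hedged sketch of the scalar/vector and compound-assignment overloads just defined (values invented): a scalar operand is splatted into a full vector first, and for float vectors the comparison mask type is int32_t per the Mask mapping above. The select helpers described in the preceding comment follow next.

#include "include/private/SkVx.h"

static void splat_sketch() {
    skvx::Vec<4,float> v = {0.25f, 0.5f, 0.75f, 1.0f};

    skvx::Vec<4,float> scaled = 255.0f * v;  // 255.0f splats to {255,255,255,255} first
    v += 0.5f;                               // compound assignment, also via a splat
    auto big = v > 0.9f;                     // Vec<4,int32_t> lane mask
    (void)scaled; (void)big;
}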
+ +// Clang can reason about naive_if_then_else() and optimize through it better +// than if_then_else(), so it's sometimes useful to call it directly when we +// think an entire expression should optimize away, e.g. min()/max(). +SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) { + return bit_pun<Vec<N,T>>(( cond & bit_pun<Vec<N, M<T>>>(t)) | + (~cond & bit_pun<Vec<N, M<T>>>(e)) ); +} + +SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) { + // In practice this scalar implementation is unlikely to be used. See next if_then_else(). + return bit_pun<Vec<1,T>>(( cond & bit_pun<Vec<1, M<T>>>(t)) | + (~cond & bit_pun<Vec<1, M<T>>>(e)) ); +} +SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) { + // Specializations inline here so they can generalize what types the apply to. + // (This header is used in C++14 contexts, so we have to kind of fake constexpr if.) +#if SKVX_USE_SIMD && defined(__AVX2__) + if /*constexpr*/ (N*sizeof(T) == 32) { + return unchecked_bit_pun<Vec<N,T>>(_mm256_blendv_epi8(unchecked_bit_pun<__m256i>(e), + unchecked_bit_pun<__m256i>(t), + unchecked_bit_pun<__m256i>(cond))); + } +#endif +#if SKVX_USE_SIMD && defined(__SSE4_1__) + if /*constexpr*/ (N*sizeof(T) == 16) { + return unchecked_bit_pun<Vec<N,T>>(_mm_blendv_epi8(unchecked_bit_pun<__m128i>(e), + unchecked_bit_pun<__m128i>(t), + unchecked_bit_pun<__m128i>(cond))); + } +#endif +#if SKVX_USE_SIMD && defined(__ARM_NEON) + if /*constexpr*/ (N*sizeof(T) == 16) { + return unchecked_bit_pun<Vec<N,T>>(vbslq_u8(unchecked_bit_pun<uint8x16_t>(cond), + unchecked_bit_pun<uint8x16_t>(t), + unchecked_bit_pun<uint8x16_t>(e))); + } +#endif + // Recurse for large vectors to try to hit the specializations above. + if /*constexpr*/ (N*sizeof(T) > 16) { + return join(if_then_else(cond.lo, t.lo, e.lo), + if_then_else(cond.hi, t.hi, e.hi)); + } + // This default can lead to better code than the recursing onto scalars. + return naive_if_then_else(cond, t, e); +} + +SIT bool any(const Vec<1,T>& x) { return x.val != 0; } +SINT bool any(const Vec<N,T>& x) { +#if SKVX_USE_SIMD && defined(__wasm_simd128__) + if constexpr (N == 4 && sizeof(T) == 4) { + return wasm_i32x4_any_true(unchecked_bit_pun<VExt<4,int>>(x)); + } +#endif + return any(x.lo) + || any(x.hi); +} + +SIT bool all(const Vec<1,T>& x) { return x.val != 0; } +SINT bool all(const Vec<N,T>& x) { +#if SKVX_USE_SIMD && defined(__AVX2__) + if /*constexpr*/ (N*sizeof(T) == 32) { + return _mm256_testc_si256(unchecked_bit_pun<__m256i>(x), + _mm256_set1_epi32(-1)); + } +#endif +#if SKVX_USE_SIMD && defined(__SSE4_1__) + if /*constexpr*/ (N*sizeof(T) == 16) { + return _mm_testc_si128(unchecked_bit_pun<__m128i>(x), + _mm_set1_epi32(-1)); + } +#endif +#if SKVX_USE_SIMD && defined(__wasm_simd128__) + if /*constexpr*/ (N == 4 && sizeof(T) == 4) { + return wasm_i32x4_all_true(unchecked_bit_pun<VExt<4,int>>(x)); + } +#endif + return all(x.lo) + && all(x.hi); +} + +// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane. +// TODO: implement with map()? 
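A hedged sketch of the select-and-reduce helpers above (threshold and values invented): a comparison mask drives if_then_else(), while any()/all() collapse a mask to a single bool; the cast() described in the preceding comment is defined next.

#include "include/private/SkVx.h"

static void select_sketch() {
    skvx::Vec<4,float> x = {-1.0f, 0.25f, 0.75f, 2.0f};

    auto too_big = x > 1.0f;   // lane mask, Vec<4,int32_t>
    skvx::Vec<4,float> clamped =
            skvx::if_then_else(too_big, skvx::Vec<4,float>(1.0f), x);

    if (skvx::any(too_big))    { /* at least one lane was clamped    */ }
    if (skvx::all(x >= -1.0f)) { /* every lane was already >= -1.0f  */ }
    (void)clamped;
}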
+template <typename D, typename S> +SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; } + +template <typename D, int N, typename S> +SI Vec<N,D> cast(const Vec<N,S>& src) { +#if SKVX_USE_SIMD && defined(__clang__) + return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>)); +#else + return join(cast<D>(src.lo), cast<D>(src.hi)); +#endif +} + +// min/max match logic of std::min/std::max, which is important when NaN is involved. +SIT T min(const Vec<1,T>& x) { return x.val; } +SIT T max(const Vec<1,T>& x) { return x.val; } +SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); } +SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); } + +SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(y < x, y, x); } +SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(x < y, y, x); } + +SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); } +SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); } +SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); } +SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); } + +// pin matches the logic of SkTPin, which is important when NaN is involved. It always returns +// values in the range lo..hi, and if x is NaN, it returns lo. +SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) { + return max(lo, min(x, hi)); +} + +// Shuffle values from a vector pretty arbitrarily: +// skvx::Vec<4,float> rgba = {R,G,B,A}; +// shuffle<2,1,0,3> (rgba) ~> {B,G,R,A} +// shuffle<2,1> (rgba) ~> {B,G} +// shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G} +// shuffle<3,3,3,3> (rgba) ~> {A,A,A,A} +// The only real restriction is that the output also be a legal N=power-of-two sknx::Vec. +template <int... Ix, int N, typename T> +SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) { +#if SKVX_USE_SIMD && defined(__clang__) + // TODO: can we just always use { x[Ix]... }? + return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...)); +#else + return { x[Ix]... }; +#endif +} + +// Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... }, +// or map(fn, x,y) for a vector of fn(x[i], y[i]), etc. + +template <typename Fn, typename... Args, size_t... I> +SI auto map(std::index_sequence<I...>, + Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> { + auto lane = [&](size_t i) +#if defined(__clang__) + // CFI, specifically -fsanitize=cfi-icall, seems to give a false positive here, + // with errors like "control flow integrity check for type 'float (float) + // noexcept' failed during indirect function call... note: sqrtf.cfi_jt defined + // here". But we can be quite sure fn is the right type: it's all inferred! + // So, stifle CFI in this function. + __attribute__((no_sanitize("cfi"))) +#endif + { return fn(args[i]...); }; + + return { lane(I)... }; +} + +template <typename Fn, int N, typename T, typename... Rest> +auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) { + // Derive an {0...N-1} index_sequence from the size of the first arg: N lanes in, N lanes out. 
+ return map(std::make_index_sequence<N>{}, fn, first,rest...); +} + +SIN Vec<N,float> ceil(const Vec<N,float>& x) { return map( ceilf, x); } +SIN Vec<N,float> floor(const Vec<N,float>& x) { return map(floorf, x); } +SIN Vec<N,float> trunc(const Vec<N,float>& x) { return map(truncf, x); } +SIN Vec<N,float> round(const Vec<N,float>& x) { return map(roundf, x); } +SIN Vec<N,float> sqrt(const Vec<N,float>& x) { return map( sqrtf, x); } +SIN Vec<N,float> abs(const Vec<N,float>& x) { return map( fabsf, x); } +SIN Vec<N,float> fma(const Vec<N,float>& x, + const Vec<N,float>& y, + const Vec<N,float>& z) { + // I don't understand why Clang's codegen is terrible if we write map(fmaf, x,y,z) directly. + auto fn = [](float x, float y, float z) { return fmaf(x,y,z); }; + return map(fn, x,y,z); +} + +SI Vec<1,int> lrint(const Vec<1,float>& x) { + return (int)lrintf(x.val); +} +SIN Vec<N,int> lrint(const Vec<N,float>& x) { +#if SKVX_USE_SIMD && defined(__AVX__) + if /*constexpr*/ (N == 8) { + return unchecked_bit_pun<Vec<N,int>>(_mm256_cvtps_epi32(unchecked_bit_pun<__m256>(x))); + } +#endif +#if SKVX_USE_SIMD && defined(__SSE__) + if /*constexpr*/ (N == 4) { + return unchecked_bit_pun<Vec<N,int>>(_mm_cvtps_epi32(unchecked_bit_pun<__m128>(x))); + } +#endif + return join(lrint(x.lo), + lrint(x.hi)); +} + +SIN Vec<N,float> fract(const Vec<N,float>& x) { return x - floor(x); } + +// The default logic for to_half/from_half is borrowed from skcms, +// and assumes inputs are finite and treat/flush denorm half floats as/to zero. +// Key constants to watch for: +// - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias; +// - a half is 16-bit, 1-5-10 sign-exponent-mantissa, with 15 exponent bias. +SIN Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) { + Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x), + s = sem & 0x8000'0000, + em = sem ^ s, + is_denorm = em < 0x3880'0000; + return cast<uint16_t>(if_then_else(is_denorm, Vec<N,uint32_t>(0) + , (s>>16) + (em>>13) - ((127-15)<<10))); +} +SIN Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) { + Vec<N,uint32_t> wide = cast<uint32_t>(x), + s = wide & 0x8000, + em = wide ^ s; + auto is_denorm = bit_pun<Vec<N,int32_t>>(em < 0x0400); + return if_then_else(is_denorm, Vec<N,float>(0) + , bit_pun<Vec<N,float>>( (s<<16) + (em<<13) + ((127-15)<<23) )); +} + +// Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called. 
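A hedged sketch of the per-lane math helpers above (values invented): the named wrappers (sqrt, lrint, fract, ...) ride on map(), which lifts any scalar function across the lanes; the N=1 to_half/from_half base cases mentioned in the preceding comment follow next.

#include "include/private/SkVx.h"

#include <cmath>

static void math_sketch() {
    skvx::Vec<4,float> x = {0.25f, 1.0f, 2.25f, 9.0f};

    skvx::Vec<4,float> r = skvx::sqrt(x);    // {0.5, 1.0, 1.5, 3.0}
    skvx::Vec<4,int>   i = skvx::lrint(x);   // round-to-nearest, as 32-bit ints
    skvx::Vec<4,float> f = skvx::fract(x);   // x - floor(x)

    // map() with an arbitrary scalar function, applied lane by lane.
    auto logs = skvx::map([](float v) { return std::log(v); }, x);
    (void)r; (void)i; (void)f; (void)logs;
}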
+SI Vec<1,uint16_t> to_half(const Vec<1,float>& x) { return to_half_finite_ftz(x); } +SI Vec<1,float> from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); } + +SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) { +#if SKVX_USE_SIMD && defined(__F16C__) + if /*constexpr*/ (N == 8) { + return unchecked_bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(unchecked_bit_pun<__m256>(x), + _MM_FROUND_CUR_DIRECTION)); + } +#endif +#if SKVX_USE_SIMD && defined(__aarch64__) + if /*constexpr*/ (N == 4) { + return unchecked_bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(unchecked_bit_pun<float32x4_t>(x))); + + } +#endif + if /*constexpr*/ (N > 4) { + return join(to_half(x.lo), + to_half(x.hi)); + } + return to_half_finite_ftz(x); +} + +SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) { +#if SKVX_USE_SIMD && defined(__F16C__) + if /*constexpr*/ (N == 8) { + return unchecked_bit_pun<Vec<N,float>>(_mm256_cvtph_ps(unchecked_bit_pun<__m128i>(x))); + } +#endif +#if SKVX_USE_SIMD && defined(__aarch64__) + if /*constexpr*/ (N == 4) { + return unchecked_bit_pun<Vec<N,float>>(vcvt_f32_f16(unchecked_bit_pun<float16x4_t>(x))); + } +#endif + if /*constexpr*/ (N > 4) { + return join(from_half(x.lo), + from_half(x.hi)); + } + return from_half_finite_ftz(x); +} + +// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit. +SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) { + return cast<uint8_t>( (x+127)/255 ); +} + +// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit, +// and is always perfect when x or y is 0 or 255. +SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) { + // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above. + // We happen to have historically picked (x*y+x)/256. + auto X = cast<uint16_t>(x), + Y = cast<uint16_t>(y); + return cast<uint8_t>( (X*Y+X)/256 ); +} + +// The ScaledDividerU32 takes a divisor > 1, and creates a function divide(numerator) that +// calculates a numerator / denominator. For this to be rounded properly, numerator should have +// half added in: +// divide(numerator + half) == floor(numerator/denominator + 1/2). +// +// This gives an answer within +/- 1 from the true value. +// +// Derivation of half: +// numerator/denominator + 1/2 = (numerator + half) / d +// numerator + denominator / 2 = numerator + half +// half = denominator / 2. +// +// Because half is divided by 2, that division must also be rounded. +// half == denominator / 2 = (denominator + 1) / 2. +// +// The divisorFactor is just a scaled value: +// divisorFactor = (1 / divisor) * 2 ^ 32. +// The maximum that can be divided and rounded is UINT_MAX - half. 
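A hedged numeric check of the derivation above, using the ScaledDividerU32 class defined just below (divisor and numerators invented): for divisor 255, half == (255 + 1) / 2 == 128, and divide(n + half) lands within +/- 1 of floor(n/255 + 1/2).

#include "include/private/SkVx.h"

#include <cstdint>

static void scaled_divider_sketch() {
    skvx::ScaledDividerU32 div255(255);   // divisorFactor ~= 2^32 / 255, half() == 128

    skvx::Vec<4,uint32_t> numerators = {0, 1000, 65025, 4000000};
    skvx::Vec<4,uint32_t> rounded = div255.divide(numerators + div255.half());
    // e.g. lane 1: floor(1000/255 + 0.5) == 4; lane 2: 65025/255 == 255 exactly.
    (void)rounded;
}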
+class ScaledDividerU32 { +public: + explicit ScaledDividerU32(uint32_t divisor) + : fDivisorFactor{(uint32_t)(std::round((1.0 / divisor) * (1ull << 32)))} + , fHalf{(divisor + 1) >> 1} { + assert(divisor > 1); + } + + Vec<4, uint32_t> divide(const Vec<4, uint32_t>& numerator) const { +#if SKVX_USE_SIMD && defined(__ARM_NEON) + uint64x2_t hi = vmull_n_u32(vget_high_u32(to_vext(numerator)), fDivisorFactor); + uint64x2_t lo = vmull_n_u32(vget_low_u32(to_vext(numerator)), fDivisorFactor); + + return to_vec<4, uint32_t>(vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32))); +#else + return cast<uint32_t>((cast<uint64_t>(numerator) * fDivisorFactor) >> 32); +#endif + } + + uint32_t half() const { return fHalf; } + +private: + const uint32_t fDivisorFactor; + const uint32_t fHalf; +}; + +#if SKVX_USE_SIMD && defined(__ARM_NEON) +// With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long). +SI Vec<8,uint16_t> mull(const Vec<8,uint8_t>& x, + const Vec<8,uint8_t>& y) { + return to_vec<8,uint16_t>(vmull_u8(to_vext(x), + to_vext(y))); +} + +SIN std::enable_if_t<(N < 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x, + const Vec<N,uint8_t>& y) { + // N < 8 --> double up data until N == 8, returning the part we need. + return mull(join(x,x), + join(y,y)).lo; +} + +SIN std::enable_if_t<(N > 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x, + const Vec<N,uint8_t>& y) { + // N > 8 --> usual join(lo,hi) strategy to recurse down to N == 8. + return join(mull(x.lo, y.lo), + mull(x.hi, y.hi)); +} + +#else + +// Nothing special when we don't have NEON... just cast up to 16-bit and multiply. +SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x, + const Vec<N,uint8_t>& y) { + return cast<uint16_t>(x) + * cast<uint16_t>(y); +} +#endif + +// Allow floating point contraction. e.g., allow a*x + y to be compiled to a single FMA even though +// it introduces LSB differences on platforms that don't have an FMA instruction. +#if defined(__clang__) +#pragma STDC FP_CONTRACT ON +#endif + +// Approximates the inverse cosine of x within 0.96 degrees using the rational polynomial: +// +// acos(x) ~= (bx^3 + ax) / (dx^4 + cx^2 + 1) + pi/2 +// +// See: https://stackoverflow.com/a/36387954 +// +// For a proof of max error, see the "SkVx_approx_acos" unit test. +// +// NOTE: This function deviates immediately from pi and 0 outside -1 and 1. (The derivatives are +// infinite at -1 and 1). So the input must still be clamped between -1 and 1. +#define SKVX_APPROX_ACOS_MAX_ERROR SkDegreesToRadians(.96f) +SIN Vec<N,float> approx_acos(Vec<N,float> x) { + constexpr static float a = -0.939115566365855f; + constexpr static float b = 0.9217841528914573f; + constexpr static float c = -1.2845906244690837f; + constexpr static float d = 0.295624144969963174f; + constexpr static float pi_over_2 = 1.5707963267948966f; + auto xx = x*x; + auto numer = b*xx + a; + auto denom = xx*(d*xx + c) + 1; + return x * (numer/denom) + pi_over_2; +} + +#if defined(__clang__) +#pragma STDC FP_CONTRACT DEFAULT +#endif + +// De-interleaving load of 4 vectors. +// +// WARNING: These are really only supported well on NEON. Consider restructuring your data before +// resorting to these methods. 
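
For a sense of how the strided_load4 overloads below are meant to be used, here is a hypothetical caller that de-interleaves 16 RGBA pixels and premultiplies them with approx_scale() from earlier in this header; the include path and function name are assumptions.

    #include "include/private/SkVx.h"   // assumed path of the header in this diff
    #include <cstdint>

    void premul16(const uint8_t rgba[64], uint8_t out[64]) {
        skvx::Vec<16, uint8_t> r, g, b, a;
        skvx::strided_load4(rgba, r, g, b, a);   // r = {p0.r, p1.r, ...}, etc.

        r = skvx::approx_scale(r, a);
        g = skvx::approx_scale(g, a);
        b = skvx::approx_scale(b, a);

        // Re-interleave by hand; this header only provides the de-interleaving loads.
        for (int i = 0; i < 16; i++) {
            out[4*i + 0] = r[i];
            out[4*i + 1] = g[i];
            out[4*i + 2] = b[i];
            out[4*i + 3] = a[i];
        }
    }
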
+SIT void strided_load4(const T* v, + skvx::Vec<1,T>& a, + skvx::Vec<1,T>& b, + skvx::Vec<1,T>& c, + skvx::Vec<1,T>& d) { + a.val = v[0]; + b.val = v[1]; + c.val = v[2]; + d.val = v[3]; +} +SINT void strided_load4(const T* v, + skvx::Vec<N,T>& a, + skvx::Vec<N,T>& b, + skvx::Vec<N,T>& c, + skvx::Vec<N,T>& d) { + strided_load4(v, a.lo, b.lo, c.lo, d.lo); + strided_load4(v + 4*(N/2), a.hi, b.hi, c.hi, d.hi); +} +#if SKVX_USE_SIMD && defined(__ARM_NEON) +#define IMPL_LOAD4_TRANSPOSED(N, T, VLD) \ +SI void strided_load4(const T* v, \ + skvx::Vec<N,T>& a, \ + skvx::Vec<N,T>& b, \ + skvx::Vec<N,T>& c, \ + skvx::Vec<N,T>& d) { \ + auto mat = VLD(v); \ + a = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[0]); \ + b = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[1]); \ + c = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[2]); \ + d = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[3]); \ +} +IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32); +IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16); +IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8); +IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32); +IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16); +IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8); +IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32); +IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32); +IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16); +IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8); +IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32); +IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16); +IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8); +IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32); +#undef IMPL_LOAD4_TRANSPOSED + +#elif SKVX_USE_SIMD && defined(__SSE__) + +SI void strided_load4(const float* v, + Vec<4,float>& a, + Vec<4,float>& b, + Vec<4,float>& c, + Vec<4,float>& d) { + using skvx::bit_pun; + __m128 a_ = _mm_loadu_ps(v); + __m128 b_ = _mm_loadu_ps(v+4); + __m128 c_ = _mm_loadu_ps(v+8); + __m128 d_ = _mm_loadu_ps(v+12); + _MM_TRANSPOSE4_PS(a_, b_, c_, d_); + a = bit_pun<Vec<4,float>>(a_); + b = bit_pun<Vec<4,float>>(b_); + c = bit_pun<Vec<4,float>>(c_); + d = bit_pun<Vec<4,float>>(d_); +} +#endif + +// De-interleaving load of 2 vectors. +// +// WARNING: These are really only supported well on NEON. Consider restructuring your data before +// resorting to these methods. 
+SIT void strided_load2(const T* v, skvx::Vec<1,T>& a, skvx::Vec<1,T>& b) { + a.val = v[0]; + b.val = v[1]; +} +SINT void strided_load2(const T* v, skvx::Vec<N,T>& a, skvx::Vec<N,T>& b) { + strided_load2(v, a.lo, b.lo); + strided_load2(v + 2*(N/2), a.hi, b.hi); +} +#if SKVX_USE_SIMD && defined(__ARM_NEON) +#define IMPL_LOAD2_TRANSPOSED(N, T, VLD) \ +SI void strided_load2(const T* v, skvx::Vec<N,T>& a, skvx::Vec<N,T>& b) { \ + auto mat = VLD(v); \ + a = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[0]); \ + b = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[1]); \ +} +IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32); +IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16); +IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8); +IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32); +IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16); +IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8); +IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32); +IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32); +IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16); +IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8); +IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32); +IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16); +IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8); +IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32); +#undef IMPL_LOAD2_TRANSPOSED +#endif + +} // namespace skvx + +#undef SINTU +#undef SINT +#undef SIN +#undef SIT +#undef SI +#undef SKVX_ALWAYS_INLINE +#undef SKVX_USE_SIMD + +#endif//SKVX_DEFINED diff --git a/src/deps/skia/include/private/SkWeakRefCnt.h b/src/deps/skia/include/private/SkWeakRefCnt.h new file mode 100644 index 000000000..2b577342f --- /dev/null +++ b/src/deps/skia/include/private/SkWeakRefCnt.h @@ -0,0 +1,170 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkWeakRefCnt_DEFINED +#define SkWeakRefCnt_DEFINED + +#include "include/core/SkRefCnt.h" +#include <atomic> + +/** \class SkWeakRefCnt + + SkWeakRefCnt is the base class for objects that may be shared by multiple + objects. When an existing strong owner wants to share a reference, it calls + ref(). When a strong owner wants to release its reference, it calls + unref(). When the shared object's strong reference count goes to zero as + the result of an unref() call, its (virtual) weak_dispose method is called. + It is an error for the destructor to be called explicitly (or via the + object going out of scope on the stack or calling delete) if + getRefCnt() > 1. + + In addition to strong ownership, an owner may instead obtain a weak + reference by calling weak_ref(). A call to weak_ref() must be balanced by a + call to weak_unref(). To obtain a strong reference from a weak reference, + call try_ref(). If try_ref() returns true, the owner's pointer is now also + a strong reference on which unref() must be called. Note that this does not + affect the original weak reference, weak_unref() must still be called. When + the weak reference count goes to zero, the object is deleted. While the + weak reference count is positive and the strong reference count is zero the + object still exists, but will be in the disposed state. It is up to the + object to define what this means. + + Note that a strong reference implicitly implies a weak reference. As a + result, it is allowable for the owner of a strong ref to call try_ref(). + This will have the same effect as calling ref(), but may be more expensive. + + Example: + + SkWeakRefCnt myRef = strongRef.weak_ref(); + ... 
// strongRef.unref() may or may not be called + if (myRef.try_ref()) { + ... // use myRef + myRef.unref(); + } else { + // myRef is in the disposed state + } + myRef.weak_unref(); +*/ +class SK_API SkWeakRefCnt : public SkRefCnt { +public: + /** Default construct, initializing the reference counts to 1. + The strong references collectively hold one weak reference. When the + strong reference count goes to zero, the collectively held weak + reference is released. + */ + SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {} + + /** Destruct, asserting that the weak reference count is 1. + */ + ~SkWeakRefCnt() override { +#ifdef SK_DEBUG + SkASSERT(getWeakCnt() == 1); + fWeakCnt.store(0, std::memory_order_relaxed); +#endif + } + +#ifdef SK_DEBUG + /** Return the weak reference count. */ + int32_t getWeakCnt() const { + return fWeakCnt.load(std::memory_order_relaxed); + } +#endif + +private: + /** If fRefCnt is 0, returns 0. + * Otherwise increments fRefCnt, acquires, and returns the old value. + */ + int32_t atomic_conditional_acquire_strong_ref() const { + int32_t prev = fRefCnt.load(std::memory_order_relaxed); + do { + if (0 == prev) { + break; + } + } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire, + std::memory_order_relaxed)); + return prev; + } + +public: + /** Creates a strong reference from a weak reference, if possible. The + caller must already be an owner. If try_ref() returns true the owner + is in posession of an additional strong reference. Both the original + reference and new reference must be properly unreferenced. If try_ref() + returns false, no strong reference could be created and the owner's + reference is in the same state as before the call. + */ + bool SK_WARN_UNUSED_RESULT try_ref() const { + if (atomic_conditional_acquire_strong_ref() != 0) { + // Acquire barrier (L/SL), if not provided above. + // Prevents subsequent code from happening before the increment. + return true; + } + return false; + } + + /** Increment the weak reference count. Must be balanced by a call to + weak_unref(). + */ + void weak_ref() const { + SkASSERT(getRefCnt() > 0); + SkASSERT(getWeakCnt() > 0); + // No barrier required. + (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed); + } + + /** Decrement the weak reference count. If the weak reference count is 1 + before the decrement, then call delete on the object. Note that if this + is the case, then the object needs to have been allocated via new, and + not on the stack. + */ + void weak_unref() const { + SkASSERT(getWeakCnt() > 0); + // A release here acts in place of all releases we "should" have been doing in ref(). + if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) { + // Like try_ref(), the acquire is only needed on success, to make sure + // code in internal_dispose() doesn't happen before the decrement. +#ifdef SK_DEBUG + // so our destructor won't complain + fWeakCnt.store(1, std::memory_order_relaxed); +#endif + this->INHERITED::internal_dispose(); + } + } + + /** Returns true if there are no strong references to the object. When this + is the case all future calls to try_ref() will return false. + */ + bool weak_expired() const { + return fRefCnt.load(std::memory_order_relaxed) == 0; + } + +protected: + /** Called when the strong reference count goes to zero. This allows the + object to free any resources it may be holding. Weak references may + still exist and their level of allowed access to the object is defined + by the object's class. 
+ */ + virtual void weak_dispose() const { + } + +private: + /** Called when the strong reference count goes to zero. Calls weak_dispose + on the object and releases the implicit weak reference held + collectively by the strong references. + */ + void internal_dispose() const override { + weak_dispose(); + weak_unref(); + } + + /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */ + mutable std::atomic<int32_t> fWeakCnt; + + using INHERITED = SkRefCnt; +}; + +#endif diff --git a/src/deps/skia/include/private/chromium/BUILD.bazel b/src/deps/skia/include/private/chromium/BUILD.bazel new file mode 100644 index 000000000..8633eae54 --- /dev/null +++ b/src/deps/skia/include/private/chromium/BUILD.bazel @@ -0,0 +1,22 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "GrSlug_hdr", + hdrs = ["GrSlug.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkRect_hdr", + "//include/core:SkRefCnt_hdr", + ], +) + +generated_cc_atom( + name = "SkChromeRemoteGlyphCache_hdr", + hdrs = ["SkChromeRemoteGlyphCache.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkData_hdr", + "//include/core:SkRefCnt_hdr", + "//include/utils:SkNoDrawCanvas_hdr", + ], +) diff --git a/src/deps/skia/include/private/chromium/GrSlug.h b/src/deps/skia/include/private/chromium/GrSlug.h new file mode 100644 index 000000000..8adbff45c --- /dev/null +++ b/src/deps/skia/include/private/chromium/GrSlug.h @@ -0,0 +1,41 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrSlug_DEFINED +#define GrSlug_DEFINED + +#include "include/core/SkRect.h" +#include "include/core/SkRefCnt.h" + +class SkCanvas; +class SkPaint; +class SkTextBlob; + +// You can use GrSlug to simulate drawTextBlob by defining the following at compile time. +// SK_EXPERIMENTAL_SIMULATE_DRAWGLYPHRUNLIST_WITH_SLUG +// For Skia, add this to your args.gn file. +// extra_cflags = ["-D", "SK_EXPERIMENTAL_SIMULATE_DRAWGLYPHRUNLIST_WITH_SLUG"] + +// GrSlug encapsulates an SkTextBlob at a specific origin, using a specific paint. It can be +// manipulated using matrix and clip changes to the canvas. If the canvas is transformed, then +// the GrSlug will also transform with smaller glyphs using bi-linear interpolation to render. You +// can think of a GrSlug as making a rubber stamp out of a SkTextBlob. +class SK_API GrSlug : public SkRefCnt { +public: + ~GrSlug() override; + // Return nullptr if the blob would not draw. This is not because of clipping, but because of + // some paint optimization. The GrSlug is captured as if drawn using drawTextBlob. + static sk_sp<GrSlug> ConvertBlob( + SkCanvas* canvas, const SkTextBlob& blob, SkPoint origin, const SkPaint& paint); + + // Draw the GrSlug obeying the canvas's mapping and clipping. + void draw(SkCanvas* canvas); + + virtual SkRect sourceBounds() const = 0; + virtual const SkPaint& paint() const = 0; +}; +#endif // GrSlug_DEFINED diff --git a/src/deps/skia/include/private/chromium/SkChromeRemoteGlyphCache.h b/src/deps/skia/include/private/chromium/SkChromeRemoteGlyphCache.h new file mode 100644 index 000000000..033b03fe6 --- /dev/null +++ b/src/deps/skia/include/private/chromium/SkChromeRemoteGlyphCache.h @@ -0,0 +1,143 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkChromeRemoteGlyphCache_DEFINED +#define SkChromeRemoteGlyphCache_DEFINED + +#include <memory> +#include <vector> + +#include "include/core/SkData.h" +#include "include/core/SkRefCnt.h" +#include "include/utils/SkNoDrawCanvas.h" + +class SkAutoDescriptor; +struct SkPackedGlyphID; +class SkStrikeCache; +class SkStrikeClientImpl; +class SkStrikeServer; +class SkStrikeServerImpl; +class SkTypeface; + +using SkDiscardableHandleId = uint32_t; +// This class is not thread-safe. +class SkStrikeServer { +public: + // An interface used by the server to create handles for pinning SkStrike + // entries on the remote client. + class DiscardableHandleManager { + public: + SK_SPI virtual ~DiscardableHandleManager() = default; + + // Creates a new *locked* handle and returns a unique ID that can be used to identify + // it on the remote client. + SK_SPI virtual SkDiscardableHandleId createHandle() = 0; + + // Returns true if the handle could be successfully locked. The server can + // assume it will remain locked until the next set of serialized entries is + // pulled from the SkStrikeServer. + // If returns false, the cache entry mapped to the handle has been deleted + // on the client. Any subsequent attempts to lock the same handle are not + // allowed. + SK_SPI virtual bool lockHandle(SkDiscardableHandleId) = 0; + + // Returns true if a handle has been deleted on the remote client. It is + // invalid to use a handle id again with this manager once this returns true. + SK_SPI virtual bool isHandleDeleted(SkDiscardableHandleId) = 0; + }; + + SK_SPI explicit SkStrikeServer(DiscardableHandleManager* discardableHandleManager); + SK_SPI ~SkStrikeServer(); + + // Create an analysis SkCanvas used to populate the SkStrikeServer with ops + // which will be serialized and rendered using the SkStrikeClient. + SK_API std::unique_ptr<SkCanvas> makeAnalysisCanvas(int width, int height, + const SkSurfaceProps& props, + sk_sp<SkColorSpace> colorSpace, + bool DFTSupport); + + // Serializes the typeface to be transmitted using this server. + SK_SPI sk_sp<SkData> serializeTypeface(SkTypeface*); + + // Serializes the strike data captured using a canvas returned by ::makeAnalysisCanvas. Any + // handles locked using the DiscardableHandleManager will be assumed to be + // unlocked after this call. + SK_SPI void writeStrikeData(std::vector<uint8_t>* memory); + + // Testing helpers + void setMaxEntriesInDescriptorMapForTesting(size_t count); + size_t remoteStrikeMapSizeForTesting() const; + +private: + SkStrikeServerImpl* impl(); + + std::unique_ptr<SkStrikeServerImpl> fImpl; +}; + +class SkStrikeClient { +public: + // This enum is used in histogram reporting in chromium. Please don't re-order the list of + // entries, and consider it to be append-only. + enum CacheMissType : uint32_t { + // Hard failures where no fallback could be found. + kFontMetrics = 0, + kGlyphMetrics = 1, + kGlyphImage = 2, + kGlyphPath = 3, + + // (DEPRECATED) The original glyph could not be found and a fallback was used. + kGlyphMetricsFallback = 4, + kGlyphPathFallback = 5, + + kLast = kGlyphPath + }; + + // An interface to delete handles that may be pinned by the remote server. + class DiscardableHandleManager : public SkRefCnt { + public: + ~DiscardableHandleManager() override = default; + + // Returns true if the handle was unlocked and can be safely deleted. Once + // successful, subsequent attempts to delete the same handle are invalid. 
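
The flow documented above is: analyze draws on the server, serialize the strike data, and have the client deserialize it before rasterizing the real ops. A minimal sketch of that round trip, assuming the DiscardableHandleManager plumbing already exists and ignoring transport:

    #include "include/core/SkCanvas.h"
    #include "include/core/SkPaint.h"
    #include "include/core/SkSurfaceProps.h"
    #include "include/core/SkTextBlob.h"
    #include "include/private/chromium/SkChromeRemoteGlyphCache.h"

    void roundTrip(SkStrikeServer& server, SkStrikeClient& client, const SkTextBlob& blob) {
        // Server side: replay the draw against an analysis canvas so the needed
        // strikes are captured, then serialize them.
        SkSurfaceProps props;
        std::unique_ptr<SkCanvas> analysis =
                server.makeAnalysisCanvas(256, 256, props, /*colorSpace=*/nullptr,
                                          /*DFTSupport=*/false);
        analysis->drawTextBlob(&blob, 10, 20, SkPaint());

        std::vector<uint8_t> strikeData;
        server.writeStrikeData(&strikeData);

        // Client side: all strike data must be read before the real ops rasterize.
        client.readStrikeData(strikeData.data(), strikeData.size());
    }
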
+ virtual bool deleteHandle(SkDiscardableHandleId) = 0; + + virtual void notifyCacheMiss(CacheMissType type, int fontSize) = 0; + + struct ReadFailureData { + size_t memorySize; + size_t bytesRead; + uint64_t typefaceSize; + uint64_t strikeCount; + uint64_t glyphImagesCount; + uint64_t glyphPathsCount; + }; + virtual void notifyReadFailure(const ReadFailureData& data) {} + }; + + SK_SPI explicit SkStrikeClient(sk_sp<DiscardableHandleManager>, + bool isLogging = true, + SkStrikeCache* strikeCache = nullptr); + SK_SPI ~SkStrikeClient(); + + // Deserializes the typeface previously serialized using the SkStrikeServer. Returns null if the + // data is invalid. + SK_SPI sk_sp<SkTypeface> deserializeTypeface(const void* data, size_t length); + + // Deserializes the strike data from a SkStrikeServer. All messages generated + // from a server when serializing the ops must be deserialized before the op + // is rasterized. + // Returns false if the data is invalid. + SK_SPI bool readStrikeData(const volatile void* memory, size_t memorySize); + +private: + std::unique_ptr<SkStrikeClientImpl> fImpl; +}; + +// For exposure to fuzzing only. +bool SkFuzzDeserializeSkDescriptor(sk_sp<SkData> bytes, SkAutoDescriptor* ad); + +#endif // SkChromeRemoteGlyphCache_DEFINED diff --git a/src/deps/skia/include/sksl/BUILD.bazel b/src/deps/skia/include/sksl/BUILD.bazel new file mode 100644 index 000000000..2d20d6d55 --- /dev/null +++ b/src/deps/skia/include/sksl/BUILD.bazel @@ -0,0 +1,176 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "DSLBlock_hdr", + hdrs = ["DSLBlock.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLExpression_hdr", + ":DSLStatement_hdr", + "//include/private:SkSLDefines_hdr", + ], +) + +generated_cc_atom( + name = "DSLCase_hdr", + hdrs = ["DSLCase.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLExpression_hdr", + ":DSLStatement_hdr", + "//include/private:SkSLDefines_hdr", + ], +) + +generated_cc_atom( + name = "DSLCore_hdr", + hdrs = ["DSLCore.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLBlock_hdr", + ":DSLCase_hdr", + ":DSLExpression_hdr", + ":DSLFunction_hdr", + ":DSLStatement_hdr", + ":DSLType_hdr", + ":DSLVar_hdr", + ":DSLWrapper_hdr", + ":SkSLErrorReporter_hdr", + "//include/private:SkSLProgramKind_hdr", + "//include/private:SkTArray_hdr", + ], +) + +generated_cc_atom( + name = "DSLExpression_hdr", + hdrs = ["DSLExpression.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLWrapper_hdr", + ":SkSLErrorReporter_hdr", + "//include/core:SkStringView_hdr", + "//include/core:SkTypes_hdr", + "//include/private:SkTArray_hdr", + ], +) + +generated_cc_atom( + name = "DSLFunction_hdr", + hdrs = ["DSLFunction.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLBlock_hdr", + ":DSLExpression_hdr", + ":DSLType_hdr", + ":DSLVar_hdr", + ":DSLWrapper_hdr", + ], +) + +generated_cc_atom( + name = "DSLLayout_hdr", + hdrs = ["DSLLayout.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkSLErrorReporter_hdr", + "//include/private:SkSLLayout_hdr", + ], +) + +generated_cc_atom( + name = "DSLModifiers_hdr", + hdrs = ["DSLModifiers.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLLayout_hdr", + "//include/core:SkSpan_hdr", + "//include/private:SkSLModifiers_hdr", + ], +) + +generated_cc_atom( + name = "DSLRuntimeEffects_hdr", + hdrs = ["DSLRuntimeEffects.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSL_hdr", + "//include/effects:SkRuntimeEffect_hdr", + ], +) + +generated_cc_atom( + name 
= "DSLStatement_hdr", + hdrs = ["DSLStatement.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkSLErrorReporter_hdr", + "//include/core:SkString_hdr", + "//include/core:SkTypes_hdr", + "//include/private:SkSLStatement_hdr", + ], +) + +generated_cc_atom( + name = "DSLSymbols_hdr", + hdrs = ["DSLSymbols.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLExpression_hdr", + "//include/core:SkStringView_hdr", + "//include/private:SkSLString_hdr", + ], +) + +generated_cc_atom( + name = "DSLType_hdr", + hdrs = ["DSLType.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLExpression_hdr", + ":DSLModifiers_hdr", + "//include/core:SkSpan_hdr", + "//include/private:SkSLString_hdr", + ], +) + +generated_cc_atom( + name = "DSLVar_hdr", + hdrs = ["DSLVar.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":DSLExpression_hdr", + ":DSLModifiers_hdr", + ":DSLType_hdr", + ], +) + +generated_cc_atom( + name = "DSLWrapper_hdr", + hdrs = ["DSLWrapper.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "DSL_hdr", + hdrs = ["DSL.h"], + visibility = ["//:__subpackages__"], + deps = [":DSLCore_hdr"], +) + +generated_cc_atom( + name = "SkSLErrorReporter_hdr", + hdrs = ["SkSLErrorReporter.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkStringView_hdr", + "//include/core:SkTypes_hdr", + "//include/private:SkSLString_hdr", + ], +) + +generated_cc_atom( + name = "SkSLDebugTrace_hdr", + hdrs = ["SkSLDebugTrace.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkRefCnt_hdr"], +) diff --git a/src/deps/skia/include/sksl/DSL.h b/src/deps/skia/include/sksl/DSL.h new file mode 100644 index 000000000..a8c8f5285 --- /dev/null +++ b/src/deps/skia/include/sksl/DSL.h @@ -0,0 +1,34 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL +#define SKSL_DSL + +#include "include/sksl/DSLCore.h" + +namespace SkSL { + +namespace dsl { + +using Block = DSLBlock; +using Case = DSLCase; +using Expression = DSLExpression; +using Field = DSLField; +using Function = DSLFunction; +using GlobalVar = DSLGlobalVar; +using Layout = DSLLayout; +using Modifiers = DSLModifiers; +using Parameter = DSLParameter; +using Statement = DSLStatement; +using Var = DSLVar; +template<typename T> using Wrapper = DSLWrapper<T>; + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLBlock.h b/src/deps/skia/include/sksl/DSLBlock.h new file mode 100644 index 000000000..3fcbb34c8 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLBlock.h @@ -0,0 +1,67 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_BLOCK +#define SKSL_DSL_BLOCK + +#include "include/private/SkSLDefines.h" +#include "include/sksl/DSLExpression.h" +#include "include/sksl/DSLStatement.h" + +#include <memory> + +namespace SkSL { + +class Block; +class SymbolTable; + +namespace dsl { + +class DSLBlock { +public: + template<class... Statements> + DSLBlock(Statements... 
statements) { + fStatements.reserve_back(sizeof...(statements)); + // in C++17, we could just do: + // (fStatements.push_back(DSLStatement(statements.release()).release()), ...); + int unused[] = + {0, + (static_cast<void>(fStatements.push_back(DSLStatement(statements.release()).release())), + 0)...}; + static_cast<void>(unused); + } + + DSLBlock(DSLBlock&& other) = default; + + DSLBlock(SkSL::StatementArray statements, std::shared_ptr<SymbolTable> symbols = nullptr); + + DSLBlock(SkTArray<DSLStatement> statements, std::shared_ptr<SymbolTable> symbols = nullptr); + + ~DSLBlock(); + + DSLBlock& operator=(DSLBlock&& other) { + fStatements = std::move(other.fStatements); + return *this; + } + + void append(DSLStatement stmt); + + std::unique_ptr<SkSL::Block> release(); + +private: + SkSL::StatementArray fStatements; + std::shared_ptr<SkSL::SymbolTable> fSymbols; + + friend class DSLStatement; + friend class DSLFunction; +}; + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLCase.h b/src/deps/skia/include/sksl/DSLCase.h new file mode 100644 index 000000000..5b006e7e2 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLCase.h @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_CASE +#define SKSL_DSL_CASE + +#include "include/private/SkSLDefines.h" +#include "include/sksl/DSLExpression.h" +#include "include/sksl/DSLStatement.h" + +#include <memory> + +namespace SkSL { + +class Statement; + +namespace dsl { + +class DSLCase { +public: + // An empty expression means 'default:'. + template<class... Statements> + DSLCase(DSLExpression value, Statements... statements) + : fValue(std::move(value)) { + fStatements.reserve_back(sizeof...(statements)); + // in C++17, we could just do: + // (fStatements.push_back(DSLStatement(std::move(statements)).release()), ...); + int unused[] = + {0, + (static_cast<void>(fStatements.push_back(DSLStatement(std::move(statements)).release())), + 0)...}; + static_cast<void>(unused); + } + + DSLCase(DSLExpression value, SkTArray<DSLStatement> statements, + PositionInfo info = PositionInfo::Capture()); + + DSLCase(DSLExpression value, SkSL::StatementArray statements, + PositionInfo info = PositionInfo::Capture()); + + DSLCase(DSLCase&&); + + ~DSLCase(); + + DSLCase& operator=(DSLCase&&); + + void append(DSLStatement stmt); + +private: + DSLExpression fValue; + SkSL::StatementArray fStatements; + PositionInfo fPosition; + + friend class DSLCore; + + template<class... Cases> + friend DSLPossibleStatement Switch(DSLExpression value, Cases... cases); +}; + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLCore.h b/src/deps/skia/include/sksl/DSLCore.h new file mode 100644 index 000000000..5363bda89 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLCore.h @@ -0,0 +1,488 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SKSL_DSL_CORE +#define SKSL_DSL_CORE + +#include "include/private/SkSLProgramKind.h" +#include "include/private/SkTArray.h" +#include "include/sksl/DSLBlock.h" +#include "include/sksl/DSLCase.h" +#include "include/sksl/DSLExpression.h" +#include "include/sksl/DSLFunction.h" +#include "include/sksl/DSLStatement.h" +#include "include/sksl/DSLType.h" +#include "include/sksl/DSLVar.h" +#include "include/sksl/DSLWrapper.h" +#include "include/sksl/SkSLErrorReporter.h" + +namespace SkSL { + +class Compiler; +struct Program; +struct ProgramSettings; + +namespace dsl { + +// When users import the DSL namespace via `using namespace SkSL::dsl`, we want the SwizzleComponent +// Type enum to come into scope as well, so `Swizzle(var, X, Y, ONE)` can work as expected. +// `namespace SkSL::SwizzleComponent` contains only an `enum Type`; this `using namespace` directive +// shouldn't pollute the SkSL::dsl namespace with anything else. +using namespace SkSL::SwizzleComponent; + +/** + * Starts DSL output on the current thread using the specified compiler. This must be called + * prior to any other DSL functions. + */ +void Start(SkSL::Compiler* compiler, SkSL::ProgramKind kind = SkSL::ProgramKind::kFragment); + +void Start(SkSL::Compiler* compiler, SkSL::ProgramKind kind, const SkSL::ProgramSettings& settings); + +/** + * Signals the end of DSL output. This must be called sometime between a call to Start() and the + * termination of the thread. + */ +void End(); + +/** + * Returns all global elements (functions and global variables) as a self-contained Program. The + * optional source string is retained as the program's source. DSL programs do not normally have + * sources, but when a DSL program is produced from parsed program text (as in DSLParser), it may be + * important to retain it so that any skstd::string_views derived from it remain valid. + */ +std::unique_ptr<SkSL::Program> ReleaseProgram(std::unique_ptr<SkSL::String> source = nullptr); + +/** + * Returns the ErrorReporter which will be notified of any errors that occur during DSL calls. The + * default error reporter aborts on any error. + */ +ErrorReporter& GetErrorReporter(); + +/** + * Installs an ErrorReporter which will be notified of any errors that occur during DSL calls. + */ +void SetErrorReporter(ErrorReporter* errorReporter); + +DSLGlobalVar sk_FragColor(); + +DSLGlobalVar sk_FragCoord(); + +DSLExpression sk_Position(); + +/** + * #extension <name> : enable + */ +void AddExtension(skstd::string_view name, PositionInfo pos = PositionInfo::Capture()); + +/** + * break; + */ +DSLStatement Break(PositionInfo pos = PositionInfo::Capture()); + +/** + * continue; + */ +DSLStatement Continue(PositionInfo pos = PositionInfo::Capture()); + +/** + * Adds a modifiers declaration to the current program. + */ +void Declare(const DSLModifiers& modifiers, PositionInfo pos = PositionInfo::Capture()); + +/** + * Creates a local variable declaration statement. + */ +DSLStatement Declare(DSLVar& var, PositionInfo pos = PositionInfo::Capture()); + +/** + * Creates a local variable declaration statement containing multiple variables. + */ +DSLStatement Declare(SkTArray<DSLVar>& vars, PositionInfo pos = PositionInfo::Capture()); + +/** + * Declares a global variable. + */ +void Declare(DSLGlobalVar& var, PositionInfo pos = PositionInfo::Capture()); + +/** + * Declares a set of global variables. + */ +void Declare(SkTArray<DSLGlobalVar>& vars, PositionInfo pos = PositionInfo::Capture()); + +/** + * default: statements + */ +template<class... 
Statements> +DSLCase Default(Statements... statements) { + return DSLCase(DSLExpression(), std::move(statements)...); +} + +/** + * discard; + */ +DSLStatement Discard(PositionInfo pos = PositionInfo::Capture()); + +/** + * do stmt; while (test); + */ +DSLStatement Do(DSLStatement stmt, DSLExpression test, PositionInfo pos = PositionInfo::Capture()); + +/** + * for (initializer; test; next) stmt; + */ +DSLStatement For(DSLStatement initializer, DSLExpression test, DSLExpression next, + DSLStatement stmt, PositionInfo pos = PositionInfo::Capture()); + +/** + * if (test) ifTrue; [else ifFalse;] + */ +DSLStatement If(DSLExpression test, DSLStatement ifTrue, DSLStatement ifFalse = DSLStatement(), + PositionInfo pos = PositionInfo::Capture()); + +DSLGlobalVar InterfaceBlock(const DSLModifiers& modifiers, skstd::string_view typeName, + SkTArray<DSLField> fields, skstd::string_view varName = "", + int arraySize = 0, PositionInfo pos = PositionInfo::Capture()); + +/** + * return [value]; + */ +DSLStatement Return(DSLExpression value = DSLExpression(), + PositionInfo pos = PositionInfo::Capture()); + +/** + * test ? ifTrue : ifFalse + */ +DSLExpression Select(DSLExpression test, DSLExpression ifTrue, DSLExpression ifFalse, + PositionInfo info = PositionInfo::Capture()); + +DSLStatement StaticIf(DSLExpression test, DSLStatement ifTrue, + DSLStatement ifFalse = DSLStatement(), + PositionInfo pos = PositionInfo::Capture()); + +// Internal use only +DSLPossibleStatement PossibleStaticSwitch(DSLExpression value, SkTArray<DSLCase> cases); + +DSLStatement StaticSwitch(DSLExpression value, SkTArray<DSLCase> cases, + PositionInfo info = PositionInfo::Capture()); + +/** + * @switch (value) { cases } + */ +template<class... Cases> +DSLPossibleStatement StaticSwitch(DSLExpression value, Cases... cases) { + SkTArray<DSLCase> caseArray; + caseArray.reserve_back(sizeof...(cases)); + (caseArray.push_back(std::move(cases)), ...); + return PossibleStaticSwitch(std::move(value), std::move(caseArray)); +} + +// Internal use only +DSLPossibleStatement PossibleSwitch(DSLExpression value, SkTArray<DSLCase> cases); + +DSLStatement Switch(DSLExpression value, SkTArray<DSLCase> cases, + PositionInfo info = PositionInfo::Capture()); + +/** + * switch (value) { cases } + */ +template<class... Cases> +DSLPossibleStatement Switch(DSLExpression value, Cases... cases) { + SkTArray<DSLCase> caseArray; + caseArray.reserve_back(sizeof...(cases)); + (caseArray.push_back(std::move(cases)), ...); + return PossibleSwitch(std::move(value), std::move(caseArray)); +} + +/** + * while (test) stmt; + */ +DSLStatement While(DSLExpression test, DSLStatement stmt, + PositionInfo info = PositionInfo::Capture()); + +/** + * expression.xyz1 + */ +DSLExpression Swizzle(DSLExpression base, + SkSL::SwizzleComponent::Type a, + PositionInfo pos = PositionInfo::Capture()); + +DSLExpression Swizzle(DSLExpression base, + SkSL::SwizzleComponent::Type a, + SkSL::SwizzleComponent::Type b, + PositionInfo pos = PositionInfo::Capture()); + +DSLExpression Swizzle(DSLExpression base, + SkSL::SwizzleComponent::Type a, + SkSL::SwizzleComponent::Type b, + SkSL::SwizzleComponent::Type c, + PositionInfo pos = PositionInfo::Capture()); + +DSLExpression Swizzle(DSLExpression base, + SkSL::SwizzleComponent::Type a, + SkSL::SwizzleComponent::Type b, + SkSL::SwizzleComponent::Type c, + SkSL::SwizzleComponent::Type d, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the absolute value of x. If x is a vector, operates componentwise. 
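
The builders documented above compose into ordinary C++ statements; a small sketch of a function assembled with them, assuming DSL output has been started via Start() and using Parameter and kFloat_Type from DSLVar.h and DSLType.h (headers not shown in this hunk):

    #include "include/sksl/DSL.h"

    using namespace SkSL::dsl;

    static void emitAbsDiff() {
        // Roughly: float absDiff(float a, float b) { if (a < b) return b - a; return a - b; }
        Parameter a(kFloat_Type, "a");
        Parameter b(kFloat_Type, "b");
        Function absDiff(kFloat_Type, "absDiff", a, b);
        absDiff.define(
            If(a < b,
               Return(b - a)),
            Return(a - b));
    }
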
+ */ +DSLExpression Abs(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns true if all of the components of boolean vector x are true. + */ +DSLExpression All(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns true if any of the components of boolean vector x are true. + */ +DSLExpression Any(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the arctangent of y over x. Operates componentwise on vectors. + */ +DSLExpression Atan(DSLExpression y_over_x, PositionInfo pos = PositionInfo::Capture()); +DSLExpression Atan(DSLExpression y, DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x rounded towards positive infinity. If x is a vector, operates componentwise. + */ +DSLExpression Ceil(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x clamped to between min and max. If x is a vector, operates componentwise. + */ +DSLExpression Clamp(DSLExpression x, DSLExpression min, DSLExpression max, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the cosine of x. If x is a vector, operates componentwise. + */ +DSLExpression Cos(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the cross product of x and y. + */ +DSLExpression Cross(DSLExpression x, DSLExpression y, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x converted from radians to degrees. If x is a vector, operates componentwise. + */ +DSLExpression Degrees(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the distance between x and y. + */ +DSLExpression Distance(DSLExpression x, DSLExpression y, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the dot product of x and y. + */ +DSLExpression Dot(DSLExpression x, DSLExpression y, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a boolean vector indicating whether components of x are equal to the corresponding + * components of y. + */ +DSLExpression Equal(DSLExpression x, DSLExpression y, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns e^x. If x is a vector, operates componentwise. + */ +DSLExpression Exp(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns 2^x. If x is a vector, operates componentwise. + */ +DSLExpression Exp2(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * If dot(i, nref) >= 0, returns n, otherwise returns -n. + */ +DSLExpression Faceforward(DSLExpression n, DSLExpression i, DSLExpression nref, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x rounded towards negative infinity. If x is a vector, operates componentwise. + */ +DSLExpression Floor(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the fractional part of x. If x is a vector, operates componentwise. + */ +DSLExpression Fract(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a boolean vector indicating whether components of x are greater than the corresponding + * components of y. + */ +DSLExpression GreaterThan(DSLExpression x, DSLExpression y, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a boolean vector indicating whether components of x are greater than or equal to the + * corresponding components of y. + */ +DSLExpression GreaterThanEqual(DSLExpression x, DSLExpression y, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the 1/sqrt(x). 
If x is a vector, operates componentwise. + */ +DSLExpression Inversesqrt(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the inverse of the matrix x. + */ +DSLExpression Inverse(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the length of the vector x. + */ +DSLExpression Length(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a boolean vector indicating whether components of x are less than the corresponding + * components of y. + */ +DSLExpression LessThan(DSLExpression x, DSLExpression y, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a boolean vector indicating whether components of x are less than or equal to the + * corresponding components of y. + */ +DSLExpression LessThanEqual(DSLExpression x, DSLExpression y, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the log base e of x. If x is a vector, operates componentwise. + */ +DSLExpression Log(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the log base 2 of x. If x is a vector, operates componentwise. + */ +DSLExpression Log2(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the larger (closer to positive infinity) of x and y. If x is a vector, operates + * componentwise. y may be either a vector of the same dimensions as x, or a scalar. + */ +DSLExpression Max(DSLExpression x, DSLExpression y, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the smaller (closer to negative infinity) of x and y. If x is a vector, operates + * componentwise. y may be either a vector of the same dimensions as x, or a scalar. + */ +DSLExpression Min(DSLExpression x, DSLExpression y, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a linear intepolation between x and y at position a, where a=0 results in x and a=1 + * results in y. If x and y are vectors, operates componentwise. a may be either a vector of the + * same dimensions as x and y, or a scalar. + */ +DSLExpression Mix(DSLExpression x, DSLExpression y, DSLExpression a, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x modulo y. If x is a vector, operates componentwise. y may be either a vector of the + * same dimensions as x, or a scalar. + */ +DSLExpression Mod(DSLExpression x, DSLExpression y, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the vector x normalized to a length of 1. + */ +DSLExpression Normalize(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a boolean vector indicating whether components of x are not equal to the corresponding + * components of y. + */ +DSLExpression NotEqual(DSLExpression x, DSLExpression y, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x raised to the power y. If x is a vector, operates componentwise. y may be either a + * vector of the same dimensions as x, or a scalar. + */ +DSLExpression Pow(DSLExpression x, DSLExpression y, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x converted from degrees to radians. If x is a vector, operates componentwise. + */ +DSLExpression Radians(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns i reflected from a surface with normal n. + */ +DSLExpression Reflect(DSLExpression i, DSLExpression n, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns i refracted across a surface with normal n and ratio of indices of refraction eta. 
+ */ +DSLExpression Refract(DSLExpression i, DSLExpression n, DSLExpression eta, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x, rounded to the nearest integer. If x is a vector, operates componentwise. + */ +DSLExpression Round(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x clamped to the range [0, 1]. If x is a vector, operates componentwise. + */ +DSLExpression Saturate(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns -1, 0, or 1 depending on whether x is negative, zero, or positive, respectively. If x is + * a vector, operates componentwise. + */ +DSLExpression Sign(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the sine of x. If x is a vector, operates componentwise. + */ +DSLExpression Sin(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns a smooth interpolation between 0 (at x=edge1) and 1 (at x=edge2). If x is a vector, + * operates componentwise. edge1 and edge2 may either be both vectors of the same dimensions as x or + * scalars. + */ +DSLExpression Smoothstep(DSLExpression edge1, DSLExpression edge2, DSLExpression x, + PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the square root of x. If x is a vector, operates componentwise. + */ +DSLExpression Sqrt(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns 0 if x < edge or 1 if x >= edge. If x is a vector, operates componentwise. edge may be + * either a vector of the same dimensions as x, or a scalar. + */ +DSLExpression Step(DSLExpression edge, DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns the tangent of x. If x is a vector, operates componentwise. + */ +DSLExpression Tan(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns x converted from premultipled to unpremultiplied alpha. + */ +DSLExpression Unpremul(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLExpression.h b/src/deps/skia/include/sksl/DSLExpression.h new file mode 100644 index 000000000..9ee0f6d20 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLExpression.h @@ -0,0 +1,299 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_EXPRESSION +#define SKSL_DSL_EXPRESSION + +#include "include/core/SkStringView.h" +#include "include/core/SkTypes.h" +#include "include/private/SkTArray.h" +#include "include/sksl/DSLWrapper.h" +#include "include/sksl/SkSLErrorReporter.h" + +#include <cstdint> +#include <memory> + +#if defined(__has_cpp_attribute) && __has_cpp_attribute(clang::reinitializes) +#define SK_CLANG_REINITIALIZES [[clang::reinitializes]] +#else +#define SK_CLANG_REINITIALIZES +#endif + +namespace SkSL { + +class Expression; +class Type; + +namespace dsl { + +class DSLPossibleExpression; +class DSLStatement; +class DSLType; +class DSLVarBase; + +/** + * Represents an expression such as 'cos(x)' or 'a + b'. + */ +class DSLExpression { +public: + DSLExpression(const DSLExpression&) = delete; + + DSLExpression(DSLExpression&&); + + DSLExpression(); + + /** + * Creates an expression representing a literal float. + */ + DSLExpression(float value, PositionInfo pos = PositionInfo::Capture()); + + /** + * Creates an expression representing a literal float. 
+ */ + DSLExpression(double value, PositionInfo pos = PositionInfo::Capture()) + : DSLExpression((float) value) {} + + /** + * Creates an expression representing a literal int. + */ + DSLExpression(int value, PositionInfo pos = PositionInfo::Capture()); + + /** + * Creates an expression representing a literal int. + */ + DSLExpression(int64_t value, PositionInfo pos = PositionInfo::Capture()); + + /** + * Creates an expression representing a literal uint. + */ + DSLExpression(unsigned int value, PositionInfo pos = PositionInfo::Capture()); + + /** + * Creates an expression representing a literal bool. + */ + DSLExpression(bool value, PositionInfo pos = PositionInfo::Capture()); + + /** + * Creates an expression representing a variable reference. + */ + DSLExpression(DSLVarBase& var, PositionInfo pos = PositionInfo::Capture()); + + DSLExpression(DSLVarBase&& var, PositionInfo pos = PositionInfo::Capture()); + + DSLExpression(DSLPossibleExpression expr, PositionInfo pos = PositionInfo::Capture()); + + explicit DSLExpression(std::unique_ptr<SkSL::Expression> expression); + + static DSLExpression Poison(PositionInfo pos = PositionInfo::Capture()); + + ~DSLExpression(); + + DSLType type(); + + /** + * Overloads the '=' operator to create an SkSL assignment statement. + */ + DSLPossibleExpression operator=(DSLExpression other); + + DSLExpression x(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression y(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression z(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression w(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression r(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression g(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression b(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression a(PositionInfo pos = PositionInfo::Capture()); + + /** + * Creates an SkSL struct field access expression. + */ + DSLExpression field(skstd::string_view name, PositionInfo pos = PositionInfo::Capture()); + + /** + * Creates an SkSL array index expression. + */ + DSLPossibleExpression operator[](DSLExpression index); + + DSLPossibleExpression operator()(SkTArray<DSLWrapper<DSLExpression>> args, + PositionInfo pos = PositionInfo::Capture()); + + DSLPossibleExpression operator()(ExpressionArray args, + PositionInfo pos = PositionInfo::Capture()); + + /** + * Returns true if this object contains an expression. DSLExpressions which were created with + * the empty constructor or which have already been release()ed do not have a value. + * DSLExpressions created with errors are still considered to have a value (but contain poison). + */ + bool hasValue() const { + return fExpression != nullptr; + } + + /** + * Returns true if this object contains an expression which is not poison. + */ + bool isValid() const; + + SK_CLANG_REINITIALIZES void swap(DSLExpression& other); + + /** + * Invalidates this object and returns the SkSL expression it represents. It is an error to call + * this on an invalid DSLExpression. + */ + std::unique_ptr<SkSL::Expression> release(); + +private: + /** + * Calls release if this expression has a value, otherwise returns null. 
+ */ + std::unique_ptr<SkSL::Expression> releaseIfPossible(); + + std::unique_ptr<SkSL::Expression> fExpression; + + friend DSLExpression SampleChild(int index, DSLExpression coords); + + friend class DSLCore; + friend class DSLFunction; + friend class DSLPossibleExpression; + friend class DSLType; + friend class DSLVarBase; + friend class DSLWriter; + template<typename T> friend class DSLWrapper; +}; + +DSLPossibleExpression operator+(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator+(DSLExpression expr); +DSLPossibleExpression operator+=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator-(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator-(DSLExpression expr); +DSLPossibleExpression operator-=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator*(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator*=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator/(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator/=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator%(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator%=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator<<(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator<<=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator>>(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator>>=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator&&(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator||(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator&(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator&=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator|(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator|=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator^(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator^=(DSLExpression left, DSLExpression right); +DSLPossibleExpression LogicalXor(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator,(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator,(DSLPossibleExpression left, DSLExpression right); +DSLPossibleExpression operator,(DSLExpression left, DSLPossibleExpression right); +DSLPossibleExpression operator,(DSLPossibleExpression left, DSLPossibleExpression right); +DSLPossibleExpression operator==(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator!=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator>(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator<(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator>=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator<=(DSLExpression left, DSLExpression right); +DSLPossibleExpression operator!(DSLExpression expr); +DSLPossibleExpression operator~(DSLExpression expr); +DSLPossibleExpression operator++(DSLExpression expr); +DSLPossibleExpression operator++(DSLExpression expr, int); +DSLPossibleExpression operator--(DSLExpression expr); +DSLPossibleExpression operator--(DSLExpression expr, int); + +/** + * Represents an Expression which may have failed and/or have pending errors to report. 
Converting a + * PossibleExpression into an Expression requires PositionInfo so that any pending errors can be + * reported at the correct position. + * + * PossibleExpression is used instead of Expression in situations where it is not possible to + * capture the PositionInfo at the time of Expression construction (notably in operator overloads, + * where we cannot add default parameters). + */ +class DSLPossibleExpression { +public: + DSLPossibleExpression(std::unique_ptr<SkSL::Expression> expression); + + DSLPossibleExpression(DSLPossibleExpression&& other); + + ~DSLPossibleExpression(); + + bool valid() const { + return fExpression != nullptr; + } + + /** + * Reports any pending errors at the specified position. + */ + void reportErrors(PositionInfo pos); + + DSLType type(); + + DSLExpression x(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression y(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression z(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression w(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression r(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression g(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression b(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression a(PositionInfo pos = PositionInfo::Capture()); + + DSLExpression field(skstd::string_view name, PositionInfo pos = PositionInfo::Capture()); + + DSLPossibleExpression operator=(DSLExpression expr); + + DSLPossibleExpression operator=(int expr); + + DSLPossibleExpression operator=(float expr); + + DSLPossibleExpression operator=(double expr); + + DSLPossibleExpression operator[](DSLExpression index); + + DSLPossibleExpression operator()(SkTArray<DSLWrapper<DSLExpression>> args, + PositionInfo pos = PositionInfo::Capture()); + + DSLPossibleExpression operator()(ExpressionArray args, + PositionInfo pos = PositionInfo::Capture()); + + DSLPossibleExpression operator++(); + + DSLPossibleExpression operator++(int); + + DSLPossibleExpression operator--(); + + DSLPossibleExpression operator--(int); + + std::unique_ptr<SkSL::Expression> release(PositionInfo pos = PositionInfo::Capture()); + +private: + std::unique_ptr<SkSL::Expression> fExpression; + + friend class DSLExpression; +}; + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLFunction.h b/src/deps/skia/include/sksl/DSLFunction.h new file mode 100644 index 000000000..aa40bdfb6 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLFunction.h @@ -0,0 +1,116 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_FUNCTION +#define SKSL_DSL_FUNCTION + +#include "include/sksl/DSLBlock.h" +#include "include/sksl/DSLExpression.h" +#include "include/sksl/DSLType.h" +#include "include/sksl/DSLVar.h" +#include "include/sksl/DSLWrapper.h" + +namespace SkSL { + +class Block; +class FunctionDeclaration; +class Variable; + +namespace dsl { + +class DSLType; + +class DSLFunction { +public: + template<class... Parameters> + DSLFunction(const DSLType& returnType, skstd::string_view name, Parameters&... parameters) + : DSLFunction(DSLModifiers(), returnType, name, parameters...) {} + + template<class... Parameters> + DSLFunction(const DSLModifiers& modifiers, const DSLType& returnType, skstd::string_view name, + Parameters&... 
parameters) {
+        SkTArray<DSLParameter*> parameterArray;
+        parameterArray.reserve_back(sizeof...(parameters));
+
+        // in C++17, we could just do:
+        // (parameterArray.push_back(&parameters), ...);
+        int unused[] = {0, (static_cast<void>(parameterArray.push_back(&parameters)), 0)...};
+        static_cast<void>(unused);
+        // We can't have a default parameter and a template parameter pack at the same time, so
+        // unfortunately we can't capture position info from this overload.
+        this->init(modifiers, returnType, name, std::move(parameterArray), PositionInfo());
+    }
+
+    DSLFunction(const DSLType& returnType, skstd::string_view name,
+                SkTArray<DSLParameter*> parameters, PositionInfo pos = PositionInfo::Capture()) {
+        this->init(DSLModifiers(), returnType, name, std::move(parameters), pos);
+    }
+
+    DSLFunction(const DSLModifiers& modifiers, const DSLType& returnType, skstd::string_view name,
+                SkTArray<DSLParameter*> parameters, PositionInfo pos = PositionInfo::Capture()) {
+        this->init(modifiers, returnType, name, std::move(parameters), pos);
+    }
+
+    DSLFunction(const SkSL::FunctionDeclaration* decl)
+        : fDecl(decl) {}
+
+    virtual ~DSLFunction() = default;
+
+    template<class... Stmt>
+    void define(Stmt... stmts) {
+        DSLBlock block = DSLBlock(DSLStatement(std::move(stmts))...);
+        this->define(std::move(block));
+    }
+
+    void define(DSLBlock block, PositionInfo pos = PositionInfo::Capture());
+
+    /**
+     * Invokes the function with the given arguments.
+     */
+    template<class... Args>
+    DSLExpression operator()(Args&&... args) {
+        ExpressionArray argArray;
+        argArray.reserve_back(sizeof...(args));
+        this->collectArgs(argArray, std::forward<Args>(args)...);
+        return this->call(std::move(argArray));
+    }
+
+    /**
+     * Invokes the function with the given arguments.
+     */
+    DSLExpression call(SkTArray<DSLWrapper<DSLExpression>> args,
+                       PositionInfo pos = PositionInfo::Capture());
+
+    DSLExpression call(ExpressionArray args, PositionInfo pos = PositionInfo::Capture());
+
+private:
+    void collectArgs(ExpressionArray& args) {}
+
+    template<class... RemainingArgs>
+    void collectArgs(ExpressionArray& args, DSLVar& var, RemainingArgs&&... remaining) {
+        args.push_back(DSLExpression(var).release());
+        collectArgs(args, std::forward<RemainingArgs>(remaining)...);
+    }
+
+    template<class... RemainingArgs>
+    void collectArgs(ExpressionArray& args, DSLExpression expr, RemainingArgs&&... remaining) {
+        args.push_back(expr.release());
+        collectArgs(args, std::forward<RemainingArgs>(remaining)...);
+    }
+
+    void init(DSLModifiers modifiers, const DSLType& returnType, skstd::string_view name,
+              SkTArray<DSLParameter*> params, PositionInfo pos);
+
+    const SkSL::FunctionDeclaration* fDecl = nullptr;
+    SkSL::PositionInfo fPosition;
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/src/deps/skia/include/sksl/DSLLayout.h b/src/deps/skia/include/sksl/DSLLayout.h
new file mode 100644
index 000000000..a0698f77f
--- /dev/null
+++ b/src/deps/skia/include/sksl/DSLLayout.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */ + +#ifndef SKSL_DSL_LAYOUT +#define SKSL_DSL_LAYOUT + +#include "include/sksl/DSLLayout.h" + +#include "include/private/SkSLLayout.h" +#include "include/sksl/SkSLErrorReporter.h" + +namespace SkSL { + +namespace dsl { + +class DSLLayout { +public: + DSLLayout() {} + + DSLLayout& originUpperLeft(PositionInfo pos = PositionInfo::Capture()) { + return this->flag(SkSL::Layout::kOriginUpperLeft_Flag, "origin_upper_left", pos); + } + + DSLLayout& pushConstant(PositionInfo pos = PositionInfo::Capture()) { + return this->flag(SkSL::Layout::kPushConstant_Flag, "push_constant", pos); + } + + DSLLayout& blendSupportAllEquations(PositionInfo pos = PositionInfo::Capture()) { + return this->flag(SkSL::Layout::kBlendSupportAllEquations_Flag, + "blend_support_all_equations", pos); + } + + DSLLayout& color(PositionInfo pos = PositionInfo::Capture()) { + return this->flag(SkSL::Layout::kColor_Flag, "color", pos); + } + + DSLLayout& location(int location, PositionInfo pos = PositionInfo::Capture()) { + return this->intValue(&fSkSLLayout.fLocation, location, SkSL::Layout::kLocation_Flag, + "location", pos); + } + + DSLLayout& offset(int offset, PositionInfo pos = PositionInfo::Capture()) { + return this->intValue(&fSkSLLayout.fOffset, offset, SkSL::Layout::kOffset_Flag, "offset", + pos); + } + + DSLLayout& binding(int binding, PositionInfo pos = PositionInfo::Capture()) { + return this->intValue(&fSkSLLayout.fBinding, binding, SkSL::Layout::kBinding_Flag, + "binding", pos); + } + + DSLLayout& index(int index, PositionInfo pos = PositionInfo::Capture()) { + return this->intValue(&fSkSLLayout.fIndex, index, SkSL::Layout::kIndex_Flag, "index", pos); + } + + DSLLayout& set(int set, PositionInfo pos = PositionInfo::Capture()) { + return this->intValue(&fSkSLLayout.fSet, set, SkSL::Layout::kSet_Flag, "set", pos); + } + + DSLLayout& builtin(int builtin, PositionInfo pos = PositionInfo::Capture()) { + return this->intValue(&fSkSLLayout.fBuiltin, builtin, SkSL::Layout::kBuiltin_Flag, + "builtin", pos); + } + + DSLLayout& inputAttachmentIndex(int inputAttachmentIndex, + PositionInfo pos = PositionInfo::Capture()) { + return this->intValue(&fSkSLLayout.fInputAttachmentIndex, inputAttachmentIndex, + SkSL::Layout::kInputAttachmentIndex_Flag, "input_attachment_index", + pos); + } + +private: + explicit DSLLayout(SkSL::Layout skslLayout) + : fSkSLLayout(skslLayout) {} + + DSLLayout& flag(SkSL::Layout::Flag mask, const char* name, PositionInfo pos); + + DSLLayout& intValue(int* target, int value, SkSL::Layout::Flag flag, const char* name, + PositionInfo pos); + + SkSL::Layout fSkSLLayout; + + friend class DSLModifiers; +}; + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLModifiers.h b/src/deps/skia/include/sksl/DSLModifiers.h new file mode 100644 index 000000000..10c6aaafa --- /dev/null +++ b/src/deps/skia/include/sksl/DSLModifiers.h @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SKSL_DSL_MODIFIERS +#define SKSL_DSL_MODIFIERS + +#include "include/core/SkSpan.h" +#include "include/private/SkSLModifiers.h" +#include "include/sksl/DSLLayout.h" + +namespace SkSL { + +namespace dsl { + +class DSLField; +class DSLType; + +enum Modifier { + kNo_Modifier = SkSL::Modifiers::kNo_Flag, + kConst_Modifier = SkSL::Modifiers::kConst_Flag, + kIn_Modifier = SkSL::Modifiers::kIn_Flag, + kOut_Modifier = SkSL::Modifiers::kOut_Flag, + kInOut_Modifier = SkSL::Modifiers::kIn_Flag | SkSL::Modifiers::kOut_Flag, + kUniform_Modifier = SkSL::Modifiers::kUniform_Flag, + kFlat_Modifier = SkSL::Modifiers::kFlat_Flag, + kNoPerspective_Modifier = SkSL::Modifiers::kNoPerspective_Flag, +}; + +class DSLModifiers { +public: + DSLModifiers(int flags = 0) + : DSLModifiers(DSLLayout(), flags) {} + + DSLModifiers(DSLLayout layout, int flags = 0) + : fModifiers(layout.fSkSLLayout, flags) {} + + int flags() const { + return fModifiers.fFlags; + } + + DSLLayout layout() const { + return DSLLayout(fModifiers.fLayout); + } + +private: + SkSL::Modifiers fModifiers; + + friend DSLType Struct(skstd::string_view name, SkSpan<DSLField> fields, PositionInfo pos); + friend class DSLCore; + friend class DSLFunction; + friend class DSLType; + friend class DSLVarBase; + friend class DSLWriter; +}; + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLRuntimeEffects.h b/src/deps/skia/include/sksl/DSLRuntimeEffects.h new file mode 100644 index 000000000..3d9b99091 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLRuntimeEffects.h @@ -0,0 +1,32 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_RUNTIME_EFFECTS +#define SKSL_DSL_RUNTIME_EFFECTS + +#include "include/effects/SkRuntimeEffect.h" +#include "include/sksl/DSL.h" + +namespace SkSL { + +class Compiler; + +namespace dsl { + +#ifndef SKSL_STANDALONE + +void StartRuntimeShader(SkSL::Compiler* compiler); + +sk_sp<SkRuntimeEffect> EndRuntimeShader(SkRuntimeEffect::Options options = {}); + +#endif + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLStatement.h b/src/deps/skia/include/sksl/DSLStatement.h new file mode 100644 index 000000000..976198ca1 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLStatement.h @@ -0,0 +1,111 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SKSL_DSL_STATEMENT +#define SKSL_DSL_STATEMENT + +#include "include/core/SkString.h" +#include "include/core/SkTypes.h" +#include "include/private/SkSLStatement.h" +#include "include/sksl/SkSLErrorReporter.h" + +#include <memory> + +class GrGLSLShaderBuilder; + +namespace SkSL { + +class Expression; +class Statement; + +namespace dsl { + +class DSLBlock; +class DSLExpression; +class DSLPossibleExpression; +class DSLPossibleStatement; +class DSLVar; + +class DSLStatement { +public: + DSLStatement(); + + DSLStatement(DSLExpression expr); + + DSLStatement(DSLPossibleExpression expr, PositionInfo pos = PositionInfo::Capture()); + + DSLStatement(DSLPossibleStatement stmt, PositionInfo pos = PositionInfo::Capture()); + + DSLStatement(DSLBlock block); + + DSLStatement(DSLStatement&&) = default; + + DSLStatement(std::unique_ptr<SkSL::Statement> stmt); + + DSLStatement(std::unique_ptr<SkSL::Expression> expr); + + ~DSLStatement(); + + DSLStatement& operator=(DSLStatement&& other) = default; + + bool hasValue() { return fStatement != nullptr; } + + std::unique_ptr<SkSL::Statement> release() { + SkASSERT(this->hasValue()); + return std::move(fStatement); + } + +private: + std::unique_ptr<SkSL::Statement> releaseIfPossible() { + return std::move(fStatement); + } + + std::unique_ptr<SkSL::Statement> fStatement; + + friend class DSLBlock; + friend class DSLCore; + friend class DSLExpression; + friend class DSLPossibleStatement; + friend class DSLWriter; + friend DSLStatement operator,(DSLStatement left, DSLStatement right); +}; + +/** + * Represents a Statement which may have failed and/or have pending errors to report. Converting a + * PossibleStatement into a Statement requires PositionInfo so that any pending errors can be + * reported at the correct position. + * + * PossibleStatement is used instead of Statement in situations where it is not possible to capture + * the PositionInfo at the time of Statement construction. + */ +class DSLPossibleStatement { +public: + DSLPossibleStatement(std::unique_ptr<SkSL::Statement> stmt); + + DSLPossibleStatement(DSLPossibleStatement&& other) = default; + + ~DSLPossibleStatement(); + + bool hasValue() { return fStatement != nullptr; } + + std::unique_ptr<SkSL::Statement> release() { + return DSLStatement(std::move(*this)).release(); + } + +private: + std::unique_ptr<SkSL::Statement> fStatement; + + friend class DSLStatement; +}; + +DSLStatement operator,(DSLStatement left, DSLStatement right); + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLSymbols.h b/src/deps/skia/include/sksl/DSLSymbols.h new file mode 100644 index 000000000..9eebeda7c --- /dev/null +++ b/src/deps/skia/include/sksl/DSLSymbols.h @@ -0,0 +1,71 @@ +/* + * Copyright 2021 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_SYMBOLS +#define SKSL_DSL_SYMBOLS + +#include "include/core/SkStringView.h" +#include "include/private/SkSLString.h" +#include "include/sksl/DSLExpression.h" + +#include <memory> + +namespace SkSL { + +class SymbolTable; + +namespace dsl { + +class DSLVar; + +// This header provides methods for manually managing symbol tables in DSL code. They should not be +// used by normal hand-written DSL code, where we rely on C++ to manage symbols, but are instead +// needed when DSL objects are being constructed programmatically (as in DSLParser). + +/** + * Pushes a new symbol table onto the symbol table stack. 
+ */ +void PushSymbolTable(); + +/** + * Pops the top symbol table from the stack. As symbol tables are shared pointers, this will only + * destroy the symbol table if it was never attached to anything (e.g. passed into a Block + * constructor). + */ +void PopSymbolTable(); + +/** + * Returns the current symbol table. Outside of SkSL itself, this is an opaque pointer, used only + * for passing it to DSL methods that require it. + */ +std::shared_ptr<SymbolTable> CurrentSymbolTable(); + +/** + * Returns an expression referring to the named symbol. + */ +DSLPossibleExpression Symbol(skstd::string_view name, PositionInfo pos = PositionInfo::Capture()); + +/** + * Returns true if the name refers to a type (user or built-in) in the current symbol table. + */ +bool IsType(skstd::string_view name); + +/** + * Returns true if the name refers to a builtin type. + */ +bool IsBuiltinType(skstd::string_view name); + +/** + * Adds a variable to the current symbol table. + */ +void AddToSymbolTable(DSLVarBase& var, PositionInfo pos = PositionInfo::Capture()); + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLType.h b/src/deps/skia/include/sksl/DSLType.h new file mode 100644 index 000000000..4e6150f34 --- /dev/null +++ b/src/deps/skia/include/sksl/DSLType.h @@ -0,0 +1,259 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_TYPE +#define SKSL_DSL_TYPE + +#include "include/core/SkSpan.h" +#include "include/private/SkSLString.h" +#include "include/sksl/DSLExpression.h" +#include "include/sksl/DSLModifiers.h" + +#include <cstdint> + +namespace SkSL { + +class Type; + +namespace dsl { + +class DSLExpression; +class DSLField; +class DSLVarBase; + +enum TypeConstant : uint8_t { + kBool_Type, + kBool2_Type, + kBool3_Type, + kBool4_Type, + kHalf_Type, + kHalf2_Type, + kHalf3_Type, + kHalf4_Type, + kHalf2x2_Type, + kHalf3x2_Type, + kHalf4x2_Type, + kHalf2x3_Type, + kHalf3x3_Type, + kHalf4x3_Type, + kHalf2x4_Type, + kHalf3x4_Type, + kHalf4x4_Type, + kFloat_Type, + kFloat2_Type, + kFloat3_Type, + kFloat4_Type, + kFragmentProcessor_Type, + kFloat2x2_Type, + kFloat3x2_Type, + kFloat4x2_Type, + kFloat2x3_Type, + kFloat3x3_Type, + kFloat4x3_Type, + kFloat2x4_Type, + kFloat3x4_Type, + kFloat4x4_Type, + kInt_Type, + kInt2_Type, + kInt3_Type, + kInt4_Type, + kShader_Type, + kShort_Type, + kShort2_Type, + kShort3_Type, + kShort4_Type, + kUInt_Type, + kUInt2_Type, + kUInt3_Type, + kUInt4_Type, + kUShort_Type, + kUShort2_Type, + kUShort3_Type, + kUShort4_Type, + kVoid_Type, + kPoison_Type, +}; + +class DSLType { +public: + DSLType(TypeConstant tc) + : fTypeConstant(tc) {} + + DSLType(const SkSL::Type* type); + + DSLType(skstd::string_view name); + + DSLType(skstd::string_view name, + DSLModifiers* modifiers, + PositionInfo pos = PositionInfo::Capture()); + + /** + * Returns true if this type is a bool. + */ + bool isBoolean() const; + + /** + * Returns true if this is a numeric scalar type. + */ + bool isNumber() const; + + /** + * Returns true if this is a floating-point scalar type (float or half). + */ + bool isFloat() const; + + /** + * Returns true if this is a signed scalar type (int or short). + */ + bool isSigned() const; + + /** + * Returns true if this is an unsigned scalar type (uint or ushort). + */ + bool isUnsigned() const; + + /** + * Returns true if this is a signed or unsigned integer. 
+ */ + bool isInteger() const; + + /** + * Returns true if this is a scalar type. + */ + bool isScalar() const; + + /** + * Returns true if this is a vector type. + */ + bool isVector() const; + + /** + * Returns true if this is a matrix type. + */ + bool isMatrix() const; + + /** + * Returns true if this is a array type. + */ + bool isArray() const; + + /** + * Returns true if this is a struct type. + */ + bool isStruct() const; + + /** + * Returns true if this is a Skia object type (shader, colorFilter, blender). + */ + bool isEffectChild() const; + + template<typename... Args> + static DSLPossibleExpression Construct(DSLType type, DSLVarBase& var, Args&&... args) { + DSLExpression argArray[] = {var, args...}; + return Construct(type, SkMakeSpan(argArray)); + } + + template<typename... Args> + static DSLPossibleExpression Construct(DSLType type, DSLExpression expr, Args&&... args) { + DSLExpression argArray[] = {std::move(expr), std::move(args)...}; + return Construct(type, SkMakeSpan(argArray)); + } + + static DSLPossibleExpression Construct(DSLType type, SkSpan<DSLExpression> argArray); + +private: + const SkSL::Type& skslType() const; + + const SkSL::Type* fSkSLType = nullptr; + + TypeConstant fTypeConstant = kPoison_Type; + + friend DSLType Array(const DSLType& base, int count, PositionInfo pos); + friend DSLType Struct(skstd::string_view name, SkSpan<DSLField> fields, PositionInfo pos); + friend class DSLCore; + friend class DSLFunction; + friend class DSLVarBase; + friend class DSLWriter; +}; + +#define TYPE(T) \ + template<typename... Args> \ + DSLExpression T(Args&&... args) { \ + return DSLType::Construct(k ## T ## _Type, std::forward<Args>(args)...); \ + } + +#define VECTOR_TYPE(T) \ + TYPE(T) \ + TYPE(T ## 2) \ + TYPE(T ## 3) \ + TYPE(T ## 4) + +#define MATRIX_TYPE(T) \ + TYPE(T ## 2x2) \ + TYPE(T ## 3x2) \ + TYPE(T ## 4x2) \ + TYPE(T ## 2x3) \ + TYPE(T ## 3x3) \ + TYPE(T ## 4x3) \ + TYPE(T ## 2x4) \ + TYPE(T ## 3x4) \ + TYPE(T ## 4x4) + +VECTOR_TYPE(Bool) +VECTOR_TYPE(Float) +VECTOR_TYPE(Half) +VECTOR_TYPE(Int) +VECTOR_TYPE(UInt) +VECTOR_TYPE(Short) +VECTOR_TYPE(UShort) + +MATRIX_TYPE(Float) +MATRIX_TYPE(Half) + +#undef TYPE +#undef VECTOR_TYPE +#undef MATRIX_TYPE + +DSLType Array(const DSLType& base, int count, PositionInfo pos = PositionInfo::Capture()); + +class DSLField { +public: + DSLField(const DSLType type, skstd::string_view name, + PositionInfo pos = PositionInfo::Capture()) + : DSLField(DSLModifiers(), type, name, pos) {} + + DSLField(const DSLModifiers& modifiers, const DSLType type, skstd::string_view name, + PositionInfo pos = PositionInfo::Capture()) + : fModifiers(modifiers) + , fType(type) + , fName(name) + , fPosition(pos) {} + +private: + DSLModifiers fModifiers; + const DSLType fType; + skstd::string_view fName; + PositionInfo fPosition; + + friend class DSLCore; + friend DSLType Struct(skstd::string_view name, SkSpan<DSLField> fields, PositionInfo pos); +}; + +DSLType Struct(skstd::string_view name, SkSpan<DSLField> fields, + PositionInfo pos = PositionInfo::Capture()); + +template<typename... Field> +DSLType Struct(skstd::string_view name, Field... 
fields) { + DSLField fieldTypes[] = {std::move(fields)...}; + return Struct(name, SkMakeSpan(fieldTypes), PositionInfo()); +} + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/DSLVar.h b/src/deps/skia/include/sksl/DSLVar.h new file mode 100644 index 000000000..391474c5a --- /dev/null +++ b/src/deps/skia/include/sksl/DSLVar.h @@ -0,0 +1,310 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_VAR +#define SKSL_DSL_VAR + +#include "include/sksl/DSLExpression.h" +#include "include/sksl/DSLModifiers.h" +#include "include/sksl/DSLType.h" + +namespace SkSL { + +class Expression; +class IRGenerator; +class SPIRVCodeGenerator; +class Variable; +enum class VariableStorage : int8_t; + +namespace dsl { + +class DSLVarBase { +public: + /** + * Creates an empty, unpopulated var. Can be replaced with a real var later via `swap`. + */ + DSLVarBase() : fType(kVoid_Type), fDeclared(true) {} + + /** + * Constructs a new variable with the specified type and name. The name is used (in mangled + * form) in the resulting shader code; it is not otherwise important. Since mangling prevents + * name conflicts and the variable's name is only important when debugging shaders, the name + * parameter is optional. + */ + DSLVarBase(DSLType type, skstd::string_view name, DSLExpression initialValue, PositionInfo pos); + + DSLVarBase(DSLType type, DSLExpression initialValue, PositionInfo pos); + + DSLVarBase(const DSLModifiers& modifiers, DSLType type, skstd::string_view name, + DSLExpression initialValue, PositionInfo pos); + + DSLVarBase(const DSLModifiers& modifiers, DSLType type, DSLExpression initialValue, + PositionInfo pos); + + DSLVarBase(DSLVarBase&&) = default; + + virtual ~DSLVarBase(); + + skstd::string_view name() const { + return fName; + } + + const DSLModifiers& modifiers() const { + return fModifiers; + } + + virtual VariableStorage storage() const = 0; + + DSLExpression x() { + return DSLExpression(*this, PositionInfo()).x(); + } + + DSLExpression y() { + return DSLExpression(*this, PositionInfo()).y(); + } + + DSLExpression z() { + return DSLExpression(*this, PositionInfo()).z(); + } + + DSLExpression w() { + return DSLExpression(*this, PositionInfo()).w(); + } + + DSLExpression r() { + return DSLExpression(*this, PositionInfo()).r(); + } + + DSLExpression g() { + return DSLExpression(*this, PositionInfo()).g(); + } + + DSLExpression b() { + return DSLExpression(*this, PositionInfo()).b(); + } + + DSLExpression a() { + return DSLExpression(*this, PositionInfo()).a(); + } + + DSLExpression field(skstd::string_view name) { + return DSLExpression(*this, PositionInfo()).field(name); + } + + DSLPossibleExpression operator[](DSLExpression&& index); + + DSLPossibleExpression operator++() { + return ++DSLExpression(*this, PositionInfo()); + } + + DSLPossibleExpression operator++(int) { + return DSLExpression(*this, PositionInfo())++; + } + + DSLPossibleExpression operator--() { + return --DSLExpression(*this, PositionInfo()); + } + + DSLPossibleExpression operator--(int) { + return DSLExpression(*this, PositionInfo())--; + } + +protected: + DSLPossibleExpression assign(DSLExpression other); + + void swap(DSLVarBase& other); + + DSLModifiers fModifiers; + // We only need to keep track of the type here so that we can create the SkSL::Variable. 
For + // predefined variables this field is unnecessary, so we don't bother tracking it and just set + // it to kVoid; in other words, you shouldn't generally be relying on this field to be correct. + // If you need to determine the variable's type, look at DSLWriter::Var(...)->type() instead. + DSLType fType; + int fUniformHandle = -1; + std::unique_ptr<SkSL::Statement> fDeclaration; + const SkSL::Variable* fVar = nullptr; + skstd::string_view fRawName; // for error reporting + skstd::string_view fName; + DSLExpression fInitialValue; + // true if we have attempted to create the SkSL var + bool fInitialized = false; + bool fDeclared = false; + PositionInfo fPosition; + + friend class DSLCore; + friend class DSLExpression; + friend class DSLFunction; + friend class DSLWriter; + friend class ::SkSL::IRGenerator; + friend class ::SkSL::SPIRVCodeGenerator; +}; + +/** + * A local variable. + */ +class DSLVar : public DSLVarBase { +public: + DSLVar() = default; + + DSLVar(DSLType type, skstd::string_view name = "var", + DSLExpression initialValue = DSLExpression(), + PositionInfo pos = PositionInfo::Capture()) + : INHERITED(type, name, std::move(initialValue), pos) {} + + DSLVar(DSLType type, const char* name, DSLExpression initialValue = DSLExpression(), + PositionInfo pos = PositionInfo::Capture()) + : DSLVar(type, skstd::string_view(name), std::move(initialValue), pos) {} + + DSLVar(DSLType type, DSLExpression initialValue, PositionInfo pos = PositionInfo::Capture()) + : INHERITED(type, std::move(initialValue), pos) {} + + DSLVar(const DSLModifiers& modifiers, DSLType type, skstd::string_view name = "var", + DSLExpression initialValue = DSLExpression(), PositionInfo pos = PositionInfo::Capture()) + : INHERITED(modifiers, type, name, std::move(initialValue), pos) {} + + DSLVar(const DSLModifiers& modifiers, DSLType type, const char* name, + DSLExpression initialValue = DSLExpression(), PositionInfo pos = PositionInfo::Capture()) + : DSLVar(modifiers, type, skstd::string_view(name), std::move(initialValue), pos) {} + + DSLVar(DSLVar&&) = default; + + VariableStorage storage() const override; + + void swap(DSLVar& other); + + DSLPossibleExpression operator=(DSLExpression expr); + + DSLPossibleExpression operator=(DSLVar& param) { + return this->operator=(DSLExpression(param)); + } + + template<class Param> + DSLPossibleExpression operator=(Param& param) { + return this->operator=(DSLExpression(param)); + } + +private: + using INHERITED = DSLVarBase; +}; + +/** + * A global variable. 
+ */ +class DSLGlobalVar : public DSLVarBase { +public: + DSLGlobalVar() = default; + + DSLGlobalVar(DSLType type, skstd::string_view name = "var", + DSLExpression initialValue = DSLExpression(), PositionInfo pos = PositionInfo::Capture()) + : INHERITED(type, name, std::move(initialValue), pos) {} + + DSLGlobalVar(DSLType type, const char* name, DSLExpression initialValue = DSLExpression(), + PositionInfo pos = PositionInfo::Capture()) + : DSLGlobalVar(type, skstd::string_view(name), std::move(initialValue), pos) {} + + DSLGlobalVar(DSLType type, DSLExpression initialValue, + PositionInfo pos = PositionInfo::Capture()) + : INHERITED(type, std::move(initialValue), pos) {} + + DSLGlobalVar(const DSLModifiers& modifiers, DSLType type, skstd::string_view name = "var", + DSLExpression initialValue = DSLExpression(), PositionInfo pos = PositionInfo::Capture()) + : INHERITED(modifiers, type, name, std::move(initialValue), pos) {} + + DSLGlobalVar(const DSLModifiers& modifiers, DSLType type, const char* name, + DSLExpression initialValue = DSLExpression(), PositionInfo pos = PositionInfo::Capture()) + : DSLGlobalVar(modifiers, type, skstd::string_view(name), std::move(initialValue), pos) {} + + DSLGlobalVar(const char* name); + + DSLGlobalVar(DSLGlobalVar&&) = default; + + VariableStorage storage() const override; + + void swap(DSLGlobalVar& other); + + DSLPossibleExpression operator=(DSLExpression expr); + + DSLPossibleExpression operator=(DSLGlobalVar& param) { + return this->operator=(DSLExpression(param)); + } + + template<class Param> + DSLPossibleExpression operator=(Param& param) { + return this->operator=(DSLExpression(param)); + } + + /** + * Implements the following method calls: + * half4 shader::eval(float2 coords); + * half4 colorFilter::eval(half4 input); + */ + DSLExpression eval(DSLExpression x, PositionInfo pos = PositionInfo::Capture()); + + /** + * Implements the following method call: + * half4 blender::eval(half4 src, half4 dst); + */ + DSLExpression eval(DSLExpression x, DSLExpression y, + PositionInfo pos = PositionInfo::Capture()); + +private: + DSLExpression eval(ExpressionArray args, PositionInfo pos); + + std::unique_ptr<SkSL::Expression> methodCall(skstd::string_view methodName, PositionInfo pos); + + using INHERITED = DSLVarBase; +}; + +/** + * A function parameter. 
+ */ +class DSLParameter : public DSLVarBase { +public: + DSLParameter() = default; + + DSLParameter(DSLType type, skstd::string_view name = "var", + PositionInfo pos = PositionInfo::Capture()) + : INHERITED(type, name, DSLExpression(), pos) {} + + DSLParameter(DSLType type, const char* name, PositionInfo pos = PositionInfo::Capture()) + : DSLParameter(type, skstd::string_view(name), pos) {} + + DSLParameter(const DSLModifiers& modifiers, DSLType type, skstd::string_view name = "var", + PositionInfo pos = PositionInfo::Capture()) + : INHERITED(modifiers, type, name, DSLExpression(), pos) {} + + DSLParameter(const DSLModifiers& modifiers, DSLType type, const char* name, + PositionInfo pos = PositionInfo::Capture()) + : DSLParameter(modifiers, type, skstd::string_view(name), pos) {} + + DSLParameter(DSLParameter&&) = default; + + VariableStorage storage() const override; + + void swap(DSLParameter& other); + + DSLPossibleExpression operator=(DSLExpression expr); + + DSLPossibleExpression operator=(DSLParameter& param) { + return this->operator=(DSLExpression(param)); + } + + template<class Param> + DSLPossibleExpression operator=(Param& param) { + return this->operator=(DSLExpression(param)); + } + +private: + using INHERITED = DSLVarBase; +}; + +} // namespace dsl + +} // namespace SkSL + + +#endif diff --git a/src/deps/skia/include/sksl/DSLWrapper.h b/src/deps/skia/include/sksl/DSLWrapper.h new file mode 100644 index 000000000..96daa22dd --- /dev/null +++ b/src/deps/skia/include/sksl/DSLWrapper.h @@ -0,0 +1,77 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_DSL_WRAPPER +#define SKSL_DSL_WRAPPER + +#include <memory> + +namespace SkSL { + +namespace dsl { + +/** + * Several of the DSL classes override operator= in a non-standard fashion to allow for expressions + * like "x = 0" to compile into SkSL code. This makes it impossible to directly use these classes in + * C++ containers which expect standard behavior for operator=. + * + * Wrapper<T> contains a T, where T is a DSL class with non-standard operator=, and provides + * standard behavior for operator=, permitting it to be used in standard containers. + */ +template<typename T> +class DSLWrapper { +public: + DSLWrapper(T value) { + fValue.swap(value); + } + + DSLWrapper(const DSLWrapper&) = delete; + + DSLWrapper(DSLWrapper&& other) { + fValue.swap(other.fValue); + } + + T& get() { + return fValue; + } + + T& operator*() { + return fValue; + } + + T* operator->() { + return &fValue; + } + + const T& get() const { + return fValue; + } + + const T& operator*() const { + return fValue; + } + + const T* operator->() const { + return &fValue; + } + + DSLWrapper& operator=(const DSLWrapper&) = delete; + + DSLWrapper& operator=(DSLWrapper&& other) { + fValue.swap(other.fValue); + return *this; + } + +private: + T fValue; +}; + +} // namespace dsl + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/SkSLDebugTrace.h b/src/deps/skia/include/sksl/SkSLDebugTrace.h new file mode 100644 index 000000000..9c5eafbc9 --- /dev/null +++ b/src/deps/skia/include/sksl/SkSLDebugTrace.h @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SKSL_DEBUG_TRACE +#define SKSL_DEBUG_TRACE + +#include "include/core/SkRefCnt.h" + +class SkWStream; + +namespace SkSL { + +class DebugTrace : public SkRefCnt { +public: + /** Serializes a debug trace to JSON which can be parsed by our debugger. */ + virtual void writeTrace(SkWStream* w) const = 0; + + /** Generates a human-readable dump of the debug trace. */ + virtual void dump(SkWStream* o) const = 0; +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/sksl/SkSLErrorReporter.h b/src/deps/skia/include/sksl/SkSLErrorReporter.h new file mode 100644 index 000000000..48cc0958e --- /dev/null +++ b/src/deps/skia/include/sksl/SkSLErrorReporter.h @@ -0,0 +1,115 @@ +/* + * Copyright 2021 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SKSL_ERROR_REPORTER +#define SKSL_ERROR_REPORTER + +#include "include/core/SkStringView.h" +#include "include/core/SkTypes.h" +#include "include/private/SkSLString.h" + +#include <string> +#include <vector> + +namespace SkSL { + +#ifndef __has_builtin + #define __has_builtin(x) 0 +#endif + +class PositionInfo { +public: + PositionInfo(const char* file = nullptr, int line = -1) + : fFile(file) + , fLine(line) {} + +#if __has_builtin(__builtin_FILE) && __has_builtin(__builtin_LINE) + static PositionInfo Capture(const char* file = __builtin_FILE(), int line = __builtin_LINE()) { + return PositionInfo(file, line); + } +#else + static PositionInfo Capture() { return PositionInfo(); } +#endif // __has_builtin(__builtin_FILE) && __has_builtin(__builtin_LINE) + + const char* file_name() const { + return fFile; + } + + int line() const { + return fLine; + } + +private: + const char* fFile = nullptr; + int32_t fLine = -1; +}; + +/** + * Class which is notified in the event of an error. + */ +class ErrorReporter { +public: + ErrorReporter() {} + + virtual ~ErrorReporter() { + SkASSERT(fPendingErrors.empty()); + } + + void error(skstd::string_view msg, PositionInfo position); + + /** + * Reports an error message at the given line of the source text. Errors reported + * with a line of -1 will be queued until line number information can be determined. + */ + void error(int line, skstd::string_view msg); + + const char* source() const { return fSource; } + + void setSource(const char* source) { fSource = source; } + + void reportPendingErrors(PositionInfo pos) { + for (const String& msg : fPendingErrors) { + this->handleError(msg, pos); + } + fPendingErrors.clear(); + } + + int errorCount() const { + return fErrorCount; + } + + void resetErrorCount() { + fErrorCount = 0; + } + +protected: + /** + * Called when an error is reported. + */ + virtual void handleError(skstd::string_view msg, PositionInfo position) = 0; + +private: + PositionInfo position(int offset) const; + + const char* fSource = nullptr; + std::vector<String> fPendingErrors; + int fErrorCount = 0; +}; + +/** + * Error reporter for tests that need an SkSL context; aborts immediately if an error is reported. 
+ */ +class TestingOnly_AbortErrorReporter : public ErrorReporter { +public: + void handleError(skstd::string_view msg, PositionInfo pos) override { + SK_ABORT("%.*s", (int)msg.length(), msg.data()); + } +}; + +} // namespace SkSL + +#endif diff --git a/src/deps/skia/include/svg/BUILD.bazel b/src/deps/skia/include/svg/BUILD.bazel new file mode 100644 index 000000000..7be34715f --- /dev/null +++ b/src/deps/skia/include/svg/BUILD.bazel @@ -0,0 +1,8 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkSVGCanvas_hdr", + hdrs = ["SkSVGCanvas.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkCanvas_hdr"], +) diff --git a/src/deps/skia/include/svg/SkSVGCanvas.h b/src/deps/skia/include/svg/SkSVGCanvas.h new file mode 100644 index 000000000..2d9858a79 --- /dev/null +++ b/src/deps/skia/include/svg/SkSVGCanvas.h @@ -0,0 +1,37 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkSVGCanvas_DEFINED +#define SkSVGCanvas_DEFINED + +#include "include/core/SkCanvas.h" + +class SkWStream; + +class SK_API SkSVGCanvas { +public: + enum { + kConvertTextToPaths_Flag = 0x01, // emit text as <path>s + kNoPrettyXML_Flag = 0x02, // suppress newlines and tabs in output + kRelativePathEncoding_Flag = 0x04, // use relative commands for path encoding + }; + + /** + * Returns a new canvas that will generate SVG commands from its draw calls, and send + * them to the provided stream. Ownership of the stream is not transfered, and it must + * remain valid for the lifetime of the returned canvas. + * + * The canvas may buffer some drawing calls, so the output is not guaranteed to be valid + * or complete until the canvas instance is deleted. + * + * The 'bounds' parameter defines an initial SVG viewport (viewBox attribute on the root + * SVG element). + */ + static std::unique_ptr<SkCanvas> Make(const SkRect& bounds, SkWStream*, uint32_t flags = 0); +}; + +#endif diff --git a/src/deps/skia/include/third_party/skcms/BUILD.bazel b/src/deps/skia/include/third_party/skcms/BUILD.bazel new file mode 100644 index 000000000..0aef4dd49 --- /dev/null +++ b/src/deps/skia/include/third_party/skcms/BUILD.bazel @@ -0,0 +1,7 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "skcms_hdr", + hdrs = ["skcms.h"], + visibility = ["//:__subpackages__"], +) diff --git a/src/deps/skia/include/third_party/skcms/LICENSE b/src/deps/skia/include/third_party/skcms/LICENSE new file mode 100644 index 000000000..6c7c5be36 --- /dev/null +++ b/src/deps/skia/include/third_party/skcms/LICENSE @@ -0,0 +1,29 @@ +// Copyright (c) 2018 Google Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- diff --git a/src/deps/skia/include/third_party/skcms/OWNERS b/src/deps/skia/include/third_party/skcms/OWNERS new file mode 100644 index 000000000..cc36d27e3 --- /dev/null +++ b/src/deps/skia/include/third_party/skcms/OWNERS @@ -0,0 +1,2 @@ +# The auto-roller directly checks in skcms, so give it ownership as well: +skia-autoroll@skia-public.iam.gserviceaccount.com diff --git a/src/deps/skia/include/third_party/skcms/skcms.h b/src/deps/skia/include/third_party/skcms/skcms.h new file mode 100644 index 000000000..3e02d9548 --- /dev/null +++ b/src/deps/skia/include/third_party/skcms/skcms.h @@ -0,0 +1,394 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#pragma once + +// skcms.h contains the entire public API for skcms. + +#ifndef SKCMS_API + #define SKCMS_API +#endif + +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> +#include <string.h> + +#ifdef __cplusplus +extern "C" { +#endif + +// A row-major 3x3 matrix (ie vals[row][col]) +typedef struct skcms_Matrix3x3 { + float vals[3][3]; +} skcms_Matrix3x3; + +// It is _not_ safe to alias the pointers to invert in-place. +SKCMS_API bool skcms_Matrix3x3_invert(const skcms_Matrix3x3*, skcms_Matrix3x3*); +SKCMS_API skcms_Matrix3x3 skcms_Matrix3x3_concat(const skcms_Matrix3x3*, const skcms_Matrix3x3*); + +// A row-major 3x4 matrix (ie vals[row][col]) +typedef struct skcms_Matrix3x4 { + float vals[3][4]; +} skcms_Matrix3x4; + +// A transfer function mapping encoded values to linear values, +// represented by this 7-parameter piecewise function: +// +// linear = sign(encoded) * (c*|encoded| + f) , 0 <= |encoded| < d +// = sign(encoded) * ((a*|encoded| + b)^g + e), d <= |encoded| +// +// (A simple gamma transfer function sets g to gamma and a to 1.) +typedef struct skcms_TransferFunction { + float g, a,b,c,d,e,f; +} skcms_TransferFunction; + +SKCMS_API float skcms_TransferFunction_eval (const skcms_TransferFunction*, float); +SKCMS_API bool skcms_TransferFunction_invert(const skcms_TransferFunction*, + skcms_TransferFunction*); + +// We can jam a couple alternate transfer function forms into skcms_TransferFunction, +// including those matching the general forms of the SMPTE ST 2084 PQ function or HLG. 
+// +// PQish: +// max(A + B|encoded|^C, 0) +// linear = sign(encoded) * (------------------------) ^ F +// D + E|encoded|^C +SKCMS_API bool skcms_TransferFunction_makePQish(skcms_TransferFunction*, + float A, float B, float C, + float D, float E, float F); +// HLGish: +// { K * sign(encoded) * ( (R|encoded|)^G ) when 0 <= |encoded| <= 1/R +// linear = { K * sign(encoded) * ( e^(a(|encoded|-c)) + b ) when 1/R < |encoded| +SKCMS_API bool skcms_TransferFunction_makeScaledHLGish(skcms_TransferFunction*, + float K, float R, float G, + float a, float b, float c); + +// Compatibility shim with K=1 for old callers. +static inline bool skcms_TransferFunction_makeHLGish(skcms_TransferFunction* fn, + float R, float G, + float a, float b, float c) { + return skcms_TransferFunction_makeScaledHLGish(fn, 1.0f, R,G, a,b,c); +} + +// PQ mapping encoded [0,1] to linear [0,1]. +static inline bool skcms_TransferFunction_makePQ(skcms_TransferFunction* tf) { + return skcms_TransferFunction_makePQish(tf, -107/128.0f, 1.0f, 32/2523.0f + , 2413/128.0f, -2392/128.0f, 8192/1305.0f); +} +// HLG mapping encoded [0,1] to linear [0,12]. +static inline bool skcms_TransferFunction_makeHLG(skcms_TransferFunction* tf) { + return skcms_TransferFunction_makeHLGish(tf, 2.0f, 2.0f + , 1/0.17883277f, 0.28466892f, 0.55991073f); +} + +// Is this an ordinary sRGB-ish transfer function, or one of the HDR forms we support? +SKCMS_API bool skcms_TransferFunction_isSRGBish(const skcms_TransferFunction*); +SKCMS_API bool skcms_TransferFunction_isPQish (const skcms_TransferFunction*); +SKCMS_API bool skcms_TransferFunction_isHLGish (const skcms_TransferFunction*); + +// Unified representation of 'curv' or 'para' tag data, or a 1D table from 'mft1' or 'mft2' +typedef union skcms_Curve { + struct { + uint32_t alias_of_table_entries; + skcms_TransferFunction parametric; + }; + struct { + uint32_t table_entries; + const uint8_t* table_8; + const uint8_t* table_16; + }; +} skcms_Curve; + +// Complex transforms between device space (A) and profile connection space (B): +// A2B: device -> [ "A" curves -> CLUT ] -> [ "M" curves -> matrix ] -> "B" curves -> PCS +// B2A: device <- [ "A" curves <- CLUT ] <- [ "M" curves <- matrix ] <- "B" curves <- PCS + +typedef struct skcms_A2B { + // Optional: N 1D "A" curves, followed by an N-dimensional CLUT. + // If input_channels == 0, these curves and CLUT are skipped, + // Otherwise, input_channels must be in [1, 4]. + uint32_t input_channels; + skcms_Curve input_curves[4]; + uint8_t grid_points[4]; + const uint8_t* grid_8; + const uint8_t* grid_16; + + // Optional: 3 1D "M" curves, followed by a color matrix. + // If matrix_channels == 0, these curves and matrix are skipped, + // Otherwise, matrix_channels must be 3. + uint32_t matrix_channels; + skcms_Curve matrix_curves[3]; + skcms_Matrix3x4 matrix; + + // Required: 3 1D "B" curves. Always present, and output_channels must be 3. + uint32_t output_channels; + skcms_Curve output_curves[3]; +} skcms_A2B; + +typedef struct skcms_B2A { + // Required: 3 1D "B" curves. Always present, and input_channels must be 3. + uint32_t input_channels; + skcms_Curve input_curves[3]; + + // Optional: a color matrix, followed by 3 1D "M" curves. + // If matrix_channels == 0, this matrix and these curves are skipped, + // Otherwise, matrix_channels must be 3. + uint32_t matrix_channels; + skcms_Matrix3x4 matrix; + skcms_Curve matrix_curves[3]; + + // Optional: an N-dimensional CLUT, followed by N 1D "A" curves. 
+ // If output_channels == 0, this CLUT and these curves are skipped, + // Otherwise, output_channels must be in [1, 4]. + uint32_t output_channels; + uint8_t grid_points[4]; + const uint8_t* grid_8; + const uint8_t* grid_16; + skcms_Curve output_curves[4]; +} skcms_B2A; + + +typedef struct skcms_ICCProfile { + const uint8_t* buffer; + + uint32_t size; + uint32_t data_color_space; + uint32_t pcs; + uint32_t tag_count; + + // skcms_Parse() will set commonly-used fields for you when possible: + + // If we can parse red, green and blue transfer curves from the profile, + // trc will be set to those three curves, and has_trc will be true. + bool has_trc; + skcms_Curve trc[3]; + + // If this profile's gamut can be represented by a 3x3 transform to XYZD50, + // skcms_Parse() sets toXYZD50 to that transform and has_toXYZD50 to true. + bool has_toXYZD50; + skcms_Matrix3x3 toXYZD50; + + // If the profile has a valid A2B0 or A2B1 tag, skcms_Parse() sets A2B to + // that data, and has_A2B to true. skcms_ParseWithA2BPriority() does the + // same following any user-provided prioritization of A2B0, A2B1, or A2B2. + bool has_A2B; + skcms_A2B A2B; + + // If the profile has a valid B2A0 or B2A1 tag, skcms_Parse() sets B2A to + // that data, and has_B2A to true. skcms_ParseWithA2BPriority() does the + // same following any user-provided prioritization of B2A0, B2A1, or B2A2. + bool has_B2A; + skcms_B2A B2A; + +} skcms_ICCProfile; + +// The sRGB color profile is so commonly used that we offer a canonical skcms_ICCProfile for it. +SKCMS_API const skcms_ICCProfile* skcms_sRGB_profile(void); +// Ditto for XYZD50, the most common profile connection space. +SKCMS_API const skcms_ICCProfile* skcms_XYZD50_profile(void); + +SKCMS_API const skcms_TransferFunction* skcms_sRGB_TransferFunction(void); +SKCMS_API const skcms_TransferFunction* skcms_sRGB_Inverse_TransferFunction(void); +SKCMS_API const skcms_TransferFunction* skcms_Identity_TransferFunction(void); + +// Practical equality test for two skcms_ICCProfiles. +// The implementation is subject to change, but it will always try to answer +// "can I substitute A for B?" and "can I skip transforming from A to B?". +SKCMS_API bool skcms_ApproximatelyEqualProfiles(const skcms_ICCProfile* A, + const skcms_ICCProfile* B); + +// Practical test that answers: Is curve roughly the inverse of inv_tf? Typically used by passing +// the inverse of a known parametric transfer function (like sRGB), to determine if a particular +// curve is very close to sRGB. +SKCMS_API bool skcms_AreApproximateInverses(const skcms_Curve* curve, + const skcms_TransferFunction* inv_tf); + +// Similar to above, answering the question for all three TRC curves of the given profile. Again, +// passing skcms_sRGB_InverseTransferFunction as inv_tf will answer the question: +// "Does this profile have a transfer function that is very close to sRGB?" +SKCMS_API bool skcms_TRCs_AreApproximateInverse(const skcms_ICCProfile* profile, + const skcms_TransferFunction* inv_tf); + +// Parse an ICC profile and return true if possible, otherwise return false. +// Selects an A2B profile (if present) according to priority list (each entry 0-2). +// The buffer is not copied; it must remain valid as long as the skcms_ICCProfile will be used. 
+SKCMS_API bool skcms_ParseWithA2BPriority(const void*, size_t, + const int priority[], int priorities, + skcms_ICCProfile*); + +static inline bool skcms_Parse(const void* buf, size_t len, skcms_ICCProfile* profile) { + // For continuity of existing user expectations, + // prefer A2B0 (perceptual) over A2B1 (relative colormetric), and ignore A2B2 (saturation). + const int priority[] = {0,1}; + return skcms_ParseWithA2BPriority(buf, len, + priority, sizeof(priority)/sizeof(*priority), + profile); +} + +SKCMS_API bool skcms_ApproximateCurve(const skcms_Curve* curve, + skcms_TransferFunction* approx, + float* max_error); + +SKCMS_API bool skcms_GetCHAD(const skcms_ICCProfile*, skcms_Matrix3x3*); +SKCMS_API bool skcms_GetWTPT(const skcms_ICCProfile*, float xyz[3]); + +// These are common ICC signature values +enum { + // data_color_space + skcms_Signature_CMYK = 0x434D594B, + skcms_Signature_Gray = 0x47524159, + skcms_Signature_RGB = 0x52474220, + + // pcs + skcms_Signature_Lab = 0x4C616220, + skcms_Signature_XYZ = 0x58595A20, +}; + +typedef enum skcms_PixelFormat { + skcms_PixelFormat_A_8, + skcms_PixelFormat_A_8_, + skcms_PixelFormat_G_8, + skcms_PixelFormat_G_8_, + skcms_PixelFormat_RGBA_8888_Palette8, + skcms_PixelFormat_BGRA_8888_Palette8, + + skcms_PixelFormat_RGB_565, + skcms_PixelFormat_BGR_565, + + skcms_PixelFormat_ABGR_4444, + skcms_PixelFormat_ARGB_4444, + + skcms_PixelFormat_RGB_888, + skcms_PixelFormat_BGR_888, + skcms_PixelFormat_RGBA_8888, + skcms_PixelFormat_BGRA_8888, + skcms_PixelFormat_RGBA_8888_sRGB, // Automatic sRGB encoding / decoding. + skcms_PixelFormat_BGRA_8888_sRGB, // (Generally used with linear transfer functions.) + + skcms_PixelFormat_RGBA_1010102, + skcms_PixelFormat_BGRA_1010102, + + skcms_PixelFormat_RGB_161616LE, // Little-endian. Pointers must be 16-bit aligned. + skcms_PixelFormat_BGR_161616LE, + skcms_PixelFormat_RGBA_16161616LE, + skcms_PixelFormat_BGRA_16161616LE, + + skcms_PixelFormat_RGB_161616BE, // Big-endian. Pointers must be 16-bit aligned. + skcms_PixelFormat_BGR_161616BE, + skcms_PixelFormat_RGBA_16161616BE, + skcms_PixelFormat_BGRA_16161616BE, + + skcms_PixelFormat_RGB_hhh_Norm, // 1-5-10 half-precision float in [0,1] + skcms_PixelFormat_BGR_hhh_Norm, // Pointers must be 16-bit aligned. + skcms_PixelFormat_RGBA_hhhh_Norm, + skcms_PixelFormat_BGRA_hhhh_Norm, + + skcms_PixelFormat_RGB_hhh, // 1-5-10 half-precision float. + skcms_PixelFormat_BGR_hhh, // Pointers must be 16-bit aligned. + skcms_PixelFormat_RGBA_hhhh, + skcms_PixelFormat_BGRA_hhhh, + + skcms_PixelFormat_RGB_fff, // 1-8-23 single-precision float (the normal kind). + skcms_PixelFormat_BGR_fff, // Pointers must be 32-bit aligned. + skcms_PixelFormat_RGBA_ffff, + skcms_PixelFormat_BGRA_ffff, +} skcms_PixelFormat; + +// We always store any alpha channel linearly. In the chart below, tf-1() is the inverse +// transfer function for the given color profile (applying the transfer function linearizes). + +// We treat opaque as a strong requirement, not just a performance hint: we will ignore +// any source alpha and treat it as 1.0, and will make sure that any destination alpha +// channel is filled with the equivalent of 1.0. + +// We used to offer multiple types of premultiplication, but now just one, PremulAsEncoded. +// This is the premul you're probably used to working with. 
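// A minimal usage sketch of how the pixel formats above combine with the alpha
// formats and skcms_Transform() declared just below: converting premultiplied
// sRGB BGRA pixels into an unpremultiplied RGBA destination profile. The wrapper
// function, buffer names, and pixel count are hypothetical, shown only to
// illustrate the argument order; the skcms entry points themselves are the ones
// declared in this header.

#include "skcms.h"

// Hypothetical helper: returns false if the transform could not be performed.
static bool to_dst_profile_example(const uint32_t* src, uint32_t* dst, size_t npixels,
                                   const skcms_ICCProfile* dst_profile) {
    return skcms_Transform(src,
                           skcms_PixelFormat_BGRA_8888,
                           skcms_AlphaFormat_PremulAsEncoded,   // source is premultiplied
                           skcms_sRGB_profile(),                // source pixels are sRGB
                           dst,
                           skcms_PixelFormat_RGBA_8888,
                           skcms_AlphaFormat_Unpremul,          // destination is unpremultiplied
                           dst_profile,
                           npixels);
}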
+ +typedef enum skcms_AlphaFormat { + skcms_AlphaFormat_Opaque, // alpha is always opaque + // tf-1(r), tf-1(g), tf-1(b), 1.0 + skcms_AlphaFormat_Unpremul, // alpha and color are unassociated + // tf-1(r), tf-1(g), tf-1(b), a + skcms_AlphaFormat_PremulAsEncoded, // premultiplied while encoded + // tf-1(r)*a, tf-1(g)*a, tf-1(b)*a, a +} skcms_AlphaFormat; + +// Convert npixels pixels from src format and color profile to dst format and color profile +// and return true, otherwise return false. It is safe to alias dst == src if dstFmt == srcFmt. +SKCMS_API bool skcms_Transform(const void* src, + skcms_PixelFormat srcFmt, + skcms_AlphaFormat srcAlpha, + const skcms_ICCProfile* srcProfile, + void* dst, + skcms_PixelFormat dstFmt, + skcms_AlphaFormat dstAlpha, + const skcms_ICCProfile* dstProfile, + size_t npixels); + +// As skcms_Transform(), supporting srcFmts with a palette. +SKCMS_API bool skcms_TransformWithPalette(const void* src, + skcms_PixelFormat srcFmt, + skcms_AlphaFormat srcAlpha, + const skcms_ICCProfile* srcProfile, + void* dst, + skcms_PixelFormat dstFmt, + skcms_AlphaFormat dstAlpha, + const skcms_ICCProfile* dstProfile, + size_t npixels, + const void* palette); + +// If profile can be used as a destination in skcms_Transform, return true. Otherwise, attempt to +// rewrite it with approximations where reasonable. If successful, return true. If no reasonable +// approximation exists, leave the profile unchanged and return false. +SKCMS_API bool skcms_MakeUsableAsDestination(skcms_ICCProfile* profile); + +// If profile can be used as a destination with a single parametric transfer function (ie for +// rasterization), return true. Otherwise, attempt to rewrite it with approximations where +// reasonable. If successful, return true. If no reasonable approximation exists, leave the +// profile unchanged and return false. +SKCMS_API bool skcms_MakeUsableAsDestinationWithSingleCurve(skcms_ICCProfile* profile); + +// Returns a matrix to adapt XYZ color from given the whitepoint to D50. +SKCMS_API bool skcms_AdaptToXYZD50(float wx, float wy, + skcms_Matrix3x3* toXYZD50); + +// Returns a matrix to convert RGB color into XYZ adapted to D50, given the +// primaries and whitepoint of the RGB model. +SKCMS_API bool skcms_PrimariesToXYZD50(float rx, float ry, + float gx, float gy, + float bx, float by, + float wx, float wy, + skcms_Matrix3x3* toXYZD50); + +// Call before your first call to skcms_Transform() to skip runtime CPU detection. +SKCMS_API void skcms_DisableRuntimeCPUDetection(void); + +// Utilities for programmatically constructing profiles +static inline void skcms_Init(skcms_ICCProfile* p) { + memset(p, 0, sizeof(*p)); + p->data_color_space = skcms_Signature_RGB; + p->pcs = skcms_Signature_XYZ; +} + +static inline void skcms_SetTransferFunction(skcms_ICCProfile* p, + const skcms_TransferFunction* tf) { + p->has_trc = true; + for (int i = 0; i < 3; ++i) { + p->trc[i].table_entries = 0; + p->trc[i].parametric = *tf; + } +} + +static inline void skcms_SetXYZD50(skcms_ICCProfile* p, const skcms_Matrix3x3* m) { + p->has_toXYZD50 = true; + p->toXYZD50 = *m; +} + +#ifdef __cplusplus +} +#endif diff --git a/src/deps/skia/include/third_party/vulkan/LICENSE b/src/deps/skia/include/third_party/vulkan/LICENSE new file mode 100644 index 000000000..6c7c5be36 --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/LICENSE @@ -0,0 +1,29 @@ +// Copyright (c) 2018 Google Inc. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/BUILD.bazel b/src/deps/skia/include/third_party/vulkan/vulkan/BUILD.bazel new file mode 100644 index 000000000..73abd998f --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/BUILD.bazel @@ -0,0 +1,49 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "vk_platform_hdr", + hdrs = ["vk_platform.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "vulkan_android_hdr", + hdrs = ["vulkan_android.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "vulkan_core_hdr", + hdrs = ["vulkan_core.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "vulkan_hdr", + hdrs = ["vulkan.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "vulkan_ios_hdr", + hdrs = ["vulkan_ios.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "vulkan_macos_hdr", + hdrs = ["vulkan_macos.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "vulkan_win32_hdr", + hdrs = ["vulkan_win32.h"], + visibility = ["//:__subpackages__"], +) + +generated_cc_atom( + name = "vulkan_xcb_hdr", + hdrs = ["vulkan_xcb.h"], + visibility = ["//:__subpackages__"], +) diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vk_platform.h b/src/deps/skia/include/third_party/vulkan/vulkan/vk_platform.h new file mode 100644 index 000000000..18b913abc --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vk_platform.h @@ -0,0 +1,84 @@ +// +// File: vk_platform.h +// +/* +** Copyright 2014-2021 The Khronos Group Inc. 
+** +** SPDX-License-Identifier: Apache-2.0 +*/ + + +#ifndef VK_PLATFORM_H_ +#define VK_PLATFORM_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif // __cplusplus + +/* +*************************************************************************************************** +* Platform-specific directives and type declarations +*************************************************************************************************** +*/ + +/* Platform-specific calling convention macros. + * + * Platforms should define these so that Vulkan clients call Vulkan commands + * with the same calling conventions that the Vulkan implementation expects. + * + * VKAPI_ATTR - Placed before the return type in function declarations. + * Useful for C++11 and GCC/Clang-style function attribute syntax. + * VKAPI_CALL - Placed after the return type in function declarations. + * Useful for MSVC-style calling convention syntax. + * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. + * + * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); + * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); + */ +#if defined(_WIN32) + // On Windows, Vulkan commands use the stdcall convention + #define VKAPI_ATTR + #define VKAPI_CALL __stdcall + #define VKAPI_PTR VKAPI_CALL +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 + #error "Vulkan isn't supported for the 'armeabi' NDK ABI" +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) + // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" + // calling convention, i.e. float parameters are passed in registers. This + // is true even if the rest of the application passes floats on the stack, + // as it does by default when compiling for the armeabi-v7a NDK ABI. + #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) + #define VKAPI_CALL + #define VKAPI_PTR VKAPI_ATTR +#else + // On other platforms, use the default calling convention + #define VKAPI_ATTR + #define VKAPI_CALL + #define VKAPI_PTR +#endif + +#if !defined(VK_NO_STDDEF_H) + #include <stddef.h> +#endif // !defined(VK_NO_STDDEF_H) + +#if !defined(VK_NO_STDINT_H) + #if defined(_MSC_VER) && (_MSC_VER < 1600) + typedef signed __int8 int8_t; + typedef unsigned __int8 uint8_t; + typedef signed __int16 int16_t; + typedef unsigned __int16 uint16_t; + typedef signed __int32 int32_t; + typedef unsigned __int32 uint32_t; + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + #else + #include <stdint.h> + #endif +#endif // !defined(VK_NO_STDINT_H) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vulkan.h b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan.h new file mode 100644 index 000000000..b187c9c17 --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan.h @@ -0,0 +1,93 @@ +#ifndef VULKAN_H_ +#define VULKAN_H_ 1 + +/* +** Copyright 2015-2021 The Khronos Group Inc. 
+** +** SPDX-License-Identifier: Apache-2.0 +*/ + +#include "vk_platform.h" +#include "vulkan_core.h" + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#include "vulkan_android.h" +#endif + +#ifdef VK_USE_PLATFORM_FUCHSIA +#include <zircon/types.h> +#include "vulkan_fuchsia.h" +#endif + +#ifdef VK_USE_PLATFORM_IOS_MVK +#include "vulkan_ios.h" +#endif + + +#ifdef VK_USE_PLATFORM_MACOS_MVK +#include "vulkan_macos.h" +#endif + +#ifdef VK_USE_PLATFORM_METAL_EXT +#include "vulkan_metal.h" +#endif + +#ifdef VK_USE_PLATFORM_VI_NN +#include "vulkan_vi.h" +#endif + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#include <wayland-client.h> +#include "vulkan_wayland.h" +#endif + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include <windows.h> +#include "vulkan_win32.h" +#endif + + +#ifdef VK_USE_PLATFORM_XCB_KHR +#include <xcb/xcb.h> +#include "vulkan_xcb.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_KHR +#include <X11/Xlib.h> +#include "vulkan_xlib.h" +#endif + + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT +#include <directfb.h> +#include "vulkan_directfb.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +#include <X11/Xlib.h> +#include <X11/extensions/Xrandr.h> +#include "vulkan_xlib_xrandr.h" +#endif + + +#ifdef VK_USE_PLATFORM_GGP +#include <ggp_c/vulkan_types.h> +#include "vulkan_ggp.h" +#endif + + +#ifdef VK_USE_PLATFORM_SCREEN_QNX +#include <screen/screen.h> +#include "vulkan_screen.h" +#endif + + +#ifdef VK_ENABLE_BETA_EXTENSIONS +#include "vulkan_beta.h" +#endif + +#endif // VULKAN_H_ diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_android.h b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_android.h new file mode 100644 index 000000000..2160e3e7c --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_android.h @@ -0,0 +1,112 @@ +#ifndef VULKAN_ANDROID_H_ +#define VULKAN_ANDROID_H_ 1 + +/* +** Copyright 2015-2021 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_android_surface 1 +struct ANativeWindow; +#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 +#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface" +typedef VkFlags VkAndroidSurfaceCreateFlagsKHR; +typedef struct VkAndroidSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; +} VkAndroidSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR( + VkInstance instance, + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_ANDROID_external_memory_android_hardware_buffer 1 +struct AHardwareBuffer; +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3 +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer" +typedef struct VkAndroidHardwareBufferUsageANDROID { + VkStructureType sType; + void* pNext; + uint64_t androidHardwareBufferUsage; +} VkAndroidHardwareBufferUsageANDROID; + +typedef struct VkAndroidHardwareBufferPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeBits; +} VkAndroidHardwareBufferPropertiesANDROID; + +typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkFormat format; + uint64_t externalFormat; + VkFormatFeatureFlags formatFeatures; + VkComponentMapping samplerYcbcrConversionComponents; + VkSamplerYcbcrModelConversion suggestedYcbcrModel; + VkSamplerYcbcrRange suggestedYcbcrRange; + VkChromaLocation suggestedXChromaOffset; + VkChromaLocation suggestedYChromaOffset; +} VkAndroidHardwareBufferFormatPropertiesANDROID; + +typedef struct VkImportAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + struct AHardwareBuffer* buffer; +} VkImportAndroidHardwareBufferInfoANDROID; + +typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkMemoryGetAndroidHardwareBufferInfoANDROID; + +typedef struct VkExternalFormatANDROID { + VkStructureType sType; + void* pNext; + uint64_t externalFormat; +} VkExternalFormatANDROID; + +typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID( + VkDevice device, + const struct AHardwareBuffer* buffer, + VkAndroidHardwareBufferPropertiesANDROID* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID( + VkDevice device, + const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, + struct AHardwareBuffer** pBuffer); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_core.h 
b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_core.h new file mode 100644 index 000000000..3b4fe629c --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_core.h @@ -0,0 +1,12620 @@ +#ifndef VULKAN_CORE_H_ +#define VULKAN_CORE_H_ 1 + +/* +** Copyright 2015-2021 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_VERSION_1_0 1 +#include "vk_platform.h" + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#ifndef VK_USE_64_BIT_PTR_DEFINES + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VK_USE_64_BIT_PTR_DEFINES 1 + #else + #define VK_USE_64_BIT_PTR_DEFINES 0 + #endif +#endif + + +#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE + #if (VK_USE_64_BIT_PTR_DEFINES==1) + #if __cplusplus >= 201103L || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L)) + #define VK_NULL_HANDLE nullptr + #else + #define VK_NULL_HANDLE ((void*)0) + #endif + #else + #define VK_NULL_HANDLE 0ULL + #endif +#endif +#ifndef VK_NULL_HANDLE + #define VK_NULL_HANDLE 0 +#endif + + +#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE + #if (VK_USE_64_BIT_PTR_DEFINES==1) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; + #else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; + #endif +#endif + +// DEPRECATED: This define is deprecated. VK_MAKE_API_VERSION should be used instead. +#define VK_MAKE_VERSION(major, minor, patch) \ + ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch))) + +// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. +//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 + +#define VK_MAKE_API_VERSION(variant, major, minor, patch) \ + ((((uint32_t)(variant)) << 29) | (((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch))) + +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_API_VERSION(0, 1, 0, 0)// Patch version should always be set to 0 + +// Version of this file +#define VK_HEADER_VERSION 176 + +// Complete version of this file +#define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 2, VK_HEADER_VERSION) + +// DEPRECATED: This define is deprecated. VK_API_VERSION_MAJOR should be used instead. +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) + +// DEPRECATED: This define is deprecated. VK_API_VERSION_MINOR should be used instead. +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3FFU) + +// DEPRECATED: This define is deprecated. VK_API_VERSION_PATCH should be used instead. 
+#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) + +#define VK_API_VERSION_VARIANT(version) ((uint32_t)(version) >> 29) +#define VK_API_VERSION_MAJOR(version) (((uint32_t)(version) >> 22) & 0x7FU) +#define VK_API_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3FFU) +#define VK_API_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceAddress; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkFlags; +typedef uint32_t VkSampleMask; +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_FALSE 0U +#define VK_LOD_CLAMP_NONE 1000.0F +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_REMAINING_MIP_LEVELS (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) +#define VK_TRUE 1U +#define VK_WHOLE_SIZE (~0ULL) +#define VK_MAX_MEMORY_TYPES 32U +#define VK_MAX_MEMORY_HEAPS 16U +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256U +#define VK_UUID_SIZE 16U +#define VK_MAX_EXTENSION_NAME_SIZE 256U +#define VK_MAX_DESCRIPTION_SIZE 256U + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, + VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_UNKNOWN = -13, + VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, + VK_ERROR_FRAGMENTATION = -1000161000, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, + VK_ERROR_NOT_PERMITTED_EXT = -1000174001, + VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, + VK_THREAD_IDLE_KHR = 1000268000, + VK_THREAD_DONE_KHR = 1000268001, + VK_OPERATION_DEFERRED_KHR 
= 1000268002, + VK_OPERATION_NOT_DEFERRED_KHR = 1000268003, + VK_PIPELINE_COMPILE_REQUIRED_EXT = 1000297000, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, + VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED_EXT, + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + 
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000, + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 
1000112000, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001, + 
VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 1000207003, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004, + VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_PROFILE_KHR = 1000023000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR = 1000023001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_KHR = 1000023002, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_GET_MEMORY_PROPERTIES_KHR = 1000023003, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_BIND_MEMORY_KHR = 1000023004, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR = 1000023005, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000023006, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR = 1000023007, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR = 1000023008, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR = 1000023009, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR = 1000023010, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_KHR = 1000023011, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_QUEUE_FAMILY_PROPERTIES_2_KHR = 1000023012, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_PROFILES_KHR = 1000023013, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR = 
1000023014, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR = 1000023015, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR = 1000024000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR = 1000299000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR = 1000299001, +#endif + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002, + VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX = 1000030001, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_EXT = 1000038000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_CREATE_INFO_EXT = 1000038001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000038002, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000038003, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT = 1000038004, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT = 1000038005, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_EXT = 1000038006, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_EXT = 1000038007, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_EXT = 1000038008, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_EXT = 1000040000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_CREATE_INFO_EXT = 1000040001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_EXT = 1000040002, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_MVC_EXT = 1000040003, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_EXT = 1000040004, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000040005, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000040006, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_EXT = 1000040007, +#endif + VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + 
VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = 1000066000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, + VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002, 
+ VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003, + VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 1000116005, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001, + VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, + VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002, + VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, + VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, + VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = 1000138000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = 1000138001, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 1000138002, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = 1000138003, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000150007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR = 1000150009, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011, + 
VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR = 1000150013, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR = 1000150014, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR = 1000150020, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR = 1000347000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR = 1000347001, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR = 1000348013, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR = 1000163000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR = 1000163001, +#endif + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, + VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000, + VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, + 
VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000, + VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000, + VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_EXT = 1000187000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_CREATE_INFO_EXT = 1000187001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000187002, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000187003, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_EXT = 1000187004, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_EXT = 1000187005, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_EXT = 1000187006, +#endif + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002, + VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, + VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000192000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = 1000203000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL = 1000210000, + VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001, + VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002, + VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003, + VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004, + VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000, + VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000, + VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001, + VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = 1000215000, + VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001, + VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002, + VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000, + VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR = 1000226003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR = 1000226004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT = 1000234000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000, + VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001, + VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = 1000245000, + VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000, + VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001, + VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT = 1000260000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT = 1000267000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000, + VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 
1000269004, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = 1000276000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000, + VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV = 1000277003, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV = 1000277004, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV = 1000277005, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV = 1000277006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV = 1000277007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV = 1000278000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_VIEWPORT_SCISSOR_INFO_NV = 1000278001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000, + VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000, + VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT = 1000284001, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT = 1000284002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = 1000286000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = 1000286001, + VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002, + VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = 1000295000, + VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = 1000295001, + VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = 1000295002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = 1000297000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000, + VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001, + VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR = 1000314000, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR = 1000314001, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR = 1000314002, + VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR = 1000314003, + VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR = 1000314004, + VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR = 1000314005, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR = 1000314006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR = 1000314007, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV = 1000314008, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV = 1000314009, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR = 1000325000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001, + VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT = 
1000330000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001, + VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = 1000335000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR = 1000336000, + VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = 1000337000, + VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = 1000337001, + VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = 1000337002, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = 1000337003, + VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = 1000337004, + VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = 1000337005, + VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = 1000337006, + VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = 1000337007, + VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = 1000337008, + VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = 1000337009, + VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = 1000337010, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT = 1000340000, + VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT = 1000346000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = 1000351000, + VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = 1000351002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT = 1000352000, + VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT = 1000352001, + VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT = 1000352002, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364000, + VK_STRUCTURE_TYPE_MEMORY_ZIRCON_HANDLE_PROPERTIES_FUCHSIA = 1000364001, + VK_STRUCTURE_TYPE_MEMORY_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364002, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT = 1000377000, + VK_STRUCTURE_TYPE_SCREEN_SURFACE_CREATE_INFO_QNX = 1000378000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT = 1000381000, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_WRITE_CREATE_INFO_EXT = 1000381001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = 
VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR = 
VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = 
VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES, + VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkStructureType; + +typedef enum VkImageLayout { + VK_IMAGE_LAYOUT_UNDEFINED = 0, + VK_IMAGE_LAYOUT_GENERAL = 1, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, + VK_IMAGE_LAYOUT_PREINITIALIZED = 8, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL = 1000241000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001, + VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002, + VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR = 1000024000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR = 1000024001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR = 1000024002, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR = 1000299000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR = 1000299001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR = 1000299002, +#endif + VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, + VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = 1000164003, + VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000, + VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR = 1000314000, + VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR = 1000314001, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR = 
VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF +} VkImageLayout; + +typedef enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000, + VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_OBJECT_TYPE_VIDEO_SESSION_KHR = 1000023000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR = 1000023001, +#endif + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000, + VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000, + VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = 1000295000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkObjectType; + +typedef enum VkVendorId { + VK_VENDOR_ID_VIV = 0x10001, + VK_VENDOR_ID_VSI = 0x10002, + VK_VENDOR_ID_KAZAN = 0x10003, + VK_VENDOR_ID_CODEPLAY = 0x10004, + VK_VENDOR_ID_MESA = 0x10005, + VK_VENDOR_ID_POCL = 0x10006, + VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF +} VkVendorId; + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkSystemAllocationScope { + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, + VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, + VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, + VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF +} VkSystemAllocationScope; + +typedef enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, + VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} 
VkInternalAllocationType; + +typedef enum VkFormat { + VK_FORMAT_UNDEFINED = 0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + 
VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + VK_FORMAT_BC5_SNORM_BLOCK = 142, + VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, + VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, + VK_FORMAT_BC7_UNORM_BLOCK = 145, + VK_FORMAT_BC7_SRGB_BLOCK = 146, + VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, + VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, + VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, + VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, + VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, + VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, + VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, + VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, + VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, + VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, + VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, + VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, + VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, + VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, + VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, + VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, + VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, + VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, + VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, + VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, + VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, + VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, + VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, + VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, + VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, + VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, + VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, + VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, + VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, + VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, + VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, + VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, + VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, + VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, + VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, + VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, + VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, + VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, + VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000, + VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006, + VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008, + 
VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016, + VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026, + VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027, + VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033, + VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, + VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, + VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, + VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, + VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, + VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, + VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, + VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, + VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = 1000066000, + VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = 1000066001, + VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = 1000066002, + VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = 1000066003, + VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = 1000066004, + VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = 1000066005, + VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = 1000066006, + VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = 1000066007, + VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = 1000066008, + VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = 1000066009, + VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = 1000066010, + VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = 1000066011, + VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = 1000066012, + VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = 1000066013, + VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT = 1000330000, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = 1000330001, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = 1000330002, + VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = 1000330003, + VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = 1000340000, + VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = 1000340001, + VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM, + VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR 
= VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM, + VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + VK_FORMAT_MAX_ENUM = 0x7FFFFFFF +} VkFormat; + +typedef enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, + VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT = 1000158000, + VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF +} VkImageTiling; + +typedef enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, + VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageType; + +typedef enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, + VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, + VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, + VK_PHYSICAL_DEVICE_TYPE_CPU = 4, + VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkPhysicalDeviceType; + +typedef enum VkQueryType { + VK_QUERY_TYPE_OCCLUSION = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, + VK_QUERY_TYPE_TIMESTAMP = 2, +#ifdef 
VK_ENABLE_BETA_EXTENSIONS + VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR = 1000023000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_QUERY_TYPE_VIDEO_ENCODE_BITSTREAM_BUFFER_RANGE_KHR = 1000299000, +#endif + VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004, + VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000150000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150001, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, + VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000, + VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkQueryType; + +typedef enum VkSharingMode { + VK_SHARING_MODE_EXCLUSIVE = 0, + VK_SHARING_MODE_CONCURRENT = 1, + VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSharingMode; + +typedef enum VkComponentSwizzle { + VK_COMPONENT_SWIZZLE_IDENTITY = 0, + VK_COMPONENT_SWIZZLE_ZERO = 1, + VK_COMPONENT_SWIZZLE_ONE = 2, + VK_COMPONENT_SWIZZLE_R = 3, + VK_COMPONENT_SWIZZLE_G = 4, + VK_COMPONENT_SWIZZLE_B = 5, + VK_COMPONENT_SWIZZLE_A = 6, + VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF +} VkComponentSwizzle; + +typedef enum VkImageViewType { + VK_IMAGE_VIEW_TYPE_1D = 0, + VK_IMAGE_VIEW_TYPE_2D = 1, + VK_IMAGE_VIEW_TYPE_3D = 2, + VK_IMAGE_VIEW_TYPE_CUBE = 3, + VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, + VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, + VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, + VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageViewType; + +typedef enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, + VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF +} VkBlendFactor; + +typedef enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, + VK_BLEND_OP_ZERO_EXT = 1000148000, + VK_BLEND_OP_SRC_EXT = 1000148001, + VK_BLEND_OP_DST_EXT = 1000148002, + VK_BLEND_OP_SRC_OVER_EXT = 1000148003, + VK_BLEND_OP_DST_OVER_EXT = 1000148004, + VK_BLEND_OP_SRC_IN_EXT = 1000148005, + VK_BLEND_OP_DST_IN_EXT = 1000148006, + VK_BLEND_OP_SRC_OUT_EXT = 1000148007, + VK_BLEND_OP_DST_OUT_EXT = 1000148008, + VK_BLEND_OP_SRC_ATOP_EXT = 1000148009, + VK_BLEND_OP_DST_ATOP_EXT = 1000148010, + VK_BLEND_OP_XOR_EXT = 1000148011, + VK_BLEND_OP_MULTIPLY_EXT = 1000148012, + VK_BLEND_OP_SCREEN_EXT = 1000148013, + VK_BLEND_OP_OVERLAY_EXT = 1000148014, + VK_BLEND_OP_DARKEN_EXT = 1000148015, + VK_BLEND_OP_LIGHTEN_EXT = 1000148016, + VK_BLEND_OP_COLORDODGE_EXT = 1000148017, + VK_BLEND_OP_COLORBURN_EXT = 1000148018, + VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, + VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, + VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, + VK_BLEND_OP_EXCLUSION_EXT = 1000148022, + VK_BLEND_OP_INVERT_EXT = 1000148023, + VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, + VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, + VK_BLEND_OP_LINEARBURN_EXT = 1000148026, + VK_BLEND_OP_VIVIDLIGHT_EXT = 
1000148027, + VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, + VK_BLEND_OP_PINLIGHT_EXT = 1000148029, + VK_BLEND_OP_HARDMIX_EXT = 1000148030, + VK_BLEND_OP_HSL_HUE_EXT = 1000148031, + VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, + VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, + VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, + VK_BLEND_OP_PLUS_EXT = 1000148035, + VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036, + VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037, + VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, + VK_BLEND_OP_MINUS_EXT = 1000148039, + VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, + VK_BLEND_OP_CONTRAST_EXT = 1000148041, + VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, + VK_BLEND_OP_RED_EXT = 1000148043, + VK_BLEND_OP_GREEN_EXT = 1000148044, + VK_BLEND_OP_BLUE_EXT = 1000148045, + VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF +} VkBlendOp; + +typedef enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, + VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF +} VkCompareOp; + +typedef enum VkDynamicState { + VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR = 1000347000, + VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004, + VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006, + VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001, + VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR = 1000226000, + VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000, + VK_DYNAMIC_STATE_CULL_MODE_EXT = 1000267000, + VK_DYNAMIC_STATE_FRONT_FACE_EXT = 1000267001, + VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = 1000267002, + VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT = 1000267003, + VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT = 1000267004, + VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT = 1000267005, + VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT = 1000267006, + VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT = 1000267007, + VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT = 1000267008, + VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT = 1000267009, + VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT = 1000267010, + VK_DYNAMIC_STATE_STENCIL_OP_EXT = 1000267011, + VK_DYNAMIC_STATE_VERTEX_INPUT_EXT = 1000352000, + VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT = 1000377000, + VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT = 1000377001, + VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT = 1000377002, + VK_DYNAMIC_STATE_LOGIC_OP_EXT = 1000377003, + VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT = 1000377004, + VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT = 1000381000, + VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF +} VkDynamicState; + +typedef enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, + VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF +} VkFrontFace; + +typedef enum VkVertexInputRate { + VK_VERTEX_INPUT_RATE_VERTEX = 0, + VK_VERTEX_INPUT_RATE_INSTANCE = 1, + VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF +} VkVertexInputRate; + +typedef enum VkPrimitiveTopology { + 
VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, + VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF +} VkPrimitiveTopology; + +typedef enum VkPolygonMode { + VK_POLYGON_MODE_FILL = 0, + VK_POLYGON_MODE_LINE = 1, + VK_POLYGON_MODE_POINT = 2, + VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, + VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF +} VkPolygonMode; + +typedef enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, + VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF +} VkStencilOp; + +typedef enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR = 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, + VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF +} VkLogicOp; + +typedef enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, + VK_BORDER_COLOR_FLOAT_CUSTOM_EXT = 1000287003, + VK_BORDER_COLOR_INT_CUSTOM_EXT = 1000287004, + VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF +} VkBorderColor; + +typedef enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, + VK_FILTER_CUBIC_IMG = 1000015000, + VK_FILTER_CUBIC_EXT = VK_FILTER_CUBIC_IMG, + VK_FILTER_MAX_ENUM = 0x7FFFFFFF +} VkFilter; + +typedef enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, + VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerAddressMode; + +typedef enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, + VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerMipmapMode; + +typedef enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000, + 
VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_DESCRIPTOR_TYPE_MUTABLE_VALVE = 1000351000, + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorType; + +typedef enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, + VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentLoadOp; + +typedef enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, + VK_ATTACHMENT_STORE_OP_NONE_QCOM = 1000301000, + VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentStoreOp; + +typedef enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, + VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR = 1000165000, + VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, + VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF +} VkPipelineBindPoint; + +typedef enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, + VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferLevel; + +typedef enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_NONE_KHR = 1000165000, + VK_INDEX_TYPE_UINT8_EXT = 1000265000, + VK_INDEX_TYPE_NONE_NV = VK_INDEX_TYPE_NONE_KHR, + VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkIndexType; + +typedef enum VkSubpassContents { + VK_SUBPASS_CONTENTS_INLINE = 0, + VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, + VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassContents; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000, + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000, + VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000, + VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000, + VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000, + VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000, + VK_ACCESS_NONE_KHR = 0, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, + VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} 
VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + +typedef enum VkImageAspectFlagBits { + VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, + VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, + VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, + VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, + VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080, + VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100, + VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200, + VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT = 0x00000400, + VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, + VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, + VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT, + VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageAspectFlagBits; +typedef VkFlags VkImageAspectFlags; + +typedef enum VkFormatFeatureFlagBits { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000, + VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_FORMAT_FEATURE_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_FORMAT_FEATURE_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000, +#endif + VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000, + VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000, + VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + 
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, + VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFormatFeatureFlagBits; +typedef VkFlags VkFormatFeatureFlags; + +typedef enum VkImageCreateFlagBits { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, + VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100, + VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800, + VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200, + VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, + VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT, + VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT, + VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageCreateFlagBits; +typedef VkFlags VkImageCreateFlags; + +typedef enum VkSampleCountFlagBits { + VK_SAMPLE_COUNT_1_BIT = 0x00000001, + VK_SAMPLE_COUNT_2_BIT = 0x00000002, + VK_SAMPLE_COUNT_4_BIT = 0x00000004, + VK_SAMPLE_COUNT_8_BIT = 0x00000008, + VK_SAMPLE_COUNT_16_BIT = 0x00000010, + VK_SAMPLE_COUNT_32_BIT = 0x00000020, + VK_SAMPLE_COUNT_64_BIT = 0x00000040, + VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSampleCountFlagBits; +typedef VkFlags VkSampleCountFlags; + +typedef enum VkImageUsageFlagBits { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00000400, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + 
VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00000800, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR = 0x00001000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00002000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00004000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR = 0x00008000, +#endif + VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00000100, + VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200, + VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, + VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageUsageFlagBits; +typedef VkFlags VkImageUsageFlags; +typedef VkFlags VkInstanceCreateFlags; + +typedef enum VkMemoryHeapFlagBits { + VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryHeapFlagBits; +typedef VkFlags VkMemoryHeapFlags; + +typedef enum VkMemoryPropertyFlagBits { + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, + VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, + VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, + VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020, + VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040, + VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080, + VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryPropertyFlagBits; +typedef VkFlags VkMemoryPropertyFlags; + +typedef enum VkQueueFlagBits { + VK_QUEUE_GRAPHICS_BIT = 0x00000001, + VK_QUEUE_COMPUTE_BIT = 0x00000002, + VK_QUEUE_TRANSFER_BIT = 0x00000004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, + VK_QUEUE_PROTECTED_BIT = 0x00000010, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_QUEUE_VIDEO_DECODE_BIT_KHR = 0x00000020, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_QUEUE_VIDEO_ENCODE_BIT_KHR = 0x00000040, +#endif + VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueueFlagBits; +typedef VkFlags VkQueueFlags; +typedef VkFlags VkDeviceCreateFlags; + +typedef enum VkDeviceQueueCreateFlagBits { + VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001, + VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDeviceQueueCreateFlagBits; +typedef VkFlags VkDeviceQueueCreateFlags; + +typedef enum VkPipelineStageFlagBits { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, + VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, + VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, + VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000, + 
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000, + VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000, + VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, + VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000, + VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000, + VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = 0x00020000, + VK_PIPELINE_STAGE_NONE_KHR = 0, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineStageFlagBits; +typedef VkFlags VkPipelineStageFlags; +typedef VkFlags VkMemoryMapFlags; + +typedef enum VkSparseMemoryBindFlagBits { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, + VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseMemoryBindFlagBits; +typedef VkFlags VkSparseMemoryBindFlags; + +typedef enum VkSparseImageFormatFlagBits { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, + VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseImageFormatFlagBits; +typedef VkFlags VkSparseImageFormatFlags; + +typedef enum VkFenceCreateFlagBits { + VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, + VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceCreateFlagBits; +typedef VkFlags VkFenceCreateFlags; +typedef VkFlags VkSemaphoreCreateFlags; + +typedef enum VkEventCreateFlagBits { + VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR = 0x00000001, + VK_EVENT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkEventCreateFlagBits; +typedef VkFlags VkEventCreateFlags; + +typedef enum VkQueryPipelineStatisticFlagBits { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, + VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, + VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPipelineStatisticFlagBits; +typedef VkFlags VkQueryPipelineStatisticFlags; +typedef VkFlags VkQueryPoolCreateFlags; + +typedef enum VkQueryResultFlagBits { + VK_QUERY_RESULT_64_BIT = 0x00000001, + VK_QUERY_RESULT_WAIT_BIT = 0x00000002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_QUERY_RESULT_WITH_STATUS_BIT_KHR = 0x00000010, +#endif + VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryResultFlagBits; +typedef VkFlags VkQueryResultFlags; + +typedef enum 
VkBufferCreateFlagBits { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000010, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferCreateFlagBits; +typedef VkFlags VkBufferCreateFlags; + +typedef enum VkBufferUsageFlagBits { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, + VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x00020000, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_BUFFER_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00002000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_BUFFER_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00004000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_BUFFER_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00008000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_BUFFER_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00010000, +#endif + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000, + VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000, + VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400, + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferUsageFlagBits; +typedef VkFlags VkBufferUsageFlags; +typedef VkFlags VkBufferViewCreateFlags; + +typedef enum VkImageViewCreateFlagBits { + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x00000001, + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT = 0x00000002, + VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageViewCreateFlagBits; +typedef VkFlags VkImageViewCreateFlags; + +typedef enum VkShaderModuleCreateFlagBits { + VK_SHADER_MODULE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderModuleCreateFlagBits; +typedef VkFlags VkShaderModuleCreateFlags; + +typedef enum VkPipelineCacheCreateFlagBits { + VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = 0x00000001, + VK_PIPELINE_CACHE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheCreateFlagBits; +typedef VkFlags VkPipelineCacheCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; + +typedef enum 
VkPipelineCreateFlagBits { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, + VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000, + VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000, + VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, + VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040, + VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080, + VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00040000, + VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x00000800, + VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = 0x00000100, + VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = 0x00000200, + VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, + VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreateFlagBits; +typedef VkFlags VkPipelineCreateFlags; + +typedef enum VkPipelineShaderStageCreateFlagBits { + VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = 0x00000001, + VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = 0x00000002, + VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineShaderStageCreateFlagBits; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_RAYGEN_BIT_KHR = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_KHR = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_KHR = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x00002000, + VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, + VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080, + VK_SHADER_STAGE_RAYGEN_BIT_NV = VK_SHADER_STAGE_RAYGEN_BIT_KHR, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = VK_SHADER_STAGE_ANY_HIT_BIT_KHR, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, + VK_SHADER_STAGE_MISS_BIT_NV = VK_SHADER_STAGE_MISS_BIT_KHR, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = VK_SHADER_STAGE_INTERSECTION_BIT_KHR, + VK_SHADER_STAGE_CALLABLE_BIT_NV = VK_SHADER_STAGE_CALLABLE_BIT_KHR, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderStageFlagBits; + +typedef enum VkCullModeFlagBits { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + 
VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; + +typedef enum VkSamplerCreateFlagBits { + VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x00000001, + VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002, + VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSamplerCreateFlagBits; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002, + VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE = 0x00000004, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE = 0x00000004, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags VkDescriptorSetLayoutCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkFramebufferCreateFlagBits { + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT = 0x00000001, + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, + VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFramebufferCreateFlagBits; +typedef VkFlags VkFramebufferCreateFlags; + +typedef enum VkRenderPassCreateFlagBits { + VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM = 0x00000002, + VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkRenderPassCreateFlagBits; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkSubpassDescriptionFlagBits { + VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM = 0x00000004, + 
VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM = 0x00000008, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolResetFlagBits; +typedef VkFlags VkCommandPoolResetFlags; + +typedef enum VkCommandBufferUsageFlagBits { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, + VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferUsageFlagBits; +typedef VkFlags VkCommandBufferUsageFlags; + +typedef enum VkQueryControlFlagBits { + VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, + VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryControlFlagBits; +typedef VkFlags VkQueryControlFlags; + +typedef enum VkCommandBufferResetFlagBits { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferResetFlagBits; +typedef VkFlags VkCommandBufferResetFlags; + +typedef enum VkStencilFaceFlagBits { + VK_STENCIL_FACE_FRONT_BIT = 0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003, + VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK, + VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkStencilFaceFlagBits; +typedef VkFlags VkStencilFaceFlags; +typedef struct VkExtent2D { + uint32_t width; + uint32_t height; +} VkExtent2D; + +typedef struct VkExtent3D { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkExtent3D; + +typedef struct VkOffset2D { + int32_t x; + int32_t y; +} VkOffset2D; + +typedef struct VkOffset3D { + int32_t x; + int32_t y; + int32_t z; +} VkOffset3D; + +typedef struct VkRect2D { + VkOffset2D offset; + VkExtent2D extent; +} VkRect2D; + +typedef struct VkBaseInStructure { + VkStructureType sType; + const struct VkBaseInStructure* pNext; +} VkBaseInStructure; + +typedef struct VkBaseOutStructure { + VkStructureType sType; + struct VkBaseOutStructure* pNext; +} VkBaseOutStructure; + +typedef struct VkBufferMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} VkBufferMemoryBarrier; + +typedef struct VkDispatchIndirectCommand { + uint32_t x; + uint32_t y; + uint32_t z; +} VkDispatchIndirectCommand; + +typedef struct VkDrawIndexedIndirectCommand { + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; +} VkDrawIndexedIndirectCommand; + +typedef struct VkDrawIndirectCommand { + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; +} VkDrawIndirectCommand; + +typedef struct VkImageSubresourceRange { + VkImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + 
uint32_t layerCount; +} VkImageSubresourceRange; + +typedef struct VkImageMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier; + +typedef struct VkMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; +} VkMemoryBarrier; + +typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( + void* pUserData, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkFreeFunction)( + void* pUserData, + void* pMemory); + +typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( + void* pUserData, + void* pOriginal, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); +typedef struct VkAllocationCallbacks { + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; +} VkAllocationCallbacks; + +typedef struct VkApplicationInfo { + VkStructureType sType; + const void* pNext; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; +} VkApplicationInfo; + +typedef struct VkFormatProperties { + VkFormatFeatureFlags linearTilingFeatures; + VkFormatFeatureFlags optimalTilingFeatures; + VkFormatFeatureFlags bufferFeatures; +} VkFormatProperties; + +typedef struct VkImageFormatProperties { + VkExtent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + VkSampleCountFlags sampleCounts; + VkDeviceSize maxResourceSize; +} VkImageFormatProperties; + +typedef struct VkInstanceCreateInfo { + VkStructureType sType; + const void* pNext; + VkInstanceCreateFlags flags; + const VkApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; +} VkInstanceCreateInfo; + +typedef struct VkMemoryHeap { + VkDeviceSize size; + VkMemoryHeapFlags flags; +} VkMemoryHeap; + +typedef struct VkMemoryType { + VkMemoryPropertyFlags propertyFlags; + uint32_t heapIndex; +} VkMemoryType; + +typedef struct VkPhysicalDeviceFeatures { + VkBool32 robustBufferAccess; + VkBool32 fullDrawIndexUint32; + VkBool32 imageCubeArray; + VkBool32 independentBlend; + VkBool32 geometryShader; + VkBool32 tessellationShader; + VkBool32 sampleRateShading; + VkBool32 dualSrcBlend; + VkBool32 logicOp; + VkBool32 multiDrawIndirect; + VkBool32 drawIndirectFirstInstance; + VkBool32 depthClamp; + VkBool32 depthBiasClamp; + VkBool32 fillModeNonSolid; + VkBool32 depthBounds; + VkBool32 wideLines; + VkBool32 largePoints; + VkBool32 alphaToOne; + VkBool32 multiViewport; + VkBool32 samplerAnisotropy; + VkBool32 textureCompressionETC2; + 
VkBool32 textureCompressionASTC_LDR; + VkBool32 textureCompressionBC; + VkBool32 occlusionQueryPrecise; + VkBool32 pipelineStatisticsQuery; + VkBool32 vertexPipelineStoresAndAtomics; + VkBool32 fragmentStoresAndAtomics; + VkBool32 shaderTessellationAndGeometryPointSize; + VkBool32 shaderImageGatherExtended; + VkBool32 shaderStorageImageExtendedFormats; + VkBool32 shaderStorageImageMultisample; + VkBool32 shaderStorageImageReadWithoutFormat; + VkBool32 shaderStorageImageWriteWithoutFormat; + VkBool32 shaderUniformBufferArrayDynamicIndexing; + VkBool32 shaderSampledImageArrayDynamicIndexing; + VkBool32 shaderStorageBufferArrayDynamicIndexing; + VkBool32 shaderStorageImageArrayDynamicIndexing; + VkBool32 shaderClipDistance; + VkBool32 shaderCullDistance; + VkBool32 shaderFloat64; + VkBool32 shaderInt64; + VkBool32 shaderInt16; + VkBool32 shaderResourceResidency; + VkBool32 shaderResourceMinLod; + VkBool32 sparseBinding; + VkBool32 sparseResidencyBuffer; + VkBool32 sparseResidencyImage2D; + VkBool32 sparseResidencyImage3D; + VkBool32 sparseResidency2Samples; + VkBool32 sparseResidency4Samples; + VkBool32 sparseResidency8Samples; + VkBool32 sparseResidency16Samples; + VkBool32 sparseResidencyAliased; + VkBool32 variableMultisampleRate; + VkBool32 inheritedQueries; +} VkPhysicalDeviceFeatures; + +typedef struct VkPhysicalDeviceLimits { + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + VkDeviceSize bufferImageGranularity; + VkDeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t 
maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + VkDeviceSize minTexelBufferOffsetAlignment; + VkDeviceSize minUniformBufferOffsetAlignment; + VkDeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + VkSampleCountFlags framebufferColorSampleCounts; + VkSampleCountFlags framebufferDepthSampleCounts; + VkSampleCountFlags framebufferStencilSampleCounts; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + VkSampleCountFlags sampledImageColorSampleCounts; + VkSampleCountFlags sampledImageIntegerSampleCounts; + VkSampleCountFlags sampledImageDepthSampleCounts; + VkSampleCountFlags sampledImageStencilSampleCounts; + VkSampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + VkBool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + VkBool32 strictLines; + VkBool32 standardSampleLocations; + VkDeviceSize optimalBufferCopyOffsetAlignment; + VkDeviceSize optimalBufferCopyRowPitchAlignment; + VkDeviceSize nonCoherentAtomSize; +} VkPhysicalDeviceLimits; + +typedef struct VkPhysicalDeviceMemoryProperties { + uint32_t memoryTypeCount; + VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryProperties; + +typedef struct VkPhysicalDeviceSparseProperties { + VkBool32 residencyStandard2DBlockShape; + VkBool32 residencyStandard2DMultisampleBlockShape; + VkBool32 residencyStandard3DBlockShape; + VkBool32 residencyAlignedMipSize; + VkBool32 residencyNonResidentStrict; +} VkPhysicalDeviceSparseProperties; + +typedef struct VkPhysicalDeviceProperties { + uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +} VkPhysicalDeviceProperties; + +typedef struct VkQueueFamilyProperties { + VkQueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + VkExtent3D minImageTransferGranularity; +} VkQueueFamilyProperties; + +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + 
const VkDeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* pEnabledFeatures; +} VkDeviceCreateInfo; + +typedef struct VkExtensionProperties { + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; +} VkExtensionProperties; + +typedef struct VkLayerProperties { + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkLayerProperties; + +typedef struct VkSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + const VkPipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const VkCommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkSubmitInfo; + +typedef struct VkMappedMemoryRange { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMappedMemoryRange; + +typedef struct VkMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeIndex; +} VkMemoryAllocateInfo; + +typedef struct VkMemoryRequirements { + VkDeviceSize size; + VkDeviceSize alignment; + uint32_t memoryTypeBits; +} VkMemoryRequirements; + +typedef struct VkSparseMemoryBind { + VkDeviceSize resourceOffset; + VkDeviceSize size; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseMemoryBind; + +typedef struct VkSparseBufferMemoryBindInfo { + VkBuffer buffer; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseBufferMemoryBindInfo; + +typedef struct VkSparseImageOpaqueMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseImageOpaqueMemoryBindInfo; + +typedef struct VkImageSubresource { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; +} VkImageSubresource; + +typedef struct VkSparseImageMemoryBind { + VkImageSubresource subresource; + VkOffset3D offset; + VkExtent3D extent; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseImageMemoryBind; + +typedef struct VkSparseImageMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseImageMemoryBind* pBinds; +} VkSparseImageMemoryBindInfo; + +typedef struct VkBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const VkSparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const VkSparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkBindSparseInfo; + +typedef struct VkSparseImageFormatProperties { + VkImageAspectFlags aspectMask; + VkExtent3D imageGranularity; + VkSparseImageFormatFlags flags; +} VkSparseImageFormatProperties; + +typedef struct VkSparseImageMemoryRequirements { + VkSparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + VkDeviceSize imageMipTailSize; + VkDeviceSize imageMipTailOffset; + VkDeviceSize imageMipTailStride; +} VkSparseImageMemoryRequirements; + +typedef struct VkFenceCreateInfo { + VkStructureType 
sType; + const void* pNext; + VkFenceCreateFlags flags; +} VkFenceCreateInfo; + +typedef struct VkSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreCreateFlags flags; +} VkSemaphoreCreateInfo; + +typedef struct VkEventCreateInfo { + VkStructureType sType; + const void* pNext; + VkEventCreateFlags flags; +} VkEventCreateInfo; + +typedef struct VkQueryPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueryPoolCreateFlags flags; + VkQueryType queryType; + uint32_t queryCount; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkQueryPoolCreateInfo; + +typedef struct VkBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkDeviceSize size; + VkBufferUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkBufferCreateInfo; + +typedef struct VkBufferViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferViewCreateFlags flags; + VkBuffer buffer; + VkFormat format; + VkDeviceSize offset; + VkDeviceSize range; +} VkBufferViewCreateInfo; + +typedef struct VkImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageType imageType; + VkFormat format; + VkExtent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + VkSampleCountFlagBits samples; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkImageLayout initialLayout; +} VkImageCreateInfo; + +typedef struct VkSubresourceLayout { + VkDeviceSize offset; + VkDeviceSize size; + VkDeviceSize rowPitch; + VkDeviceSize arrayPitch; + VkDeviceSize depthPitch; +} VkSubresourceLayout; + +typedef struct VkComponentMapping { + VkComponentSwizzle r; + VkComponentSwizzle g; + VkComponentSwizzle b; + VkComponentSwizzle a; +} VkComponentMapping; + +typedef struct VkImageViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageViewCreateFlags flags; + VkImage image; + VkImageViewType viewType; + VkFormat format; + VkComponentMapping components; + VkImageSubresourceRange subresourceRange; +} VkImageViewCreateInfo; + +typedef struct VkShaderModuleCreateInfo { + VkStructureType sType; + const void* pNext; + VkShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; +} VkShaderModuleCreateInfo; + +typedef struct VkPipelineCacheCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; +} VkPipelineCacheCreateInfo; + +typedef struct VkSpecializationMapEntry { + uint32_t constantID; + uint32_t offset; + size_t size; +} VkSpecializationMapEntry; + +typedef struct VkSpecializationInfo { + uint32_t mapEntryCount; + const VkSpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; +} VkSpecializationInfo; + +typedef struct VkPipelineShaderStageCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineShaderStageCreateFlags flags; + VkShaderStageFlagBits stage; + VkShaderModule module; + const char* pName; + const VkSpecializationInfo* pSpecializationInfo; +} VkPipelineShaderStageCreateInfo; + +typedef struct VkComputePipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + VkPipelineShaderStageCreateInfo stage; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkComputePipelineCreateInfo; + +typedef struct 
VkVertexInputBindingDescription { + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; +} VkVertexInputBindingDescription; + +typedef struct VkVertexInputAttributeDescription { + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription; + +typedef struct VkPipelineVertexInputStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineVertexInputStateCreateFlags flags; + uint32_t vertexBindingDescriptionCount; + const VkVertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; +} VkPipelineVertexInputStateCreateInfo; + +typedef struct VkPipelineInputAssemblyStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineInputAssemblyStateCreateFlags flags; + VkPrimitiveTopology topology; + VkBool32 primitiveRestartEnable; +} VkPipelineInputAssemblyStateCreateInfo; + +typedef struct VkPipelineTessellationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineTessellationStateCreateFlags flags; + uint32_t patchControlPoints; +} VkPipelineTessellationStateCreateInfo; + +typedef struct VkViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} VkViewport; + +typedef struct VkPipelineViewportStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const VkViewport* pViewports; + uint32_t scissorCount; + const VkRect2D* pScissors; +} VkPipelineViewportStateCreateInfo; + +typedef struct VkPipelineRasterizationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateCreateFlags flags; + VkBool32 depthClampEnable; + VkBool32 rasterizerDiscardEnable; + VkPolygonMode polygonMode; + VkCullModeFlags cullMode; + VkFrontFace frontFace; + VkBool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; +} VkPipelineRasterizationStateCreateInfo; + +typedef struct VkPipelineMultisampleStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineMultisampleStateCreateFlags flags; + VkSampleCountFlagBits rasterizationSamples; + VkBool32 sampleShadingEnable; + float minSampleShading; + const VkSampleMask* pSampleMask; + VkBool32 alphaToCoverageEnable; + VkBool32 alphaToOneEnable; +} VkPipelineMultisampleStateCreateInfo; + +typedef struct VkStencilOpState { + VkStencilOp failOp; + VkStencilOp passOp; + VkStencilOp depthFailOp; + VkCompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; +} VkStencilOpState; + +typedef struct VkPipelineDepthStencilStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDepthStencilStateCreateFlags flags; + VkBool32 depthTestEnable; + VkBool32 depthWriteEnable; + VkCompareOp depthCompareOp; + VkBool32 depthBoundsTestEnable; + VkBool32 stencilTestEnable; + VkStencilOpState front; + VkStencilOpState back; + float minDepthBounds; + float maxDepthBounds; +} VkPipelineDepthStencilStateCreateInfo; + +typedef struct VkPipelineColorBlendAttachmentState { + VkBool32 blendEnable; + VkBlendFactor srcColorBlendFactor; + VkBlendFactor dstColorBlendFactor; + VkBlendOp colorBlendOp; + VkBlendFactor srcAlphaBlendFactor; + VkBlendFactor dstAlphaBlendFactor; + VkBlendOp alphaBlendOp; + VkColorComponentFlags colorWriteMask; +} VkPipelineColorBlendAttachmentState; + 
+typedef struct VkPipelineColorBlendStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineColorBlendStateCreateFlags flags; + VkBool32 logicOpEnable; + VkLogicOp logicOp; + uint32_t attachmentCount; + const VkPipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; +} VkPipelineColorBlendStateCreateInfo; + +typedef struct VkPipelineDynamicStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const VkDynamicState* pDynamicStates; +} VkPipelineDynamicStateCreateInfo; + +typedef struct VkGraphicsPipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; + const VkPipelineViewportStateCreateInfo* pViewportState; + const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkRenderPass renderPass; + uint32_t subpass; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkGraphicsPipelineCreateInfo; + +typedef struct VkPushConstantRange { + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; +} VkPushConstantRange; + +typedef struct VkPipelineLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkPipelineLayoutCreateInfo; + +typedef struct VkSamplerCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerCreateFlags flags; + VkFilter magFilter; + VkFilter minFilter; + VkSamplerMipmapMode mipmapMode; + VkSamplerAddressMode addressModeU; + VkSamplerAddressMode addressModeV; + VkSamplerAddressMode addressModeW; + float mipLodBias; + VkBool32 anisotropyEnable; + float maxAnisotropy; + VkBool32 compareEnable; + VkCompareOp compareOp; + float minLod; + float maxLod; + VkBorderColor borderColor; + VkBool32 unnormalizedCoordinates; +} VkSamplerCreateInfo; + +typedef struct VkCopyDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; +} VkCopyDescriptorSet; + +typedef struct VkDescriptorBufferInfo { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize range; +} VkDescriptorBufferInfo; + +typedef struct VkDescriptorImageInfo { + VkSampler sampler; + VkImageView imageView; + VkImageLayout imageLayout; +} VkDescriptorImageInfo; + +typedef struct VkDescriptorPoolSize { + VkDescriptorType type; + uint32_t descriptorCount; +} VkDescriptorPoolSize; + +typedef struct VkDescriptorPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPoolCreateFlags flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const VkDescriptorPoolSize* pPoolSizes; +} VkDescriptorPoolCreateInfo; + +typedef struct VkDescriptorSetAllocateInfo { + VkStructureType sType; 
+ const void* pNext; + VkDescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkDescriptorSetAllocateInfo; + +typedef struct VkDescriptorSetLayoutBinding { + uint32_t binding; + VkDescriptorType descriptorType; + uint32_t descriptorCount; + VkShaderStageFlags stageFlags; + const VkSampler* pImmutableSamplers; +} VkDescriptorSetLayoutBinding; + +typedef struct VkDescriptorSetLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const VkDescriptorSetLayoutBinding* pBindings; +} VkDescriptorSetLayoutCreateInfo; + +typedef struct VkWriteDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + const VkDescriptorImageInfo* pImageInfo; + const VkDescriptorBufferInfo* pBufferInfo; + const VkBufferView* pTexelBufferView; +} VkWriteDescriptorSet; + +typedef struct VkAttachmentDescription { + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription; + +typedef struct VkAttachmentReference { + uint32_t attachment; + VkImageLayout layout; +} VkAttachmentReference; + +typedef struct VkFramebufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkFramebufferCreateFlags flags; + VkRenderPass renderPass; + uint32_t attachmentCount; + const VkImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; +} VkFramebufferCreateInfo; + +typedef struct VkSubpassDescription { + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const VkAttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference* pColorAttachments; + const VkAttachmentReference* pResolveAttachments; + const VkAttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription; + +typedef struct VkSubpassDependency { + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; +} VkSubpassDependency; + +typedef struct VkRenderPassCreateInfo { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency* pDependencies; +} VkRenderPassCreateInfo; + +typedef struct VkCommandPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; +} VkCommandPoolCreateInfo; + +typedef struct VkCommandBufferAllocateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPool commandPool; + VkCommandBufferLevel level; + uint32_t commandBufferCount; +} VkCommandBufferAllocateInfo; + +typedef struct VkCommandBufferInheritanceInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + uint32_t subpass; + VkFramebuffer framebuffer; + VkBool32 occlusionQueryEnable; + 
VkQueryControlFlags queryFlags; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkCommandBufferInheritanceInfo; + +typedef struct VkCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + VkCommandBufferUsageFlags flags; + const VkCommandBufferInheritanceInfo* pInheritanceInfo; +} VkCommandBufferBeginInfo; + +typedef struct VkBufferCopy { + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy; + +typedef struct VkImageSubresourceLayers { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceLayers; + +typedef struct VkBufferImageCopy { + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy; + +typedef union VkClearColorValue { + float float32[4]; + int32_t int32[4]; + uint32_t uint32[4]; +} VkClearColorValue; + +typedef struct VkClearDepthStencilValue { + float depth; + uint32_t stencil; +} VkClearDepthStencilValue; + +typedef union VkClearValue { + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +} VkClearValue; + +typedef struct VkClearAttachment { + VkImageAspectFlags aspectMask; + uint32_t colorAttachment; + VkClearValue clearValue; +} VkClearAttachment; + +typedef struct VkClearRect { + VkRect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkClearRect; + +typedef struct VkImageBlit { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit; + +typedef struct VkImageCopy { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy; + +typedef struct VkImageResolve { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve; + +typedef struct VkRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + VkFramebuffer framebuffer; + VkRect2D renderArea; + uint32_t clearValueCount; + const VkClearValue* pClearValues; +} VkRenderPassBeginInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance); +typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice 
physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); +typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue); +typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); +typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); +typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); +typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); +typedef 
void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); +typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); +typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); +typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); +typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const 
VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); +typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); +typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); +typedef VkResult (VKAPI_PTR 
*PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); +typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); +typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); +typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); +typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); +typedef void (VKAPI_PTR 
*PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); +typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void 
(VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL 
vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + 
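+/*
+ * Usage sketch (illustrative only, not taken from the upstream Khronos header):
+ * the entry points prototyped in this block are usually resolved at run time
+ * rather than linked directly. Skia's Vulkan backend, for instance, is handed a
+ * GetProc-style callback and pulls everything through vkGetInstanceProcAddr /
+ * vkGetDeviceProcAddr. Assuming a POSIX loader exposed as "libvulkan.so.1" and
+ * <dlfcn.h> (both assumptions; variable names are placeholders):
+ *
+ *   void* lib = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL);
+ *   PFN_vkGetInstanceProcAddr gipa =
+ *       (PFN_vkGetInstanceProcAddr)dlsym(lib, "vkGetInstanceProcAddr");
+ *   // With a NULL instance only global commands (vkCreateInstance,
+ *   // vkEnumerateInstance*) may be queried.
+ *   PFN_vkCreateInstance createInstance =
+ *       (PFN_vkCreateInstance)gipa(VK_NULL_HANDLE, "vkCreateInstance");
+ *   // Once a VkDevice exists, device-level commands are typically fetched
+ *   // with vkGetDeviceProcAddr to skip the loader's dispatch trampoline.
+ */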
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, + const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice 
device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + 
VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer 
commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer commandBuffer, + const float blendConstants[4]); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( + VkCommandBuffer commandBuffer, + float minDepthBounds, + float maxDepthBounds); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t compareMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t writeMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t reference); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdDraw( + VkCommandBuffer commandBuffer, + uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( + VkCommandBuffer commandBuffer, + uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const 
VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageBlit* pRegions, + VkFilter filter); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue* pDepthStencil, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkClearAttachment* pAttachments, + uint32_t rectCount, + const VkClearRect* pRects); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageResolve* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool( + VkCommandBuffer commandBuffer, + VkQueryPool 
queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
+ VkCommandBuffer commandBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void* pValues);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+ VkCommandBuffer commandBuffer,
+ VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
+ VkCommandBuffer commandBuffer,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+#endif
+
+
+#define VK_VERSION_1_1 1
+// Vulkan 1.1 version number
+#define VK_API_VERSION_1_1 VK_MAKE_API_VERSION(0, 1, 1, 0)// Patch version should always be set to 0
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)
+#define VK_MAX_DEVICE_GROUP_SIZE 32U
+#define VK_LUID_SIZE 8U
+#define VK_QUEUE_FAMILY_EXTERNAL (~1U)
+
+typedef enum VkPointClippingBehavior {
+ VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,
+ VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1,
+ VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+ VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+ VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPointClippingBehavior;
+
+typedef enum VkTessellationDomainOrigin {
+ VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,
+ VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1,
+ VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+ VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+ VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF
+} VkTessellationDomainOrigin;
+
+typedef enum VkSamplerYcbcrModelConversion {
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,
+ VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrModelConversion;
+
+typedef enum VkSamplerYcbcrRange {
+ VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
+ VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1,
+ VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
+
VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, + VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerYcbcrRange; + +typedef enum VkChromaLocation { + VK_CHROMA_LOCATION_COSITED_EVEN = 0, + VK_CHROMA_LOCATION_MIDPOINT = 1, + VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN, + VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT, + VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF +} VkChromaLocation; + +typedef enum VkDescriptorUpdateTemplateType { + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorUpdateTemplateType; + +typedef enum VkSubgroupFeatureFlagBits { + VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001, + VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002, + VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004, + VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008, + VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010, + VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020, + VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040, + VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080, + VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100, + VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubgroupFeatureFlagBits; +typedef VkFlags VkSubgroupFeatureFlags; + +typedef enum VkPeerMemoryFeatureFlagBits { + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008, + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT, + VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPeerMemoryFeatureFlagBits; +typedef VkFlags VkPeerMemoryFeatureFlags; + +typedef enum VkMemoryAllocateFlagBits { + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT = 0x00000002, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000004, + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryAllocateFlagBits; +typedef VkFlags VkMemoryAllocateFlags; +typedef VkFlags VkCommandPoolTrimFlags; +typedef VkFlags VkDescriptorUpdateTemplateCreateFlags; + +typedef enum VkExternalMemoryHandleTypeFlagBits { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400, + 
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA = 0x00000800, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBits; +typedef VkFlags VkExternalMemoryHandleTypeFlags; + +typedef enum VkExternalMemoryFeatureFlagBits { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBits; +typedef VkFlags VkExternalMemoryFeatureFlags; + +typedef enum VkExternalFenceHandleTypeFlagBits { + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceHandleTypeFlagBits; +typedef VkFlags VkExternalFenceHandleTypeFlags; + +typedef enum VkExternalFenceFeatureFlagBits { + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceFeatureFlagBits; +typedef VkFlags VkExternalFenceFeatureFlags; + +typedef enum VkFenceImportFlagBits { + VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT, + VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceImportFlagBits; +typedef VkFlags VkFenceImportFlags; + +typedef enum VkSemaphoreImportFlagBits { + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = 
VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, + VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreImportFlagBits; +typedef VkFlags VkSemaphoreImportFlags; + +typedef enum VkExternalSemaphoreHandleTypeFlagBits { + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA = 0x00000080, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE_BIT = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreHandleTypeFlagBits; +typedef VkFlags VkExternalSemaphoreHandleTypeFlags; + +typedef enum VkExternalSemaphoreFeatureFlagBits { + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreFeatureFlagBits; +typedef VkFlags VkExternalSemaphoreFeatureFlags; +typedef struct VkPhysicalDeviceSubgroupProperties { + VkStructureType sType; + void* pNext; + uint32_t subgroupSize; + VkShaderStageFlags supportedStages; + VkSubgroupFeatureFlags supportedOperations; + VkBool32 quadOperationsInAllStages; +} VkPhysicalDeviceSubgroupProperties; + +typedef struct VkBindBufferMemoryInfo { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindBufferMemoryInfo; + +typedef struct VkBindImageMemoryInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindImageMemoryInfo; + +typedef struct VkPhysicalDevice16BitStorageFeatures { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; +} VkPhysicalDevice16BitStorageFeatures; + +typedef struct VkMemoryDedicatedRequirements { + VkStructureType sType; + void* pNext; + VkBool32 prefersDedicatedAllocation; + VkBool32 requiresDedicatedAllocation; +} VkMemoryDedicatedRequirements; + +typedef struct VkMemoryDedicatedAllocateInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkMemoryDedicatedAllocateInfo; + +typedef struct VkMemoryAllocateFlagsInfo { + VkStructureType sType; + const void* pNext; + VkMemoryAllocateFlags flags; + uint32_t deviceMask; +} VkMemoryAllocateFlagsInfo; + +typedef struct VkDeviceGroupRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + 
uint32_t deviceMask; + uint32_t deviceRenderAreaCount; + const VkRect2D* pDeviceRenderAreas; +} VkDeviceGroupRenderPassBeginInfo; + +typedef struct VkDeviceGroupCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceMask; +} VkDeviceGroupCommandBufferBeginInfo; + +typedef struct VkDeviceGroupSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const uint32_t* pWaitSemaphoreDeviceIndices; + uint32_t commandBufferCount; + const uint32_t* pCommandBufferDeviceMasks; + uint32_t signalSemaphoreCount; + const uint32_t* pSignalSemaphoreDeviceIndices; +} VkDeviceGroupSubmitInfo; + +typedef struct VkDeviceGroupBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t resourceDeviceIndex; + uint32_t memoryDeviceIndex; +} VkDeviceGroupBindSparseInfo; + +typedef struct VkBindBufferMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindBufferMemoryDeviceGroupInfo; + +typedef struct VkBindImageMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; + uint32_t splitInstanceBindRegionCount; + const VkRect2D* pSplitInstanceBindRegions; +} VkBindImageMemoryDeviceGroupInfo; + +typedef struct VkPhysicalDeviceGroupProperties { + VkStructureType sType; + void* pNext; + uint32_t physicalDeviceCount; + VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE]; + VkBool32 subsetAllocation; +} VkPhysicalDeviceGroupProperties; + +typedef struct VkDeviceGroupDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t physicalDeviceCount; + const VkPhysicalDevice* pPhysicalDevices; +} VkDeviceGroupDeviceCreateInfo; + +typedef struct VkBufferMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; +} VkBufferMemoryRequirementsInfo2; + +typedef struct VkImageMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageMemoryRequirementsInfo2; + +typedef struct VkImageSparseMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageSparseMemoryRequirementsInfo2; + +typedef struct VkMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkMemoryRequirements memoryRequirements; +} VkMemoryRequirements2; + +typedef struct VkSparseImageMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkSparseImageMemoryRequirements memoryRequirements; +} VkSparseImageMemoryRequirements2; + +typedef struct VkPhysicalDeviceFeatures2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceFeatures features; +} VkPhysicalDeviceFeatures2; + +typedef struct VkPhysicalDeviceProperties2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceProperties properties; +} VkPhysicalDeviceProperties2; + +typedef struct VkFormatProperties2 { + VkStructureType sType; + void* pNext; + VkFormatProperties formatProperties; +} VkFormatProperties2; + +typedef struct VkImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkImageFormatProperties imageFormatProperties; +} VkImageFormatProperties2; + +typedef struct VkPhysicalDeviceImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkImageCreateFlags flags; +} VkPhysicalDeviceImageFormatInfo2; + +typedef struct VkQueueFamilyProperties2 { + VkStructureType sType; + void* pNext; + 
VkQueueFamilyProperties queueFamilyProperties; +} VkQueueFamilyProperties2; + +typedef struct VkPhysicalDeviceMemoryProperties2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceMemoryProperties memoryProperties; +} VkPhysicalDeviceMemoryProperties2; + +typedef struct VkSparseImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkSparseImageFormatProperties properties; +} VkSparseImageFormatProperties2; + +typedef struct VkPhysicalDeviceSparseImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkSampleCountFlagBits samples; + VkImageUsageFlags usage; + VkImageTiling tiling; +} VkPhysicalDeviceSparseImageFormatInfo2; + +typedef struct VkPhysicalDevicePointClippingProperties { + VkStructureType sType; + void* pNext; + VkPointClippingBehavior pointClippingBehavior; +} VkPhysicalDevicePointClippingProperties; + +typedef struct VkInputAttachmentAspectReference { + uint32_t subpass; + uint32_t inputAttachmentIndex; + VkImageAspectFlags aspectMask; +} VkInputAttachmentAspectReference; + +typedef struct VkRenderPassInputAttachmentAspectCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t aspectReferenceCount; + const VkInputAttachmentAspectReference* pAspectReferences; +} VkRenderPassInputAttachmentAspectCreateInfo; + +typedef struct VkImageViewUsageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags usage; +} VkImageViewUsageCreateInfo; + +typedef struct VkPipelineTessellationDomainOriginStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkTessellationDomainOrigin domainOrigin; +} VkPipelineTessellationDomainOriginStateCreateInfo; + +typedef struct VkRenderPassMultiviewCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t subpassCount; + const uint32_t* pViewMasks; + uint32_t dependencyCount; + const int32_t* pViewOffsets; + uint32_t correlationMaskCount; + const uint32_t* pCorrelationMasks; +} VkRenderPassMultiviewCreateInfo; + +typedef struct VkPhysicalDeviceMultiviewFeatures { + VkStructureType sType; + void* pNext; + VkBool32 multiview; + VkBool32 multiviewGeometryShader; + VkBool32 multiviewTessellationShader; +} VkPhysicalDeviceMultiviewFeatures; + +typedef struct VkPhysicalDeviceMultiviewProperties { + VkStructureType sType; + void* pNext; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; +} VkPhysicalDeviceMultiviewProperties; + +typedef struct VkPhysicalDeviceVariablePointersFeatures { + VkStructureType sType; + void* pNext; + VkBool32 variablePointersStorageBuffer; + VkBool32 variablePointers; +} VkPhysicalDeviceVariablePointersFeatures; + +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryFeatures { + VkStructureType sType; + void* pNext; + VkBool32 protectedMemory; +} VkPhysicalDeviceProtectedMemoryFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryProperties { + VkStructureType sType; + void* pNext; + VkBool32 protectedNoFault; +} VkPhysicalDeviceProtectedMemoryProperties; + +typedef struct VkDeviceQueueInfo2 { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueIndex; +} VkDeviceQueueInfo2; + +typedef struct VkProtectedSubmitInfo { + VkStructureType sType; + const void* pNext; + VkBool32 protectedSubmit; +} VkProtectedSubmitInfo; + +typedef struct VkSamplerYcbcrConversionCreateInfo { + VkStructureType sType; + const void* pNext; + VkFormat 
format; + VkSamplerYcbcrModelConversion ycbcrModel; + VkSamplerYcbcrRange ycbcrRange; + VkComponentMapping components; + VkChromaLocation xChromaOffset; + VkChromaLocation yChromaOffset; + VkFilter chromaFilter; + VkBool32 forceExplicitReconstruction; +} VkSamplerYcbcrConversionCreateInfo; + +typedef struct VkSamplerYcbcrConversionInfo { + VkStructureType sType; + const void* pNext; + VkSamplerYcbcrConversion conversion; +} VkSamplerYcbcrConversionInfo; + +typedef struct VkBindImagePlaneMemoryInfo { + VkStructureType sType; + const void* pNext; + VkImageAspectFlagBits planeAspect; +} VkBindImagePlaneMemoryInfo; + +typedef struct VkImagePlaneMemoryRequirementsInfo { + VkStructureType sType; + const void* pNext; + VkImageAspectFlagBits planeAspect; +} VkImagePlaneMemoryRequirementsInfo; + +typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures { + VkStructureType sType; + void* pNext; + VkBool32 samplerYcbcrConversion; +} VkPhysicalDeviceSamplerYcbcrConversionFeatures; + +typedef struct VkSamplerYcbcrConversionImageFormatProperties { + VkStructureType sType; + void* pNext; + uint32_t combinedImageSamplerDescriptorCount; +} VkSamplerYcbcrConversionImageFormatProperties; + +typedef struct VkDescriptorUpdateTemplateEntry { + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + size_t offset; + size_t stride; +} VkDescriptorUpdateTemplateEntry; + +typedef struct VkDescriptorUpdateTemplateCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorUpdateTemplateCreateFlags flags; + uint32_t descriptorUpdateEntryCount; + const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries; + VkDescriptorUpdateTemplateType templateType; + VkDescriptorSetLayout descriptorSetLayout; + VkPipelineBindPoint pipelineBindPoint; + VkPipelineLayout pipelineLayout; + uint32_t set; +} VkDescriptorUpdateTemplateCreateInfo; + +typedef struct VkExternalMemoryProperties { + VkExternalMemoryFeatureFlags externalMemoryFeatures; + VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlags compatibleHandleTypes; +} VkExternalMemoryProperties; + +typedef struct VkPhysicalDeviceExternalImageFormatInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalImageFormatInfo; + +typedef struct VkExternalImageFormatProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalImageFormatProperties; + +typedef struct VkPhysicalDeviceExternalBufferInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkBufferUsageFlags usage; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalBufferInfo; + +typedef struct VkExternalBufferProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalBufferProperties; + +typedef struct VkPhysicalDeviceIDProperties { + VkStructureType sType; + void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; +} VkPhysicalDeviceIDProperties; + +typedef struct VkExternalMemoryImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryImageCreateInfo; + +typedef struct VkExternalMemoryBufferCreateInfo { + VkStructureType sType; + const void* pNext; + 
VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryBufferCreateInfo; + +typedef struct VkExportMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExportMemoryAllocateInfo; + +typedef struct VkPhysicalDeviceExternalFenceInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalFenceInfo; + +typedef struct VkExternalFenceProperties { + VkStructureType sType; + void* pNext; + VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes; + VkExternalFenceHandleTypeFlags compatibleHandleTypes; + VkExternalFenceFeatureFlags externalFenceFeatures; +} VkExternalFenceProperties; + +typedef struct VkExportFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlags handleTypes; +} VkExportFenceCreateInfo; + +typedef struct VkExportSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlags handleTypes; +} VkExportSemaphoreCreateInfo; + +typedef struct VkPhysicalDeviceExternalSemaphoreInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalSemaphoreInfo; + +typedef struct VkExternalSemaphoreProperties { + VkStructureType sType; + void* pNext; + VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes; + VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes; + VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures; +} VkExternalSemaphoreProperties; + +typedef struct VkPhysicalDeviceMaintenance3Properties { + VkStructureType sType; + void* pNext; + uint32_t maxPerSetDescriptors; + VkDeviceSize maxMemoryAllocationSize; +} VkPhysicalDeviceMaintenance3Properties; + +typedef struct VkDescriptorSetLayoutSupport { + VkStructureType sType; + void* pNext; + VkBool32 supported; +} VkDescriptorSetLayoutSupport; + +typedef struct VkPhysicalDeviceShaderDrawParametersFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderDrawParameters; +} VkPhysicalDeviceShaderDrawParametersFeatures; + +typedef VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const 
VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +typedef void (VKAPI_PTR 
*PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion( + uint32_t* pApiVersion); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2( + VkDevice device, + const VkDeviceQueueInfo2* pQueueInfo, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion( + VkDevice device, + const 
VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + + +#define VK_VERSION_1_2 1 +// Vulkan 1.2 version number +#define VK_API_VERSION_1_2 VK_MAKE_API_VERSION(0, 1, 2, 0)// Patch version should always be set to 0 + +#define VK_MAX_DRIVER_NAME_SIZE 256U +#define VK_MAX_DRIVER_INFO_SIZE 256U + +typedef enum VkDriverId { + VK_DRIVER_ID_AMD_PROPRIETARY = 1, + VK_DRIVER_ID_AMD_OPEN_SOURCE = 2, + VK_DRIVER_ID_MESA_RADV = 3, + VK_DRIVER_ID_NVIDIA_PROPRIETARY = 4, + VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS = 5, + VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA = 6, + VK_DRIVER_ID_IMAGINATION_PROPRIETARY = 7, + VK_DRIVER_ID_QUALCOMM_PROPRIETARY = 8, + VK_DRIVER_ID_ARM_PROPRIETARY = 9, + VK_DRIVER_ID_GOOGLE_SWIFTSHADER = 10, + VK_DRIVER_ID_GGP_PROPRIETARY = 11, + VK_DRIVER_ID_BROADCOM_PROPRIETARY = 12, + VK_DRIVER_ID_MESA_LLVMPIPE = 13, + VK_DRIVER_ID_MOLTENVK = 14, + VK_DRIVER_ID_COREAVI_PROPRIETARY = 15, + VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY, + VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE, + VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV, + VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = VK_DRIVER_ID_NVIDIA_PROPRIETARY, + VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS, + VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA, + VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = VK_DRIVER_ID_IMAGINATION_PROPRIETARY, + VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = VK_DRIVER_ID_QUALCOMM_PROPRIETARY, + VK_DRIVER_ID_ARM_PROPRIETARY_KHR = VK_DRIVER_ID_ARM_PROPRIETARY, + VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR = VK_DRIVER_ID_GOOGLE_SWIFTSHADER, + VK_DRIVER_ID_GGP_PROPRIETARY_KHR = VK_DRIVER_ID_GGP_PROPRIETARY, + VK_DRIVER_ID_BROADCOM_PROPRIETARY_KHR = VK_DRIVER_ID_BROADCOM_PROPRIETARY, + 
VK_DRIVER_ID_MAX_ENUM = 0x7FFFFFFF +} VkDriverId; + +typedef enum VkShaderFloatControlsIndependence { + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY = 0, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL = 1, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE = 2, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM = 0x7FFFFFFF +} VkShaderFloatControlsIndependence; + +typedef enum VkSamplerReductionMode { + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE = 0, + VK_SAMPLER_REDUCTION_MODE_MIN = 1, + VK_SAMPLER_REDUCTION_MODE_MAX = 2, + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE, + VK_SAMPLER_REDUCTION_MODE_MIN_EXT = VK_SAMPLER_REDUCTION_MODE_MIN, + VK_SAMPLER_REDUCTION_MODE_MAX_EXT = VK_SAMPLER_REDUCTION_MODE_MAX, + VK_SAMPLER_REDUCTION_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerReductionMode; + +typedef enum VkSemaphoreType { + VK_SEMAPHORE_TYPE_BINARY = 0, + VK_SEMAPHORE_TYPE_TIMELINE = 1, + VK_SEMAPHORE_TYPE_BINARY_KHR = VK_SEMAPHORE_TYPE_BINARY, + VK_SEMAPHORE_TYPE_TIMELINE_KHR = VK_SEMAPHORE_TYPE_TIMELINE, + VK_SEMAPHORE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreType; + +typedef enum VkResolveModeFlagBits { + VK_RESOLVE_MODE_NONE = 0, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT = 0x00000001, + VK_RESOLVE_MODE_AVERAGE_BIT = 0x00000002, + VK_RESOLVE_MODE_MIN_BIT = 0x00000004, + VK_RESOLVE_MODE_MAX_BIT = 0x00000008, + VK_RESOLVE_MODE_NONE_KHR = VK_RESOLVE_MODE_NONE, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT, + VK_RESOLVE_MODE_AVERAGE_BIT_KHR = VK_RESOLVE_MODE_AVERAGE_BIT, + VK_RESOLVE_MODE_MIN_BIT_KHR = VK_RESOLVE_MODE_MIN_BIT, + VK_RESOLVE_MODE_MAX_BIT_KHR = VK_RESOLVE_MODE_MAX_BIT, + VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkResolveModeFlagBits; +typedef VkFlags VkResolveModeFlags; + +typedef enum VkDescriptorBindingFlagBits { + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT = 0x00000001, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT = 0x00000002, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT = 0x00000004, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT = 0x00000008, + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT, + VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorBindingFlagBits; +typedef VkFlags VkDescriptorBindingFlags; + +typedef enum VkSemaphoreWaitFlagBits { + VK_SEMAPHORE_WAIT_ANY_BIT = 0x00000001, + VK_SEMAPHORE_WAIT_ANY_BIT_KHR = VK_SEMAPHORE_WAIT_ANY_BIT, + VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreWaitFlagBits; +typedef VkFlags VkSemaphoreWaitFlags; +typedef struct VkPhysicalDeviceVulkan11Features { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; + VkBool32 multiview; + VkBool32 multiviewGeometryShader; + VkBool32 multiviewTessellationShader; + VkBool32 
variablePointersStorageBuffer; + VkBool32 variablePointers; + VkBool32 protectedMemory; + VkBool32 samplerYcbcrConversion; + VkBool32 shaderDrawParameters; +} VkPhysicalDeviceVulkan11Features; + +typedef struct VkPhysicalDeviceVulkan11Properties { + VkStructureType sType; + void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; + uint32_t subgroupSize; + VkShaderStageFlags subgroupSupportedStages; + VkSubgroupFeatureFlags subgroupSupportedOperations; + VkBool32 subgroupQuadOperationsInAllStages; + VkPointClippingBehavior pointClippingBehavior; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; + VkBool32 protectedNoFault; + uint32_t maxPerSetDescriptors; + VkDeviceSize maxMemoryAllocationSize; +} VkPhysicalDeviceVulkan11Properties; + +typedef struct VkPhysicalDeviceVulkan12Features { + VkStructureType sType; + void* pNext; + VkBool32 samplerMirrorClampToEdge; + VkBool32 drawIndirectCount; + VkBool32 storageBuffer8BitAccess; + VkBool32 uniformAndStorageBuffer8BitAccess; + VkBool32 storagePushConstant8; + VkBool32 shaderBufferInt64Atomics; + VkBool32 shaderSharedInt64Atomics; + VkBool32 shaderFloat16; + VkBool32 shaderInt8; + VkBool32 descriptorIndexing; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 shaderStorageBufferArrayNonUniformIndexing; + VkBool32 shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; + VkBool32 samplerFilterMinmax; + VkBool32 scalarBlockLayout; + VkBool32 imagelessFramebuffer; + VkBool32 uniformBufferStandardLayout; + VkBool32 shaderSubgroupExtendedTypes; + VkBool32 separateDepthStencilLayouts; + VkBool32 hostQueryReset; + VkBool32 timelineSemaphore; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; + VkBool32 vulkanMemoryModel; + VkBool32 vulkanMemoryModelDeviceScope; + VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; + VkBool32 shaderOutputViewportIndex; + VkBool32 shaderOutputLayer; + VkBool32 subgroupBroadcastDynamicId; +} VkPhysicalDeviceVulkan12Features; + +typedef struct VkConformanceVersion { + uint8_t major; + uint8_t minor; + uint8_t subminor; + uint8_t patch; +} VkConformanceVersion; + +typedef struct VkPhysicalDeviceVulkan12Properties { + VkStructureType sType; + void* pNext; + VkDriverId driverID; + char driverName[VK_MAX_DRIVER_NAME_SIZE]; + char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; + VkConformanceVersion conformanceVersion; + VkShaderFloatControlsIndependence denormBehaviorIndependence; + 
VkShaderFloatControlsIndependence roundingModeIndependence; + VkBool32 shaderSignedZeroInfNanPreserveFloat16; + VkBool32 shaderSignedZeroInfNanPreserveFloat32; + VkBool32 shaderSignedZeroInfNanPreserveFloat64; + VkBool32 shaderDenormPreserveFloat16; + VkBool32 shaderDenormPreserveFloat32; + VkBool32 shaderDenormPreserveFloat64; + VkBool32 shaderDenormFlushToZeroFloat16; + VkBool32 shaderDenormFlushToZeroFloat32; + VkBool32 shaderDenormFlushToZeroFloat64; + VkBool32 shaderRoundingModeRTEFloat16; + VkBool32 shaderRoundingModeRTEFloat32; + VkBool32 shaderRoundingModeRTEFloat64; + VkBool32 shaderRoundingModeRTZFloat16; + VkBool32 shaderRoundingModeRTZFloat32; + VkBool32 shaderRoundingModeRTZFloat64; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; + VkResolveModeFlags supportedDepthResolveModes; + VkResolveModeFlags supportedStencilResolveModes; + VkBool32 independentResolveNone; + VkBool32 independentResolve; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; + uint64_t maxTimelineSemaphoreValueDifference; + VkSampleCountFlags framebufferIntegerColorSampleCounts; +} VkPhysicalDeviceVulkan12Properties; + +typedef struct VkImageFormatListCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} VkImageFormatListCreateInfo; + +typedef struct VkAttachmentDescription2 { + VkStructureType sType; + const void* pNext; + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription2; + +typedef struct VkAttachmentReference2 { + VkStructureType sType; + const void* pNext; + uint32_t attachment; + VkImageLayout layout; + VkImageAspectFlags aspectMask; +} VkAttachmentReference2; + +typedef struct VkSubpassDescription2 { + VkStructureType sType; + const void* pNext; + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t viewMask; + uint32_t inputAttachmentCount; + const VkAttachmentReference2* pInputAttachments; + uint32_t colorAttachmentCount; + const 
VkAttachmentReference2* pColorAttachments; + const VkAttachmentReference2* pResolveAttachments; + const VkAttachmentReference2* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription2; + +typedef struct VkSubpassDependency2 { + VkStructureType sType; + const void* pNext; + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; + int32_t viewOffset; +} VkSubpassDependency2; + +typedef struct VkRenderPassCreateInfo2 { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription2* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription2* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency2* pDependencies; + uint32_t correlatedViewMaskCount; + const uint32_t* pCorrelatedViewMasks; +} VkRenderPassCreateInfo2; + +typedef struct VkSubpassBeginInfo { + VkStructureType sType; + const void* pNext; + VkSubpassContents contents; +} VkSubpassBeginInfo; + +typedef struct VkSubpassEndInfo { + VkStructureType sType; + const void* pNext; +} VkSubpassEndInfo; + +typedef struct VkPhysicalDevice8BitStorageFeatures { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer8BitAccess; + VkBool32 uniformAndStorageBuffer8BitAccess; + VkBool32 storagePushConstant8; +} VkPhysicalDevice8BitStorageFeatures; + +typedef struct VkPhysicalDeviceDriverProperties { + VkStructureType sType; + void* pNext; + VkDriverId driverID; + char driverName[VK_MAX_DRIVER_NAME_SIZE]; + char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; + VkConformanceVersion conformanceVersion; +} VkPhysicalDeviceDriverProperties; + +typedef struct VkPhysicalDeviceShaderAtomicInt64Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderBufferInt64Atomics; + VkBool32 shaderSharedInt64Atomics; +} VkPhysicalDeviceShaderAtomicInt64Features; + +typedef struct VkPhysicalDeviceShaderFloat16Int8Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloat16; + VkBool32 shaderInt8; +} VkPhysicalDeviceShaderFloat16Int8Features; + +typedef struct VkPhysicalDeviceFloatControlsProperties { + VkStructureType sType; + void* pNext; + VkShaderFloatControlsIndependence denormBehaviorIndependence; + VkShaderFloatControlsIndependence roundingModeIndependence; + VkBool32 shaderSignedZeroInfNanPreserveFloat16; + VkBool32 shaderSignedZeroInfNanPreserveFloat32; + VkBool32 shaderSignedZeroInfNanPreserveFloat64; + VkBool32 shaderDenormPreserveFloat16; + VkBool32 shaderDenormPreserveFloat32; + VkBool32 shaderDenormPreserveFloat64; + VkBool32 shaderDenormFlushToZeroFloat16; + VkBool32 shaderDenormFlushToZeroFloat32; + VkBool32 shaderDenormFlushToZeroFloat64; + VkBool32 shaderRoundingModeRTEFloat16; + VkBool32 shaderRoundingModeRTEFloat32; + VkBool32 shaderRoundingModeRTEFloat64; + VkBool32 shaderRoundingModeRTZFloat16; + VkBool32 shaderRoundingModeRTZFloat32; + VkBool32 shaderRoundingModeRTZFloat64; +} VkPhysicalDeviceFloatControlsProperties; + +typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t bindingCount; + const VkDescriptorBindingFlags* pBindingFlags; +} VkDescriptorSetLayoutBindingFlagsCreateInfo; + +typedef struct VkPhysicalDeviceDescriptorIndexingFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + 
VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 shaderStorageBufferArrayNonUniformIndexing; + VkBool32 shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; +} VkPhysicalDeviceDescriptorIndexingFeatures; + +typedef struct VkPhysicalDeviceDescriptorIndexingProperties { + VkStructureType sType; + void* pNext; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; +} VkPhysicalDeviceDescriptorIndexingProperties; + +typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfo { + VkStructureType sType; + const void* pNext; + uint32_t descriptorSetCount; + const uint32_t* pDescriptorCounts; +} VkDescriptorSetVariableDescriptorCountAllocateInfo; + +typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupport { + VkStructureType sType; + void* pNext; + uint32_t maxVariableDescriptorCount; +} VkDescriptorSetVariableDescriptorCountLayoutSupport; + +typedef struct VkSubpassDescriptionDepthStencilResolve { + VkStructureType sType; + const void* pNext; + VkResolveModeFlagBits depthResolveMode; + VkResolveModeFlagBits stencilResolveMode; + const VkAttachmentReference2* pDepthStencilResolveAttachment; +} VkSubpassDescriptionDepthStencilResolve; + +typedef struct VkPhysicalDeviceDepthStencilResolveProperties { + VkStructureType sType; + void* pNext; + VkResolveModeFlags supportedDepthResolveModes; + VkResolveModeFlags supportedStencilResolveModes; + VkBool32 
independentResolveNone; + VkBool32 independentResolve; +} VkPhysicalDeviceDepthStencilResolveProperties; + +typedef struct VkPhysicalDeviceScalarBlockLayoutFeatures { + VkStructureType sType; + void* pNext; + VkBool32 scalarBlockLayout; +} VkPhysicalDeviceScalarBlockLayoutFeatures; + +typedef struct VkImageStencilUsageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags stencilUsage; +} VkImageStencilUsageCreateInfo; + +typedef struct VkSamplerReductionModeCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerReductionMode reductionMode; +} VkSamplerReductionModeCreateInfo; + +typedef struct VkPhysicalDeviceSamplerFilterMinmaxProperties { + VkStructureType sType; + void* pNext; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; +} VkPhysicalDeviceSamplerFilterMinmaxProperties; + +typedef struct VkPhysicalDeviceVulkanMemoryModelFeatures { + VkStructureType sType; + void* pNext; + VkBool32 vulkanMemoryModel; + VkBool32 vulkanMemoryModelDeviceScope; + VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; +} VkPhysicalDeviceVulkanMemoryModelFeatures; + +typedef struct VkPhysicalDeviceImagelessFramebufferFeatures { + VkStructureType sType; + void* pNext; + VkBool32 imagelessFramebuffer; +} VkPhysicalDeviceImagelessFramebufferFeatures; + +typedef struct VkFramebufferAttachmentImageInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageUsageFlags usage; + uint32_t width; + uint32_t height; + uint32_t layerCount; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} VkFramebufferAttachmentImageInfo; + +typedef struct VkFramebufferAttachmentsCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t attachmentImageInfoCount; + const VkFramebufferAttachmentImageInfo* pAttachmentImageInfos; +} VkFramebufferAttachmentsCreateInfo; + +typedef struct VkRenderPassAttachmentBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t attachmentCount; + const VkImageView* pAttachments; +} VkRenderPassAttachmentBeginInfo; + +typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeatures { + VkStructureType sType; + void* pNext; + VkBool32 uniformBufferStandardLayout; +} VkPhysicalDeviceUniformBufferStandardLayoutFeatures; + +typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupExtendedTypes; +} VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures; + +typedef struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures { + VkStructureType sType; + void* pNext; + VkBool32 separateDepthStencilLayouts; +} VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures; + +typedef struct VkAttachmentReferenceStencilLayout { + VkStructureType sType; + void* pNext; + VkImageLayout stencilLayout; +} VkAttachmentReferenceStencilLayout; + +typedef struct VkAttachmentDescriptionStencilLayout { + VkStructureType sType; + void* pNext; + VkImageLayout stencilInitialLayout; + VkImageLayout stencilFinalLayout; +} VkAttachmentDescriptionStencilLayout; + +typedef struct VkPhysicalDeviceHostQueryResetFeatures { + VkStructureType sType; + void* pNext; + VkBool32 hostQueryReset; +} VkPhysicalDeviceHostQueryResetFeatures; + +typedef struct VkPhysicalDeviceTimelineSemaphoreFeatures { + VkStructureType sType; + void* pNext; + VkBool32 timelineSemaphore; +} VkPhysicalDeviceTimelineSemaphoreFeatures; + +typedef struct VkPhysicalDeviceTimelineSemaphoreProperties { + VkStructureType sType; + void* pNext; + uint64_t 
maxTimelineSemaphoreValueDifference; +} VkPhysicalDeviceTimelineSemaphoreProperties; + +typedef struct VkSemaphoreTypeCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreType semaphoreType; + uint64_t initialValue; +} VkSemaphoreTypeCreateInfo; + +typedef struct VkTimelineSemaphoreSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValueCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValueCount; + const uint64_t* pSignalSemaphoreValues; +} VkTimelineSemaphoreSubmitInfo; + +typedef struct VkSemaphoreWaitInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreWaitFlags flags; + uint32_t semaphoreCount; + const VkSemaphore* pSemaphores; + const uint64_t* pValues; +} VkSemaphoreWaitInfo; + +typedef struct VkSemaphoreSignalInfo { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + uint64_t value; +} VkSemaphoreSignalInfo; + +typedef struct VkPhysicalDeviceBufferDeviceAddressFeatures { + VkStructureType sType; + void* pNext; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; +} VkPhysicalDeviceBufferDeviceAddressFeatures; + +typedef struct VkBufferDeviceAddressInfo { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; +} VkBufferDeviceAddressInfo; + +typedef struct VkBufferOpaqueCaptureAddressCreateInfo { + VkStructureType sType; + const void* pNext; + uint64_t opaqueCaptureAddress; +} VkBufferOpaqueCaptureAddressCreateInfo; + +typedef struct VkMemoryOpaqueCaptureAddressAllocateInfo { + VkStructureType sType; + const void* pNext; + uint64_t opaqueCaptureAddress; +} VkMemoryOpaqueCaptureAddressAllocateInfo; + +typedef struct VkDeviceMemoryOpaqueCaptureAddressInfo { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkDeviceMemoryOpaqueCaptureAddressInfo; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkResetQueryPool)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValue)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphores)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphore)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddress)(VkDevice 
device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddress)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkResetQueryPool( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif + + +#define VK_KHR_surface 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) +#define VK_KHR_SURFACE_SPEC_VERSION 25 +#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" + +typedef enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, + VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, + VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, + VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentModeKHR; + +typedef enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, + VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, + VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, + VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, + VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, + 
VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, + VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, + VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, + VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000, + VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, + VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkColorSpaceKHR; + +typedef enum VkSurfaceTransformFlagBitsKHR { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, + VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, + VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, + VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, + VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, + VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSurfaceTransformFlagBitsKHR; + +typedef enum VkCompositeAlphaFlagBitsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, + VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCompositeAlphaFlagBitsKHR; +typedef VkFlags VkCompositeAlphaFlagsKHR; +typedef VkFlags VkSurfaceTransformFlagsKHR; +typedef struct VkSurfaceCapabilitiesKHR { + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; +} VkSurfaceCapabilitiesKHR; + +typedef struct VkSurfaceFormatKHR { + VkFormat format; + VkColorSpaceKHR colorSpace; +} VkSurfaceFormatKHR; + +typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( + VkInstance instance, + VkSurfaceKHR surface, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + VkSurfaceKHR surface, + VkBool32* pSupported); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + 
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif + + +#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, + VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, + VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004, + VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef enum VkDeviceGroupPresentModeFlagBitsKHR { + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, + VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, + VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, + VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDeviceGroupPresentModeFlagBitsKHR; +typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + +typedef struct VkImageSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; +} VkImageSwapchainCreateInfoKHR; + +typedef struct VkBindImageMemorySwapchainInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint32_t imageIndex; +} VkBindImageMemorySwapchainInfoKHR; + +typedef struct VkAcquireNextImageInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint64_t timeout; + VkSemaphore semaphore; + VkFence fence; + uint32_t deviceMask; +} VkAcquireNextImageInfoKHR; + +typedef struct VkDeviceGroupPresentCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupPresentCapabilitiesKHR; + +typedef struct VkDeviceGroupPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + VkDeviceGroupPresentModeFlagBitsKHR mode; +} VkDeviceGroupPresentInfoKHR; + +typedef struct VkDeviceGroupSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + 
VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupSwapchainCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); + +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore semaphore, + VkFence fence, + uint32_t* pImageIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex); +#endif + + +#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) +#define VK_KHR_DISPLAY_SPEC_VERSION 23 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" +typedef VkFlags VkDisplayModeCreateFlagsKHR; + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 
0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* 
pPropertyCount, + VkDisplayPlanePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_KHR_display_swapchain 1 +#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif + + +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +#define VK_KHR_multiview 1 +#define VK_KHR_MULTIVIEW_SPEC_VERSION 1 +#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" +typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; + +typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; + +typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; + + + +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" +typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; + +typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; + +typedef VkFormatProperties2 VkFormatProperties2KHR; + +typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; + +typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; + +typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; + +typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; + +typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; + +typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void 
(VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +#endif + + +#define VK_KHR_device_group 1 +#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 4 +#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" +typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR; + +typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; + +typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; + +typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; + +typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; + +typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; + +typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; + +typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; + +typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; + +typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; + +typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, 
VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + + +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + +#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1" +typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); +#endif + + +#define VK_KHR_device_group_creation 1 +#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 +#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" +#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE +typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; + +typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +#endif + + +#define VK_KHR_external_memory_capabilities 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" +#define VK_LUID_SIZE_KHR VK_LUID_SIZE +typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; + +typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; + +typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; + +typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; + +typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; + +typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; + +typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR; + +typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; + +typedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +#endif + + +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL +typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; + +typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; + +typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR; + + + +#define VK_KHR_external_memory_fd 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif + + +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" +typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR; + +typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +#endif + + +#define VK_KHR_external_semaphore 1 +#define 
VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" +typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; + +typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; + +typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; + + + +#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" +typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif + + +#define VK_KHR_shader_float16_int8 1 +#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8" +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR; + +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR; + + + +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" +typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; + + + +#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 2 +#define 
VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +#define VK_KHR_descriptor_update_template 1 +typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" +typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; + +typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR; + +typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; + +typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); +#endif + + +#define VK_KHR_imageless_framebuffer 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME "VK_KHR_imageless_framebuffer" +typedef VkPhysicalDeviceImagelessFramebufferFeatures VkPhysicalDeviceImagelessFramebufferFeaturesKHR; + +typedef VkFramebufferAttachmentsCreateInfo VkFramebufferAttachmentsCreateInfoKHR; + +typedef VkFramebufferAttachmentImageInfo VkFramebufferAttachmentImageInfoKHR; + +typedef VkRenderPassAttachmentBeginInfo VkRenderPassAttachmentBeginInfoKHR; + + + +#define VK_KHR_create_renderpass2 1 +#define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1 +#define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME "VK_KHR_create_renderpass2" +typedef VkRenderPassCreateInfo2 VkRenderPassCreateInfo2KHR; + +typedef VkAttachmentDescription2 VkAttachmentDescription2KHR; + +typedef VkAttachmentReference2 VkAttachmentReference2KHR; + +typedef VkSubpassDescription2 VkSubpassDescription2KHR; + +typedef VkSubpassDependency2 VkSubpassDependency2KHR; + +typedef VkSubpassBeginInfo VkSubpassBeginInfoKHR; + +typedef VkSubpassEndInfo VkSubpassEndInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, 
const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); +#endif + + +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain); +#endif + + +#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" +typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; + +typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; + +typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; + +typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; + +typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); +#endif + + +#define VK_KHR_external_fence 1 +#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" +typedef VkFenceImportFlags VkFenceImportFlagsKHR; + +typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; + +typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; + + + +#define VK_KHR_external_fence_fd 1 +#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 +#define 
VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" +typedef struct VkImportFenceFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + int fd; +} VkImportFenceFdInfoKHR; + +typedef struct VkFenceGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_performance_query 1 +#define VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION 1 +#define VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME "VK_KHR_performance_query" + +typedef enum VkPerformanceCounterUnitKHR { + VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR = 0, + VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR = 1, + VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR = 2, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR = 3, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR = 4, + VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR = 5, + VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR = 6, + VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR = 7, + VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR = 8, + VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR = 9, + VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR = 10, + VK_PERFORMANCE_COUNTER_UNIT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterUnitKHR; + +typedef enum VkPerformanceCounterScopeKHR { + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0, + VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1, + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2, + VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, + VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, + VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterScopeKHR; + +typedef enum VkPerformanceCounterStorageKHR { + VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR = 0, + VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR = 1, + VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR = 2, + VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR = 3, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR = 4, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR = 5, + VK_PERFORMANCE_COUNTER_STORAGE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterStorageKHR; + +typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR = 0x00000001, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR = 0x00000002, + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR, + VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterDescriptionFlagBitsKHR; +typedef VkFlags VkPerformanceCounterDescriptionFlagsKHR; + +typedef enum VkAcquireProfilingLockFlagBitsKHR { + VK_ACQUIRE_PROFILING_LOCK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF 
+} VkAcquireProfilingLockFlagBitsKHR; +typedef VkFlags VkAcquireProfilingLockFlagsKHR; +typedef struct VkPhysicalDevicePerformanceQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 performanceCounterQueryPools; + VkBool32 performanceCounterMultipleQueryPools; +} VkPhysicalDevicePerformanceQueryFeaturesKHR; + +typedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 allowCommandBufferQueryCopies; +} VkPhysicalDevicePerformanceQueryPropertiesKHR; + +typedef struct VkPerformanceCounterKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterUnitKHR unit; + VkPerformanceCounterScopeKHR scope; + VkPerformanceCounterStorageKHR storage; + uint8_t uuid[VK_UUID_SIZE]; +} VkPerformanceCounterKHR; + +typedef struct VkPerformanceCounterDescriptionKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterDescriptionFlagsKHR flags; + char name[VK_MAX_DESCRIPTION_SIZE]; + char category[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkPerformanceCounterDescriptionKHR; + +typedef struct VkQueryPoolPerformanceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t queueFamilyIndex; + uint32_t counterIndexCount; + const uint32_t* pCounterIndices; +} VkQueryPoolPerformanceCreateInfoKHR; + +typedef union VkPerformanceCounterResultKHR { + int32_t int32; + int64_t int64; + uint32_t uint32; + uint64_t uint64; + float float32; + double float64; +} VkPerformanceCounterResultKHR; + +typedef struct VkAcquireProfilingLockInfoKHR { + VkStructureType sType; + const void* pNext; + VkAcquireProfilingLockFlagsKHR flags; + uint64_t timeout; +} VkAcquireProfilingLockInfoKHR; + +typedef struct VkPerformanceQuerySubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t counterPassIndex; +} VkPerformanceQuerySubmitInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireProfilingLockKHR)(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkReleaseProfilingLockKHR)(VkDevice device); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterKHR* pCounters, + VkPerformanceCounterDescriptionKHR* pCounterDescriptions); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + VkPhysicalDevice physicalDevice, + const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, + uint32_t* pNumPasses); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireProfilingLockKHR( + VkDevice device, + const VkAcquireProfilingLockInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR( + VkDevice device); +#endif + + +#define VK_KHR_maintenance2 1 +#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2" +typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; + +typedef 
VkTessellationDomainOrigin VkTessellationDomainOriginKHR; + +typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR; + +typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR; + +typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR; + +typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR; + +typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR; + + + +#define VK_KHR_get_surface_capabilities2 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2" +typedef struct VkPhysicalDeviceSurfaceInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSurfaceKHR surface; +} VkPhysicalDeviceSurfaceInfo2KHR; + +typedef struct VkSurfaceCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceCapabilitiesKHR surfaceCapabilities; +} VkSurfaceCapabilities2KHR; + +typedef struct VkSurfaceFormat2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceFormatKHR surfaceFormat; +} VkSurfaceFormat2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkSurfaceCapabilities2KHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormat2KHR* pSurfaceFormats); +#endif + + +#define VK_KHR_variable_pointers 1 +#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1 +#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers" +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeaturesKHR; + +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointersFeaturesKHR; + + + +#define VK_KHR_get_display_properties2 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2" +typedef struct VkDisplayProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPropertiesKHR displayProperties; +} VkDisplayProperties2KHR; + +typedef struct VkDisplayPlaneProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlanePropertiesKHR displayPlaneProperties; +} VkDisplayPlaneProperties2KHR; + +typedef struct VkDisplayModeProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayModePropertiesKHR displayModeProperties; +} VkDisplayModeProperties2KHR; + +typedef struct VkDisplayPlaneInfo2KHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeKHR mode; + uint32_t planeIndex; +} VkDisplayPlaneInfo2KHR; + +typedef struct VkDisplayPlaneCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlaneCapabilitiesKHR capabilities; +} VkDisplayPlaneCapabilities2KHR; + +typedef VkResult (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlaneProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModeProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR* pCapabilities); +#endif + + +#define VK_KHR_dedicated_allocation 1 +#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3 +#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation" +typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR; + +typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR; + + + +#define VK_KHR_storage_buffer_storage_class 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class" + + +#define VK_KHR_relaxed_block_layout 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout" + + +#define VK_KHR_get_memory_requirements2 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2" +typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR; + +typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR; + +typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR; + +typedef VkMemoryRequirements2 VkMemoryRequirements2KHR; + +typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* 
pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif + + +#define VK_KHR_image_format_list 1 +#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1 +#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list" +typedef VkImageFormatListCreateInfo VkImageFormatListCreateInfoKHR; + + + +#define VK_KHR_sampler_ycbcr_conversion 1 +typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR; + +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14 +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion" +typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR; + +typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR; + +typedef VkChromaLocation VkChromaLocationKHR; + +typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR; + +typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR; + +typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR; + +typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR; + +typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR; + +typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); +#endif + + +#define VK_KHR_bind_memory2 1 +#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2" +typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR; + +typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); +#endif + + +#define VK_KHR_maintenance3 1 +#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3" 
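+/*
+ * A minimal usage sketch for the VK_KHR_get_memory_requirements2 and
+ * VK_KHR_bind_memory2 entry points declared above. This comment is an
+ * illustrative annotation, not part of the upstream Khronos header: it
+ * assumes `device`, `image`, and a suitable `memory` allocation already
+ * exist, and it loads the extension functions through vkGetDeviceProcAddr.
+ *
+ *   PFN_vkGetImageMemoryRequirements2KHR pGetImageMemReq2 =
+ *       (PFN_vkGetImageMemoryRequirements2KHR)
+ *           vkGetDeviceProcAddr(device, "vkGetImageMemoryRequirements2KHR");
+ *   PFN_vkBindImageMemory2KHR pBindImageMemory2 =
+ *       (PFN_vkBindImageMemory2KHR)
+ *           vkGetDeviceProcAddr(device, "vkBindImageMemory2KHR");
+ *
+ *   VkImageMemoryRequirementsInfo2 info = {0};
+ *   info.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
+ *   info.image = image;
+ *
+ *   VkMemoryDedicatedRequirements dedicated = {0};  // chained via pNext (VK_KHR_dedicated_allocation)
+ *   dedicated.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
+ *
+ *   VkMemoryRequirements2 reqs = {0};
+ *   reqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
+ *   reqs.pNext = &dedicated;
+ *
+ *   pGetImageMemReq2(device, &info, &reqs);
+ *
+ *   // ...allocate `memory` of at least reqs.memoryRequirements.size,
+ *   // from a type permitted by reqs.memoryRequirements.memoryTypeBits...
+ *
+ *   VkBindImageMemoryInfo bind = {0};
+ *   bind.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
+ *   bind.image = image;
+ *   bind.memory = memory;
+ *   bind.memoryOffset = 0;
+ *   pBindImageMemory2(device, 1, &bind);
+ */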
+typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; + +typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + + +#define VK_KHR_draw_indirect_count 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_KHR_shader_subgroup_extended_types 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME "VK_KHR_shader_subgroup_extended_types" +typedef VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR; + + + +#define VK_KHR_8bit_storage 1 +#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage" +typedef VkPhysicalDevice8BitStorageFeatures VkPhysicalDevice8BitStorageFeaturesKHR; + + + +#define VK_KHR_shader_atomic_int64 1 +#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64" +typedef VkPhysicalDeviceShaderAtomicInt64Features VkPhysicalDeviceShaderAtomicInt64FeaturesKHR; + + + +#define VK_KHR_shader_clock 1 +#define VK_KHR_SHADER_CLOCK_SPEC_VERSION 1 +#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME "VK_KHR_shader_clock" +typedef struct VkPhysicalDeviceShaderClockFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupClock; + VkBool32 shaderDeviceClock; +} VkPhysicalDeviceShaderClockFeaturesKHR; + + + +#define VK_KHR_driver_properties 1 +#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties" +#define VK_MAX_DRIVER_NAME_SIZE_KHR VK_MAX_DRIVER_NAME_SIZE +#define VK_MAX_DRIVER_INFO_SIZE_KHR VK_MAX_DRIVER_INFO_SIZE +typedef VkDriverId VkDriverIdKHR; + +typedef VkConformanceVersion VkConformanceVersionKHR; + +typedef VkPhysicalDeviceDriverProperties VkPhysicalDeviceDriverPropertiesKHR; + + + +#define VK_KHR_shader_float_controls 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4 +#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls" +typedef 
VkShaderFloatControlsIndependence VkShaderFloatControlsIndependenceKHR; + +typedef VkPhysicalDeviceFloatControlsProperties VkPhysicalDeviceFloatControlsPropertiesKHR; + + + +#define VK_KHR_depth_stencil_resolve 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME "VK_KHR_depth_stencil_resolve" +typedef VkResolveModeFlagBits VkResolveModeFlagBitsKHR; + +typedef VkResolveModeFlags VkResolveModeFlagsKHR; + +typedef VkSubpassDescriptionDepthStencilResolve VkSubpassDescriptionDepthStencilResolveKHR; + +typedef VkPhysicalDeviceDepthStencilResolveProperties VkPhysicalDeviceDepthStencilResolvePropertiesKHR; + + + +#define VK_KHR_swapchain_mutable_format 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format" + + +#define VK_KHR_timeline_semaphore 1 +#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2 +#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME "VK_KHR_timeline_semaphore" +typedef VkSemaphoreType VkSemaphoreTypeKHR; + +typedef VkSemaphoreWaitFlagBits VkSemaphoreWaitFlagBitsKHR; + +typedef VkSemaphoreWaitFlags VkSemaphoreWaitFlagsKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreFeatures VkPhysicalDeviceTimelineSemaphoreFeaturesKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreProperties VkPhysicalDeviceTimelineSemaphorePropertiesKHR; + +typedef VkSemaphoreTypeCreateInfo VkSemaphoreTypeCreateInfoKHR; + +typedef VkTimelineSemaphoreSubmitInfo VkTimelineSemaphoreSubmitInfoKHR; + +typedef VkSemaphoreWaitInfo VkSemaphoreWaitInfoKHR; + +typedef VkSemaphoreSignalInfo VkSemaphoreSignalInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); +#endif + + +#define VK_KHR_vulkan_memory_model 1 +#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3 +#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model" +typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryModelFeaturesKHR; + + + +#define VK_KHR_shader_terminate_invocation 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME "VK_KHR_shader_terminate_invocation" +typedef struct VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderTerminateInvocation; +} VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR; + + + +#define VK_KHR_fragment_shading_rate 1 +#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 1 +#define VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME "VK_KHR_fragment_shading_rate" + +typedef enum VkFragmentShadingRateCombinerOpKHR { + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR = 0, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR = 1, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR = 2, + 
VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR = 3, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR = 4, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR = 0x7FFFFFFF +} VkFragmentShadingRateCombinerOpKHR; +typedef struct VkFragmentShadingRateAttachmentInfoKHR { + VkStructureType sType; + const void* pNext; + const VkAttachmentReference2* pFragmentShadingRateAttachment; + VkExtent2D shadingRateAttachmentTexelSize; +} VkFragmentShadingRateAttachmentInfoKHR; + +typedef struct VkPipelineFragmentShadingRateStateCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExtent2D fragmentSize; + VkFragmentShadingRateCombinerOpKHR combinerOps[2]; +} VkPipelineFragmentShadingRateStateCreateInfoKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineFragmentShadingRate; + VkBool32 primitiveFragmentShadingRate; + VkBool32 attachmentFragmentShadingRate; +} VkPhysicalDeviceFragmentShadingRateFeaturesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRatePropertiesKHR { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentShadingRateAttachmentTexelSize; + VkExtent2D maxFragmentShadingRateAttachmentTexelSize; + uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio; + VkBool32 primitiveFragmentShadingRateWithMultipleViewports; + VkBool32 layeredShadingRateAttachments; + VkBool32 fragmentShadingRateNonTrivialCombinerOps; + VkExtent2D maxFragmentSize; + uint32_t maxFragmentSizeAspectRatio; + uint32_t maxFragmentShadingRateCoverageSamples; + VkSampleCountFlagBits maxFragmentShadingRateRasterizationSamples; + VkBool32 fragmentShadingRateWithShaderDepthStencilWrites; + VkBool32 fragmentShadingRateWithSampleMask; + VkBool32 fragmentShadingRateWithShaderSampleMask; + VkBool32 fragmentShadingRateWithConservativeRasterization; + VkBool32 fragmentShadingRateWithFragmentShaderInterlock; + VkBool32 fragmentShadingRateWithCustomSampleLocations; + VkBool32 fragmentShadingRateStrictMultiplyCombiner; +} VkPhysicalDeviceFragmentShadingRatePropertiesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateKHR { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleCounts; + VkExtent2D fragmentSize; +} VkPhysicalDeviceFragmentShadingRateKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); +typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateKHR)(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateKHR( + VkCommandBuffer commandBuffer, + const VkExtent2D* pFragmentSize, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); +#endif + + +#define VK_KHR_spirv_1_4 1 +#define VK_KHR_SPIRV_1_4_SPEC_VERSION 1 +#define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4" + + +#define VK_KHR_surface_protected_capabilities 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME "VK_KHR_surface_protected_capabilities" +typedef struct VkSurfaceProtectedCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + 
VkBool32 supportsProtected; +} VkSurfaceProtectedCapabilitiesKHR; + + + +#define VK_KHR_separate_depth_stencil_layouts 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME "VK_KHR_separate_depth_stencil_layouts" +typedef VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR; + +typedef VkAttachmentReferenceStencilLayout VkAttachmentReferenceStencilLayoutKHR; + +typedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayoutKHR; + + + +#define VK_KHR_uniform_buffer_standard_layout 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME "VK_KHR_uniform_buffer_standard_layout" +typedef VkPhysicalDeviceUniformBufferStandardLayoutFeatures VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR; + + + +#define VK_KHR_buffer_device_address 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_KHR_buffer_device_address" +typedef VkPhysicalDeviceBufferDeviceAddressFeatures VkPhysicalDeviceBufferDeviceAddressFeaturesKHR; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoKHR; + +typedef VkBufferOpaqueCaptureAddressCreateInfo VkBufferOpaqueCaptureAddressCreateInfoKHR; + +typedef VkMemoryOpaqueCaptureAddressAllocateInfo VkMemoryOpaqueCaptureAddressAllocateInfoKHR; + +typedef VkDeviceMemoryOpaqueCaptureAddressInfo VkDeviceMemoryOpaqueCaptureAddressInfoKHR; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif + + +#define VK_KHR_deferred_host_operations 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR) +#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 4 +#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations" +typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation); +typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator); +typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR( + VkDevice device, + const VkAllocationCallbacks* pAllocator, + VkDeferredOperationKHR* pDeferredOperation); + +VKAPI_ATTR void 
VKAPI_CALL vkDestroyDeferredOperationKHR( + VkDevice device, + VkDeferredOperationKHR operation, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR( + VkDevice device, + VkDeferredOperationKHR operation); +#endif + + +#define VK_KHR_pipeline_executable_properties 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties" + +typedef enum VkPipelineExecutableStatisticFormatKHR { + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPipelineExecutableStatisticFormatKHR; +typedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineExecutableInfo; +} VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + +typedef struct VkPipelineInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; +} VkPipelineInfoKHR; + +typedef struct VkPipelineExecutablePropertiesKHR { + VkStructureType sType; + void* pNext; + VkShaderStageFlags stages; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + uint32_t subgroupSize; +} VkPipelineExecutablePropertiesKHR; + +typedef struct VkPipelineExecutableInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; + uint32_t executableIndex; +} VkPipelineExecutableInfoKHR; + +typedef union VkPipelineExecutableStatisticValueKHR { + VkBool32 b32; + int64_t i64; + uint64_t u64; + double f64; +} VkPipelineExecutableStatisticValueKHR; + +typedef struct VkPipelineExecutableStatisticKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkPipelineExecutableStatisticFormatKHR format; + VkPipelineExecutableStatisticValueKHR value; +} VkPipelineExecutableStatisticKHR; + +typedef struct VkPipelineExecutableInternalRepresentationKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkBool32 isText; + size_t dataSize; + void* pData; +} VkPipelineExecutableInternalRepresentationKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR( + VkDevice device, + const VkPipelineInfoKHR* pPipelineInfo, + uint32_t* 
pExecutableCount, + VkPipelineExecutablePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pStatisticCount, + VkPipelineExecutableStatisticKHR* pStatistics); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pInternalRepresentationCount, + VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); +#endif + + +#define VK_KHR_pipeline_library 1 +#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library" +typedef struct VkPipelineLibraryCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t libraryCount; + const VkPipeline* pLibraries; +} VkPipelineLibraryCreateInfoKHR; + + + +#define VK_KHR_shader_non_semantic_info 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME "VK_KHR_shader_non_semantic_info" + + +#define VK_KHR_synchronization2 1 +typedef uint64_t VkFlags64; +#define VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION 1 +#define VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME "VK_KHR_synchronization2" +typedef VkFlags64 VkPipelineStageFlags2KHR; + +// Flag bits for VkPipelineStageFlagBits2KHR +typedef VkFlags64 VkPipelineStageFlagBits2KHR; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 0x00000400ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL; +static const VkPipelineStageFlagBits2KHR 
VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL; +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR = 0x04000000ULL; +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR = 0x08000000ULL; +#endif +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV = 0x00020000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV = 0x00400000; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR = 0x00200000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV = 0x00200000; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV = 0x00080000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV = 0x00100000ULL; +static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_FLAG_BITS_2KHR_MAX_ENUM_KHR = 0x7FFFFFFFFFFFFFFFULL; + +typedef VkFlags64 VkAccessFlags2KHR; + +// Flag bits for VkAccessFlagBits2KHR +typedef VkFlags64 VkAccessFlagBits2KHR; +static const VkAccessFlagBits2KHR VK_ACCESS_2_NONE_KHR = 0ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL; +static const 
VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_MEMORY_READ_BIT_KHR = 0x00008000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL; +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR = 0x800000000ULL; +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR = 0x1000000000ULL; +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR = 0x2000000000ULL; +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR = 0x4000000000ULL; +#endif +static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000; +static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000; +static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000; +static const VkAccessFlagBits2KHR VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000ULL; +static const VkAccessFlagBits2KHR VK_ACCESS_FLAG_BITS_2KHR_MAX_ENUM_KHR = 0x7FFFFFFFFFFFFFFFULL; + + +typedef enum VkSubmitFlagBitsKHR { + VK_SUBMIT_PROTECTED_BIT_KHR = 0x00000001, + VK_SUBMIT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSubmitFlagBitsKHR; +typedef VkFlags VkSubmitFlagsKHR; +typedef struct VkMemoryBarrier2KHR { + VkStructureType sType; + const void* pNext; + VkPipelineStageFlags2KHR srcStageMask; + VkAccessFlags2KHR srcAccessMask; + VkPipelineStageFlags2KHR dstStageMask; + VkAccessFlags2KHR dstAccessMask; +} VkMemoryBarrier2KHR; + +typedef struct VkBufferMemoryBarrier2KHR { + VkStructureType sType; + const void* pNext; + VkPipelineStageFlags2KHR srcStageMask; + VkAccessFlags2KHR srcAccessMask; + VkPipelineStageFlags2KHR dstStageMask; + VkAccessFlags2KHR dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} 
VkBufferMemoryBarrier2KHR; + +typedef struct VkImageMemoryBarrier2KHR { + VkStructureType sType; + const void* pNext; + VkPipelineStageFlags2KHR srcStageMask; + VkAccessFlags2KHR srcAccessMask; + VkPipelineStageFlags2KHR dstStageMask; + VkAccessFlags2KHR dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier2KHR; + +typedef struct VkDependencyInfoKHR { + VkStructureType sType; + const void* pNext; + VkDependencyFlags dependencyFlags; + uint32_t memoryBarrierCount; + const VkMemoryBarrier2KHR* pMemoryBarriers; + uint32_t bufferMemoryBarrierCount; + const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers; + uint32_t imageMemoryBarrierCount; + const VkImageMemoryBarrier2KHR* pImageMemoryBarriers; +} VkDependencyInfoKHR; + +typedef struct VkSemaphoreSubmitInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + uint64_t value; + VkPipelineStageFlags2KHR stageMask; + uint32_t deviceIndex; +} VkSemaphoreSubmitInfoKHR; + +typedef struct VkCommandBufferSubmitInfoKHR { + VkStructureType sType; + const void* pNext; + VkCommandBuffer commandBuffer; + uint32_t deviceMask; +} VkCommandBufferSubmitInfoKHR; + +typedef struct VkSubmitInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSubmitFlagsKHR flags; + uint32_t waitSemaphoreInfoCount; + const VkSemaphoreSubmitInfoKHR* pWaitSemaphoreInfos; + uint32_t commandBufferInfoCount; + const VkCommandBufferSubmitInfoKHR* pCommandBufferInfos; + uint32_t signalSemaphoreInfoCount; + const VkSemaphoreSubmitInfoKHR* pSignalSemaphoreInfos; +} VkSubmitInfo2KHR; + +typedef struct VkPhysicalDeviceSynchronization2FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 synchronization2; +} VkPhysicalDeviceSynchronization2FeaturesKHR; + +typedef struct VkQueueFamilyCheckpointProperties2NV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags2KHR checkpointExecutionStageMask; +} VkQueueFamilyCheckpointProperties2NV; + +typedef struct VkCheckpointData2NV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags2KHR stage; + void* pCheckpointMarker; +} VkCheckpointData2NV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR* pDependencyInfo); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2KHR)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfoKHR* pDependencyInfos); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2KHR)(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR* pDependencyInfo); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2KHR)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage, VkQueryPool queryPool, uint32_t query); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2KHR)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR* pSubmits, VkFence fence); +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarker2AMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointData2NV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointData2NV* pCheckpointData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2KHR( + VkCommandBuffer 
commandBuffer, + VkEvent event, + const VkDependencyInfoKHR* pDependencyInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2KHR( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags2KHR stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2KHR( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + const VkDependencyInfoKHR* pDependencyInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2KHR( + VkCommandBuffer commandBuffer, + const VkDependencyInfoKHR* pDependencyInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2KHR( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags2KHR stage, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2KHR( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo2KHR* pSubmits, + VkFence fence); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarker2AMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags2KHR stage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); + +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointData2NV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointData2NV* pCheckpointData); +#endif + + +#define VK_KHR_zero_initialize_workgroup_memory 1 +#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION 1 +#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME "VK_KHR_zero_initialize_workgroup_memory" +typedef struct VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderZeroInitializeWorkgroupMemory; +} VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR; + + + +#define VK_KHR_workgroup_memory_explicit_layout 1 +#define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME "VK_KHR_workgroup_memory_explicit_layout" +typedef struct VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 workgroupMemoryExplicitLayout; + VkBool32 workgroupMemoryExplicitLayoutScalarBlockLayout; + VkBool32 workgroupMemoryExplicitLayout8BitAccess; + VkBool32 workgroupMemoryExplicitLayout16BitAccess; +} VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR; + + + +#define VK_KHR_copy_commands2 1 +#define VK_KHR_COPY_COMMANDS_2_SPEC_VERSION 1 +#define VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME "VK_KHR_copy_commands2" +typedef struct VkBufferCopy2KHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy2KHR; + +typedef struct VkCopyBufferInfo2KHR { + VkStructureType sType; + const void* pNext; + VkBuffer srcBuffer; + VkBuffer dstBuffer; + uint32_t regionCount; + const VkBufferCopy2KHR* pRegions; +} VkCopyBufferInfo2KHR; + +typedef struct VkImageCopy2KHR { + VkStructureType sType; + const void* pNext; + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy2KHR; + +typedef struct VkCopyImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageCopy2KHR* pRegions; +} VkCopyImageInfo2KHR; + +typedef struct VkBufferImageCopy2KHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers 
imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy2KHR; + +typedef struct VkCopyBufferToImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkBuffer srcBuffer; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkBufferImageCopy2KHR* pRegions; +} VkCopyBufferToImageInfo2KHR; + +typedef struct VkCopyImageToBufferInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkBuffer dstBuffer; + uint32_t regionCount; + const VkBufferImageCopy2KHR* pRegions; +} VkCopyImageToBufferInfo2KHR; + +typedef struct VkImageBlit2KHR { + VkStructureType sType; + const void* pNext; + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit2KHR; + +typedef struct VkBlitImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageBlit2KHR* pRegions; + VkFilter filter; +} VkBlitImageInfo2KHR; + +typedef struct VkImageResolve2KHR { + VkStructureType sType; + const void* pNext; + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve2KHR; + +typedef struct VkResolveImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageResolve2KHR* pRegions; +} VkResolveImageInfo2KHR; + +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR* pCopyBufferInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR* pCopyImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2KHR)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR* pBlitImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2KHR)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferInfo2KHR* pCopyBufferInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageInfo2KHR* pCopyImageInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2KHR( + VkCommandBuffer commandBuffer, + const VkBlitImageInfo2KHR* pBlitImageInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2KHR( + VkCommandBuffer commandBuffer, + const VkResolveImageInfo2KHR* pResolveImageInfo); +#endif + + +#define VK_EXT_debug_report 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) +#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 10 +#define 
VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report" + +typedef enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000150000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportObjectTypeEXT; + +typedef enum VkDebugReportFlagBitsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, + VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportFlagBitsEXT; +typedef VkFlags VkDebugReportFlagsEXT; +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)( + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +typedef struct VkDebugReportCallbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; +} VkDebugReportCallbackCreateInfoEXT; + +typedef 
VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT( + VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugReportCallbackEXT* pCallback); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT( + VkInstance instance, + VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT( + VkInstance instance, + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage); +#endif + + +#define VK_NV_glsl_shader 1 +#define VK_NV_GLSL_SHADER_SPEC_VERSION 1 +#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader" + + +#define VK_EXT_depth_range_unrestricted 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted" + + +#define VK_IMG_filter_cubic 1 +#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1 +#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic" + + +#define VK_AMD_rasterization_order 1 +#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1 +#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order" + +typedef enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF +} VkRasterizationOrderAMD; +typedef struct VkPipelineRasterizationStateRasterizationOrderAMD { + VkStructureType sType; + const void* pNext; + VkRasterizationOrderAMD rasterizationOrder; +} VkPipelineRasterizationStateRasterizationOrderAMD; + + + +#define VK_AMD_shader_trinary_minmax 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax" + + +#define VK_AMD_shader_explicit_vertex_parameter 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter" + + +#define VK_EXT_debug_marker 1 +#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4 +#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker" +typedef struct VkDebugMarkerObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; +} VkDebugMarkerObjectNameInfoEXT; + +typedef struct VkDebugMarkerObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugMarkerObjectTagInfoEXT; + +typedef struct VkDebugMarkerMarkerInfoEXT { + VkStructureType sType; + const void* pNext; + const char* pMarkerName; + float color[4]; +} 
VkDebugMarkerMarkerInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +#endif + + +#define VK_AMD_gcn_shader 1 +#define VK_AMD_GCN_SHADER_SPEC_VERSION 1 +#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader" + + +#define VK_NV_dedicated_allocation 1 +#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation" +typedef struct VkDedicatedAllocationImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationImageCreateInfoNV; + +typedef struct VkDedicatedAllocationBufferCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationBufferCreateInfoNV; + +typedef struct VkDedicatedAllocationMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkDedicatedAllocationMemoryAllocateInfoNV; + + + +#define VK_EXT_transform_feedback 1 +#define VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME "VK_EXT_transform_feedback" +typedef VkFlags VkPipelineRasterizationStateStreamCreateFlagsEXT; +typedef struct VkPhysicalDeviceTransformFeedbackFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 transformFeedback; + VkBool32 geometryStreams; +} VkPhysicalDeviceTransformFeedbackFeaturesEXT; + +typedef struct VkPhysicalDeviceTransformFeedbackPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxTransformFeedbackStreams; + uint32_t maxTransformFeedbackBuffers; + VkDeviceSize maxTransformFeedbackBufferSize; + uint32_t maxTransformFeedbackStreamDataSize; + uint32_t maxTransformFeedbackBufferDataSize; + uint32_t maxTransformFeedbackBufferDataStride; + VkBool32 transformFeedbackQueries; + VkBool32 transformFeedbackStreamsLinesTriangles; + VkBool32 transformFeedbackRasterizationStreamSelect; + VkBool32 transformFeedbackDraw; +} VkPhysicalDeviceTransformFeedbackPropertiesEXT; + +typedef struct VkPipelineRasterizationStateStreamCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateStreamCreateFlagsEXT flags; + uint32_t rasterizationStream; +} VkPipelineRasterizationStateStreamCreateInfoEXT; + +typedef void (VKAPI_PTR 
*PFN_vkCmdBindTransformFeedbackBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes); +typedef void (VKAPI_PTR *PFN_vkCmdBeginTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdEndTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdEndQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectByteCountEXT)(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindTransformFeedbackBuffersEXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectByteCountEXT( + VkCommandBuffer commandBuffer, + uint32_t instanceCount, + uint32_t firstInstance, + VkBuffer counterBuffer, + VkDeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride); +#endif + + +#define VK_NVX_image_view_handle 1 +#define VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION 2 +#define VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME "VK_NVX_image_view_handle" +typedef struct VkImageViewHandleInfoNVX { + VkStructureType sType; + const void* pNext; + VkImageView imageView; + VkDescriptorType descriptorType; + VkSampler sampler; +} VkImageViewHandleInfoNVX; + +typedef struct VkImageViewAddressPropertiesNVX { + VkStructureType sType; + void* pNext; + VkDeviceAddress deviceAddress; + VkDeviceSize size; +} VkImageViewAddressPropertiesNVX; + +typedef uint32_t (VKAPI_PTR *PFN_vkGetImageViewHandleNVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetImageViewAddressNVX)(VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR uint32_t VKAPI_CALL vkGetImageViewHandleNVX( + VkDevice device, + const VkImageViewHandleInfoNVX* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkGetImageViewAddressNVX( + VkDevice device, + VkImageView imageView, + VkImageViewAddressPropertiesNVX* pProperties); +#endif + + +#define VK_AMD_draw_indirect_count 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2 +#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_AMD_negative_viewport_height 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" + + +#define VK_AMD_gpu_shader_half_float 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" + + +#define VK_AMD_shader_ballot 1 +#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 +#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" + + +#define VK_AMD_texture_gather_bias_lod 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod" +typedef struct VkTextureLODGatherFormatPropertiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 supportsTextureGatherLODBiasAMD; +} VkTextureLODGatherFormatPropertiesAMD; + + + +#define VK_AMD_shader_info 1 +#define VK_AMD_SHADER_INFO_SPEC_VERSION 1 +#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" + +typedef enum VkShaderInfoTypeAMD { + VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, + VK_SHADER_INFO_TYPE_BINARY_AMD = 1, + VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, + VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderInfoTypeAMD; +typedef struct VkShaderResourceUsageAMD { + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; +} VkShaderResourceUsageAMD; + +typedef struct VkShaderStatisticsInfoAMD { + VkShaderStageFlags shaderStageMask; + VkShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; +} VkShaderStatisticsInfoAMD; + +typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo); +#endif + + +#define VK_AMD_shader_image_load_store_lod 1 +#define 
VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" + + +#define VK_NV_corner_sampled_image 1 +#define VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION 2 +#define VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME "VK_NV_corner_sampled_image" +typedef struct VkPhysicalDeviceCornerSampledImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cornerSampledImage; +} VkPhysicalDeviceCornerSampledImageFeaturesNV; + + + +#define VK_IMG_format_pvrtc 1 +#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 +#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" + + +#define VK_NV_external_memory_capabilities 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" + +typedef enum VkExternalMemoryHandleTypeFlagBitsNV { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsNV; +typedef VkFlags VkExternalMemoryHandleTypeFlagsNV; + +typedef enum VkExternalMemoryFeatureFlagBitsNV { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsNV; +typedef VkFlags VkExternalMemoryFeatureFlagsNV; +typedef struct VkExternalImageFormatPropertiesNV { + VkImageFormatProperties imageFormatProperties; + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; +} VkExternalImageFormatPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); +#endif + + +#define VK_NV_external_memory 1 +#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" +typedef struct VkExternalMemoryImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExternalMemoryImageCreateInfoNV; + +typedef struct VkExportMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExportMemoryAllocateInfoNV; + + + +#define VK_EXT_validation_flags 1 +#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 2 +#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" + +typedef enum VkValidationCheckEXT { + VK_VALIDATION_CHECK_ALL_EXT = 
0, + VK_VALIDATION_CHECK_SHADERS_EXT = 1, + VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCheckEXT; +typedef struct VkValidationFlagsEXT { + VkStructureType sType; + const void* pNext; + uint32_t disabledValidationCheckCount; + const VkValidationCheckEXT* pDisabledValidationChecks; +} VkValidationFlagsEXT; + + + +#define VK_EXT_shader_subgroup_ballot 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" + + +#define VK_EXT_shader_subgroup_vote 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" + + +#define VK_EXT_texture_compression_astc_hdr 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr" +typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 textureCompressionASTC_HDR; +} VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; + + + +#define VK_EXT_astc_decode_mode 1 +#define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1 +#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode" +typedef struct VkImageViewASTCDecodeModeEXT { + VkStructureType sType; + const void* pNext; + VkFormat decodeMode; +} VkImageViewASTCDecodeModeEXT; + +typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 decodeModeSharedExponent; +} VkPhysicalDeviceASTCDecodeFeaturesEXT; + + + +#define VK_EXT_conditional_rendering 1 +#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2 +#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering" + +typedef enum VkConditionalRenderingFlagBitsEXT { + VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001, + VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConditionalRenderingFlagBitsEXT; +typedef VkFlags VkConditionalRenderingFlagsEXT; +typedef struct VkConditionalRenderingBeginInfoEXT { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceSize offset; + VkConditionalRenderingFlagsEXT flags; +} VkConditionalRenderingBeginInfoEXT; + +typedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 conditionalRendering; + VkBool32 inheritedConditionalRendering; +} VkPhysicalDeviceConditionalRenderingFeaturesEXT; + +typedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 conditionalRenderingEnable; +} VkCommandBufferInheritanceConditionalRenderingInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); +typedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT( + VkCommandBuffer commandBuffer, + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndConditionalRenderingEXT( + VkCommandBuffer commandBuffer); +#endif + + +#define VK_NV_clip_space_w_scaling 1 +#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 +#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" +typedef struct VkViewportWScalingNV { + float xcoeff; + float ycoeff; +} VkViewportWScalingNV; + +typedef 
struct VkPipelineViewportWScalingStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportWScalingEnable; + uint32_t viewportCount; + const VkViewportWScalingNV* pViewportWScalings; +} VkPipelineViewportWScalingStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings); +#endif + + +#define VK_EXT_direct_mode_display 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" +typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); +#endif + + +#define VK_EXT_display_surface_counter 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" + +typedef enum VkSurfaceCounterFlagBitsEXT { + VK_SURFACE_COUNTER_VBLANK_BIT_EXT = 0x00000001, + VK_SURFACE_COUNTER_VBLANK_EXT = VK_SURFACE_COUNTER_VBLANK_BIT_EXT, + VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSurfaceCounterFlagBitsEXT; +typedef VkFlags VkSurfaceCounterFlagsEXT; +typedef struct VkSurfaceCapabilities2EXT { + VkStructureType sType; + void* pNext; + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; + VkSurfaceCounterFlagsEXT supportedSurfaceCounters; +} VkSurfaceCapabilities2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); +#endif + + +#define VK_EXT_display_control 1 +#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" + +typedef enum VkDisplayPowerStateEXT { + VK_DISPLAY_POWER_STATE_OFF_EXT = 0, + VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, + VK_DISPLAY_POWER_STATE_ON_EXT = 2, + VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayPowerStateEXT; + +typedef enum VkDeviceEventTypeEXT { + VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, + VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceEventTypeEXT; + +typedef enum VkDisplayEventTypeEXT { + VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, + VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayEventTypeEXT; +typedef struct VkDisplayPowerInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayPowerStateEXT powerState; +} VkDisplayPowerInfoEXT; + +typedef struct VkDeviceEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceEventTypeEXT deviceEvent; +} VkDeviceEventInfoEXT; + 
+typedef struct VkDisplayEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayEventTypeEXT displayEvent; +} VkDisplayEventInfoEXT; + +typedef struct VkSwapchainCounterCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSurfaceCounterFlagsEXT surfaceCounters; +} VkSwapchainCounterCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue); +#endif + + +#define VK_GOOGLE_display_timing 1 +#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 +#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" +typedef struct VkRefreshCycleDurationGOOGLE { + uint64_t refreshDuration; +} VkRefreshCycleDurationGOOGLE; + +typedef struct VkPastPresentationTimingGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; + uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; +} VkPastPresentationTimingGOOGLE; + +typedef struct VkPresentTimeGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; +} VkPresentTimeGOOGLE; + +typedef struct VkPresentTimesInfoGOOGLE { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimeGOOGLE* pTimes; +} VkPresentTimesInfoGOOGLE; + +typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings); +#endif + + +#define VK_NV_sample_mask_override_coverage 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 +#define 
VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" + + +#define VK_NV_geometry_shader_passthrough 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" + + +#define VK_NV_viewport_array2 1 +#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2" + + +#define VK_NVX_multiview_per_view_attributes 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" +typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + VkStructureType sType; + void* pNext; + VkBool32 perViewPositionAllComponents; +} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + + + +#define VK_NV_viewport_swizzle 1 +#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" + +typedef enum VkViewportCoordinateSwizzleNV { + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, + VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF +} VkViewportCoordinateSwizzleNV; +typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; +typedef struct VkViewportSwizzleNV { + VkViewportCoordinateSwizzleNV x; + VkViewportCoordinateSwizzleNV y; + VkViewportCoordinateSwizzleNV z; + VkViewportCoordinateSwizzleNV w; +} VkViewportSwizzleNV; + +typedef struct VkPipelineViewportSwizzleStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const VkViewportSwizzleNV* pViewportSwizzles; +} VkPipelineViewportSwizzleStateCreateInfoNV; + + + +#define VK_EXT_discard_rectangles 1 +#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1 +#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" + +typedef enum VkDiscardRectangleModeEXT { + VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, + VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, + VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDiscardRectangleModeEXT; +typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxDiscardRectangles; +} VkPhysicalDeviceDiscardRectanglePropertiesEXT; + +typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineDiscardRectangleStateCreateFlagsEXT flags; + VkDiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const VkRect2D* pDiscardRectangles; +} VkPipelineDiscardRectangleStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles); +#endif + + +#define 
VK_EXT_conservative_rasterization 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" + +typedef enum VkConservativeRasterizationModeEXT { + VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, + VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, + VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, + VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConservativeRasterizationModeEXT; +typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + VkBool32 primitiveUnderestimation; + VkBool32 conservativePointAndLineRasterization; + VkBool32 degenerateTrianglesRasterized; + VkBool32 degenerateLinesRasterized; + VkBool32 fullyCoveredFragmentShaderInputVariable; + VkBool32 conservativeRasterizationPostDepthCoverage; +} VkPhysicalDeviceConservativeRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationConservativeStateCreateFlagsEXT flags; + VkConservativeRasterizationModeEXT conservativeRasterizationMode; + float extraPrimitiveOverestimationSize; +} VkPipelineRasterizationConservativeStateCreateInfoEXT; + + + +#define VK_EXT_depth_clip_enable 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME "VK_EXT_depth_clip_enable" +typedef VkFlags VkPipelineRasterizationDepthClipStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDepthClipEnableFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 depthClipEnable; +} VkPhysicalDeviceDepthClipEnableFeaturesEXT; + +typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationDepthClipStateCreateFlagsEXT flags; + VkBool32 depthClipEnable; +} VkPipelineRasterizationDepthClipStateCreateInfoEXT; + + + +#define VK_EXT_swapchain_colorspace 1 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 4 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" + + +#define VK_EXT_hdr_metadata 1 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 2 +#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" +typedef struct VkXYColorEXT { + float x; + float y; +} VkXYColorEXT; + +typedef struct VkHdrMetadataEXT { + VkStructureType sType; + const void* pNext; + VkXYColorEXT displayPrimaryRed; + VkXYColorEXT displayPrimaryGreen; + VkXYColorEXT displayPrimaryBlue; + VkXYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; +} VkHdrMetadataEXT; + +typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata); +#endif + + +#define VK_EXT_external_memory_dma_buf 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" + + +#define VK_EXT_queue_family_foreign 1 +#define 
VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" +#define VK_QUEUE_FAMILY_FOREIGN_EXT (~2U) + + +#define VK_EXT_debug_utils 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) +#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 2 +#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" +typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; + +typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageSeverityFlagBitsEXT; + +typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, + VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageTypeFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; +typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; +typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT; +typedef struct VkDebugUtilsLabelEXT { + VkStructureType sType; + const void* pNext; + const char* pLabelName; + float color[4]; +} VkDebugUtilsLabelEXT; + +typedef struct VkDebugUtilsObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; +} VkDebugUtilsObjectNameInfoEXT; + +typedef struct VkDebugUtilsMessengerCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + const VkDebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + const VkDebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + const VkDebugUtilsObjectNameInfoEXT* pObjects; +} VkDebugUtilsMessengerCallbackDataEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDebugUtilsMessengerCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCreateFlagsEXT flags; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; +} VkDebugUtilsMessengerCreateInfoEXT; + +typedef struct VkDebugUtilsObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugUtilsObjectTagInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); +typedef void (VKAPI_PTR 
*PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( + VkQueue queue); + +VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pMessenger); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( + VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); +#endif + + +#define VK_EXT_sampler_filter_minmax 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2 +#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" +typedef VkSamplerReductionMode VkSamplerReductionModeEXT; + +typedef VkSamplerReductionModeCreateInfo VkSamplerReductionModeCreateInfoEXT; + +typedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + + +#define VK_AMD_gpu_shader_int16 1 +#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" + + +#define VK_AMD_mixed_attachment_samples 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" + + +#define VK_AMD_shader_fragment_mask 1 +#define 
VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" + + +#define VK_EXT_inline_uniform_block 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block" +typedef struct VkPhysicalDeviceInlineUniformBlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 inlineUniformBlock; + VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; +} VkPhysicalDeviceInlineUniformBlockFeaturesEXT; + +typedef struct VkPhysicalDeviceInlineUniformBlockPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxInlineUniformBlockSize; + uint32_t maxPerStageDescriptorInlineUniformBlocks; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; + uint32_t maxDescriptorSetInlineUniformBlocks; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; +} VkPhysicalDeviceInlineUniformBlockPropertiesEXT; + +typedef struct VkWriteDescriptorSetInlineUniformBlockEXT { + VkStructureType sType; + const void* pNext; + uint32_t dataSize; + const void* pData; +} VkWriteDescriptorSetInlineUniformBlockEXT; + +typedef struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t maxInlineUniformBlockBindings; +} VkDescriptorPoolInlineUniformBlockCreateInfoEXT; + + + +#define VK_EXT_shader_stencil_export 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" + + +#define VK_EXT_sample_locations 1 +#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 +#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" +typedef struct VkSampleLocationEXT { + float x; + float y; +} VkSampleLocationEXT; + +typedef struct VkSampleLocationsInfoEXT { + VkStructureType sType; + const void* pNext; + VkSampleCountFlagBits sampleLocationsPerPixel; + VkExtent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const VkSampleLocationEXT* pSampleLocations; +} VkSampleLocationsInfoEXT; + +typedef struct VkAttachmentSampleLocationsEXT { + uint32_t attachmentIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkAttachmentSampleLocationsEXT; + +typedef struct VkSubpassSampleLocationsEXT { + uint32_t subpassIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkSubpassSampleLocationsEXT; + +typedef struct VkRenderPassSampleLocationsBeginInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentInitialSampleLocationsCount; + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; +} VkRenderPassSampleLocationsBeginInfoEXT; + +typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 sampleLocationsEnable; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkPipelineSampleLocationsStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} 
VkMultisamplePropertiesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif + + +#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +#define VK_NV_fragment_coverage_to_color 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + +typedef enum VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + + 
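A minimal usage sketch for the VK_EXT_sample_locations query declared above (this is not part of the vendored header; the instance and physicalDevice handles and the choice of VK_SAMPLE_COUNT_4_BIT are assumptions):

/* Illustrative only: query the maximum programmable sample-location grid.
 * Extension entry points are fetched through the loader rather than linked directly. */
#include <vulkan/vulkan.h>
#include <stdio.h>

static void querySampleLocationGrid(VkInstance instance, VkPhysicalDevice physicalDevice)
{
    PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT getMultisampleProps =
        (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)vkGetInstanceProcAddr(
            instance, "vkGetPhysicalDeviceMultisamplePropertiesEXT");
    if (!getMultisampleProps) {
        return; /* VK_EXT_sample_locations is not available */
    }

    VkMultisamplePropertiesEXT props = {0};
    props.sType = VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT;
    getMultisampleProps(physicalDevice, VK_SAMPLE_COUNT_4_BIT, &props);

    printf("max sample-location grid: %ux%u\n",
           props.maxSampleLocationGridSize.width,
           props.maxSampleLocationGridSize.height);
}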
+ +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +#define VK_NV_shader_sm_builtins 1 +#define VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION 1 +#define VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME "VK_NV_shader_sm_builtins" +typedef struct VkPhysicalDeviceShaderSMBuiltinsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderSMCount; + uint32_t shaderWarpsPerSM; +} VkPhysicalDeviceShaderSMBuiltinsPropertiesNV; + +typedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shaderSMBuiltins; +} VkPhysicalDeviceShaderSMBuiltinsFeaturesNV; + + + +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +#define VK_EXT_image_drm_format_modifier 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME "VK_EXT_image_drm_format_modifier" +typedef struct VkDrmFormatModifierPropertiesEXT { + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + VkFormatFeatureFlags drmFormatModifierTilingFeatures; +} VkDrmFormatModifierPropertiesEXT; + +typedef struct VkDrmFormatModifierPropertiesListEXT { + VkStructureType sType; + void* pNext; + uint32_t drmFormatModifierCount; + VkDrmFormatModifierPropertiesEXT* pDrmFormatModifierProperties; +} VkDrmFormatModifierPropertiesListEXT; + +typedef struct VkPhysicalDeviceImageDrmFormatModifierInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkPhysicalDeviceImageDrmFormatModifierInfoEXT; + +typedef struct VkImageDrmFormatModifierListCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t drmFormatModifierCount; + const uint64_t* pDrmFormatModifiers; +} VkImageDrmFormatModifierListCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierExplicitCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + const VkSubresourceLayout* pPlaneLayouts; +} VkImageDrmFormatModifierExplicitCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierPropertiesEXT { + VkStructureType sType; + void* pNext; + uint64_t drmFormatModifier; +} VkImageDrmFormatModifierPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageDrmFormatModifierPropertiesEXT( + VkDevice device, + VkImage image, + VkImageDrmFormatModifierPropertiesEXT* pProperties); +#endif + + +#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; +typedef VkFlags VkValidationCacheCreateFlagsEXT; +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + 
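A minimal sketch of creating an empty validation cache with the VkValidationCacheCreateInfoEXT structure above and the vkCreateValidationCacheEXT entry point declared just below (not part of the vendored header; the device handle is assumed to have VK_EXT_validation_cache enabled):

/* Illustrative only: create an empty validation cache that shader modules can later reuse. */
#include <vulkan/vulkan.h>
#include <stddef.h>

static VkValidationCacheEXT createEmptyValidationCache(VkDevice device)
{
    PFN_vkCreateValidationCacheEXT createValidationCache =
        (PFN_vkCreateValidationCacheEXT)vkGetDeviceProcAddr(device, "vkCreateValidationCacheEXT");
    if (!createValidationCache) {
        return VK_NULL_HANDLE; /* VK_EXT_validation_cache is not available */
    }

    VkValidationCacheCreateInfoEXT createInfo = {0};
    createInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
    createInfo.initialDataSize = 0;   /* start from an empty cache */
    createInfo.pInitialData = NULL;

    VkValidationCacheEXT cache = VK_NULL_HANDLE;
    if (createValidationCache(device, &createInfo, NULL, &cache) != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }
    return cache;
}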
+typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif + + +#define VK_EXT_descriptor_indexing 1 +#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 +#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" +typedef VkDescriptorBindingFlagBits VkDescriptorBindingFlagBitsEXT; + +typedef VkDescriptorBindingFlags VkDescriptorBindingFlagsEXT; + +typedef VkDescriptorSetLayoutBindingFlagsCreateInfo VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; + +typedef VkPhysicalDeviceDescriptorIndexingFeatures VkPhysicalDeviceDescriptorIndexingFeaturesEXT; + +typedef VkPhysicalDeviceDescriptorIndexingProperties VkPhysicalDeviceDescriptorIndexingPropertiesEXT; + +typedef VkDescriptorSetVariableDescriptorCountAllocateInfo VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; + +typedef VkDescriptorSetVariableDescriptorCountLayoutSupport VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + + +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + +#define VK_NV_shading_rate_image 1 +#define VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION 3 +#define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image" + +typedef enum VkShadingRatePaletteEntryNV { + VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV = 0, + VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV = 1, + VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV = 2, + VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV = 3, + VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV = 4, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV = 5, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV = 6, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV = 7, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV = 8, + 
VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV = 10, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV = 11, + VK_SHADING_RATE_PALETTE_ENTRY_MAX_ENUM_NV = 0x7FFFFFFF +} VkShadingRatePaletteEntryNV; + +typedef enum VkCoarseSampleOrderTypeNV { + VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV = 0, + VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV = 1, + VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV = 2, + VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3, + VK_COARSE_SAMPLE_ORDER_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoarseSampleOrderTypeNV; +typedef struct VkShadingRatePaletteNV { + uint32_t shadingRatePaletteEntryCount; + const VkShadingRatePaletteEntryNV* pShadingRatePaletteEntries; +} VkShadingRatePaletteNV; + +typedef struct VkPipelineViewportShadingRateImageStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 shadingRateImageEnable; + uint32_t viewportCount; + const VkShadingRatePaletteNV* pShadingRatePalettes; +} VkPipelineViewportShadingRateImageStateCreateInfoNV; + +typedef struct VkPhysicalDeviceShadingRateImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shadingRateImage; + VkBool32 shadingRateCoarseSampleOrder; +} VkPhysicalDeviceShadingRateImageFeaturesNV; + +typedef struct VkPhysicalDeviceShadingRateImagePropertiesNV { + VkStructureType sType; + void* pNext; + VkExtent2D shadingRateTexelSize; + uint32_t shadingRatePaletteSize; + uint32_t shadingRateMaxCoarseSamples; +} VkPhysicalDeviceShadingRateImagePropertiesNV; + +typedef struct VkCoarseSampleLocationNV { + uint32_t pixelX; + uint32_t pixelY; + uint32_t sample; +} VkCoarseSampleLocationNV; + +typedef struct VkCoarseSampleOrderCustomNV { + VkShadingRatePaletteEntryNV shadingRate; + uint32_t sampleCount; + uint32_t sampleLocationCount; + const VkCoarseSampleLocationNV* pSampleLocations; +} VkCoarseSampleOrderCustomNV; + +typedef struct VkPipelineViewportCoarseSampleOrderStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkCoarseSampleOrderTypeNV sampleOrderType; + uint32_t customSampleOrderCount; + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders; +} VkPipelineViewportCoarseSampleOrderStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdBindShadingRateImageNV)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportShadingRatePaletteNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes); +typedef void (VKAPI_PTR *PFN_vkCmdSetCoarseSampleOrderNV)(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindShadingRateImageNV( + VkCommandBuffer commandBuffer, + VkImageView imageView, + VkImageLayout imageLayout); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportShadingRatePaletteNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkShadingRatePaletteNV* pShadingRatePalettes); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( + VkCommandBuffer commandBuffer, + VkCoarseSampleOrderTypeNV sampleOrderType, + uint32_t customSampleOrderCount, + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); +#endif + + +#define VK_NV_ray_tracing 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) +#define 
VK_NV_RAY_TRACING_SPEC_VERSION 3 +#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" +#define VK_SHADER_UNUSED_KHR (~0U) +#define VK_SHADER_UNUSED_NV VK_SHADER_UNUSED_KHR + +typedef enum VkRayTracingShaderGroupTypeKHR { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR = 2, + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkRayTracingShaderGroupTypeKHR; +typedef VkRayTracingShaderGroupTypeKHR VkRayTracingShaderGroupTypeNV; + + +typedef enum VkGeometryTypeKHR { + VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0, + VK_GEOMETRY_TYPE_AABBS_KHR = 1, + VK_GEOMETRY_TYPE_INSTANCES_KHR = 2, + VK_GEOMETRY_TYPE_TRIANGLES_NV = VK_GEOMETRY_TYPE_TRIANGLES_KHR, + VK_GEOMETRY_TYPE_AABBS_NV = VK_GEOMETRY_TYPE_AABBS_KHR, + VK_GEOMETRY_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryTypeKHR; +typedef VkGeometryTypeKHR VkGeometryTypeNV; + + +typedef enum VkAccelerationStructureTypeKHR { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1, + VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR = 2, + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureTypeKHR; +typedef VkAccelerationStructureTypeKHR VkAccelerationStructureTypeNV; + + +typedef enum VkCopyAccelerationStructureModeKHR { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR = 0, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR = 1, + VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR = 2, + VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR = 3, + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCopyAccelerationStructureModeKHR; +typedef VkCopyAccelerationStructureModeKHR VkCopyAccelerationStructureModeNV; + + +typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeNV; + +typedef enum VkGeometryFlagBitsKHR { + VK_GEOMETRY_OPAQUE_BIT_KHR = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR = 0x00000002, + VK_GEOMETRY_OPAQUE_BIT_NV = VK_GEOMETRY_OPAQUE_BIT_KHR, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR, + VK_GEOMETRY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryFlagBitsKHR; +typedef VkFlags VkGeometryFlagsKHR; +typedef VkGeometryFlagsKHR VkGeometryFlagsNV; + +typedef VkGeometryFlagBitsKHR VkGeometryFlagBitsNV; + + +typedef enum VkGeometryInstanceFlagBitsKHR { + 
VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x00000001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x00000008, + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryInstanceFlagBitsKHR; +typedef VkFlags VkGeometryInstanceFlagsKHR; +typedef VkGeometryInstanceFlagsKHR VkGeometryInstanceFlagsNV; + +typedef VkGeometryInstanceFlagBitsKHR VkGeometryInstanceFlagBitsNV; + + +typedef enum VkBuildAccelerationStructureFlagBitsKHR { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR = 0x00000010, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureFlagBitsKHR; +typedef VkFlags VkBuildAccelerationStructureFlagsKHR; +typedef VkBuildAccelerationStructureFlagsKHR VkBuildAccelerationStructureFlagsNV; + +typedef VkBuildAccelerationStructureFlagBitsKHR VkBuildAccelerationStructureFlagBitsNV; + +typedef struct VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; +} VkRayTracingShaderGroupCreateInfoNV; + +typedef struct VkRayTracingPipelineCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoNV* pGroups; + uint32_t maxRecursionDepth; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoNV; + +typedef struct VkGeometryTrianglesNV { + VkStructureType sType; + const void* pNext; + VkBuffer vertexData; + VkDeviceSize vertexOffset; + uint32_t vertexCount; + VkDeviceSize vertexStride; + VkFormat vertexFormat; + VkBuffer indexData; + VkDeviceSize indexOffset; + uint32_t indexCount; + VkIndexType indexType; + VkBuffer transformData; + VkDeviceSize transformOffset; +} VkGeometryTrianglesNV; + +typedef struct VkGeometryAABBNV { + VkStructureType sType; + const void* pNext; + VkBuffer aabbData; + uint32_t numAABBs; + 
uint32_t stride; + VkDeviceSize offset; +} VkGeometryAABBNV; + +typedef struct VkGeometryDataNV { + VkGeometryTrianglesNV triangles; + VkGeometryAABBNV aabbs; +} VkGeometryDataNV; + +typedef struct VkGeometryNV { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkGeometryDataNV geometry; + VkGeometryFlagsKHR flags; +} VkGeometryNV; + +typedef struct VkAccelerationStructureInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeNV type; + VkBuildAccelerationStructureFlagsNV flags; + uint32_t instanceCount; + uint32_t geometryCount; + const VkGeometryNV* pGeometries; +} VkAccelerationStructureInfoNV; + +typedef struct VkAccelerationStructureCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureInfoNV info; +} VkAccelerationStructureCreateInfoNV; + +typedef struct VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureNV accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureNV { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureNV* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureNV; + +typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeNV type; + VkAccelerationStructureNV accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoNV; + +typedef struct VkPhysicalDeviceRayTracingPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxTriangleCount; + uint32_t maxDescriptorSetAccelerationStructures; +} VkPhysicalDeviceRayTracingPropertiesNV; + +typedef struct VkTransformMatrixKHR { + float matrix[3][4]; +} VkTransformMatrixKHR; + +typedef VkTransformMatrixKHR VkTransformMatrixNV; + +typedef struct VkAabbPositionsKHR { + float minX; + float minY; + float minZ; + float maxX; + float maxY; + float maxZ; +} VkAabbPositionsKHR; + +typedef VkAabbPositionsKHR VkAabbPositionsNV; + +typedef struct VkAccelerationStructureInstanceKHR { + VkTransformMatrixKHR transform; + uint32_t instanceCustomIndex:24; + uint32_t mask:8; + uint32_t instanceShaderBindingTableRecordOffset:24; + VkGeometryInstanceFlagsKHR flags:8; + uint64_t accelerationStructureReference; +} VkAccelerationStructureInstanceKHR; + +typedef VkAccelerationStructureInstanceKHR VkAccelerationStructureInstanceNV; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR 
*PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( + VkDevice device, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureNV* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2KHR* pMemoryRequirements); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( + VkDevice device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureInfoNV* pInfo, + VkBuffer instanceData, + VkDeviceSize instanceOffset, + VkBool32 update, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkBuffer scratch, + VkDeviceSize 
scratchOffset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( + VkCommandBuffer commandBuffer, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeKHR mode); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( + VkCommandBuffer commandBuffer, + VkBuffer raygenShaderBindingTableBuffer, + VkDeviceSize raygenShaderBindingOffset, + VkBuffer missShaderBindingTableBuffer, + VkDeviceSize missShaderBindingOffset, + VkDeviceSize missShaderBindingStride, + VkBuffer hitShaderBindingTableBuffer, + VkDeviceSize hitShaderBindingOffset, + VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( + VkDevice device, + VkPipeline pipeline, + uint32_t shader); +#endif + + +#define VK_NV_representative_fragment_test 1 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME "VK_NV_representative_fragment_test" +typedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 representativeFragmentTest; +} VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV; + +typedef struct VkPipelineRepresentativeFragmentTestStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 representativeFragmentTestEnable; +} VkPipelineRepresentativeFragmentTestStateCreateInfoNV; + + + +#define VK_EXT_filter_cubic 1 +#define VK_EXT_FILTER_CUBIC_SPEC_VERSION 3 +#define VK_EXT_FILTER_CUBIC_EXTENSION_NAME "VK_EXT_filter_cubic" +typedef struct VkPhysicalDeviceImageViewImageFormatInfoEXT { + VkStructureType sType; + void* pNext; + VkImageViewType imageViewType; +} VkPhysicalDeviceImageViewImageFormatInfoEXT; + +typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 filterCubic; + VkBool32 filterCubicMinmax; +} VkFilterCubicImageViewImageFormatPropertiesEXT; + + + +#define VK_QCOM_render_pass_shader_resolve 1 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION 4 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME "VK_QCOM_render_pass_shader_resolve" + + +#define VK_EXT_global_priority 1 +#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 +#define 
VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" + +typedef enum VkQueueGlobalPriorityEXT { + VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024, + VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF +} VkQueueGlobalPriorityEXT; +typedef struct VkDeviceQueueGlobalPriorityCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkQueueGlobalPriorityEXT globalPriority; +} VkDeviceQueueGlobalPriorityCreateInfoEXT; + + + +#define VK_EXT_external_memory_host 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" +typedef struct VkImportMemoryHostPointerInfoEXT { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + void* pHostPointer; +} VkImportMemoryHostPointerInfoEXT; + +typedef struct VkMemoryHostPointerPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryHostPointerPropertiesEXT; + +typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize minImportedHostPointerAlignment; +} VkPhysicalDeviceExternalMemoryHostPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); +#endif + + +#define VK_AMD_buffer_marker 1 +#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 +#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif + + +#define VK_AMD_pipeline_compiler_control 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME "VK_AMD_pipeline_compiler_control" + +typedef enum VkPipelineCompilerControlFlagBitsAMD { + VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkPipelineCompilerControlFlagBitsAMD; +typedef VkFlags VkPipelineCompilerControlFlagsAMD; +typedef struct VkPipelineCompilerControlCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkPipelineCompilerControlFlagsAMD compilerControlFlags; +} VkPipelineCompilerControlCreateInfoAMD; + + + +#define VK_EXT_calibrated_timestamps 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 2 +#define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_EXT_calibrated_timestamps" + +typedef enum VkTimeDomainEXT { + VK_TIME_DOMAIN_DEVICE_EXT = 0, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT = 1, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT = 2, + VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3, + VK_TIME_DOMAIN_MAX_ENUM_EXT = 0x7FFFFFFF +} VkTimeDomainEXT; +typedef struct VkCalibratedTimestampInfoEXT { + VkStructureType sType; + const void* pNext; + VkTimeDomainEXT 
timeDomain; +} VkCalibratedTimestampInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains); +typedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsEXT)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainEXT* pTimeDomains); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT( + VkDevice device, + uint32_t timestampCount, + const VkCalibratedTimestampInfoEXT* pTimestampInfos, + uint64_t* pTimestamps, + uint64_t* pMaxDeviation); +#endif + + +#define VK_AMD_shader_core_properties 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2 +#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" +typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { + VkStructureType sType; + void* pNext; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; +} VkPhysicalDeviceShaderCorePropertiesAMD; + + + +#define VK_AMD_memory_overallocation_behavior 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior" + +typedef enum VkMemoryOverallocationBehaviorAMD { + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF +} VkMemoryOverallocationBehaviorAMD; +typedef struct VkDeviceMemoryOverallocationCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkMemoryOverallocationBehaviorAMD overallocationBehavior; +} VkDeviceMemoryOverallocationCreateInfoAMD; + + + +#define VK_EXT_vertex_attribute_divisor 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" +typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; +} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +typedef struct VkVertexInputBindingDivisorDescriptionEXT { + uint32_t binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescriptionEXT; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; +} VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; + + + +#define VK_EXT_pipeline_creation_feedback 1 +#define 
VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME "VK_EXT_pipeline_creation_feedback" + +typedef enum VkPipelineCreationFeedbackFlagBitsEXT { + VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = 0x00000001, + VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = 0x00000002, + VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = 0x00000004, + VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPipelineCreationFeedbackFlagBitsEXT; +typedef VkFlags VkPipelineCreationFeedbackFlagsEXT; +typedef struct VkPipelineCreationFeedbackEXT { + VkPipelineCreationFeedbackFlagsEXT flags; + uint64_t duration; +} VkPipelineCreationFeedbackEXT; + +typedef struct VkPipelineCreationFeedbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineCreationFeedbackEXT* pPipelineCreationFeedback; + uint32_t pipelineStageCreationFeedbackCount; + VkPipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks; +} VkPipelineCreationFeedbackCreateInfoEXT; + + + +#define VK_NV_shader_subgroup_partitioned 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" + + +#define VK_NV_compute_shader_derivatives 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_NV_compute_shader_derivatives" +typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 computeDerivativeGroupQuads; + VkBool32 computeDerivativeGroupLinear; +} VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; + + + +#define VK_NV_mesh_shader 1 +#define VK_NV_MESH_SHADER_SPEC_VERSION 1 +#define VK_NV_MESH_SHADER_EXTENSION_NAME "VK_NV_mesh_shader" +typedef struct VkPhysicalDeviceMeshShaderFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 taskShader; + VkBool32 meshShader; +} VkPhysicalDeviceMeshShaderFeaturesNV; + +typedef struct VkPhysicalDeviceMeshShaderPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxDrawMeshTasksCount; + uint32_t maxTaskWorkGroupInvocations; + uint32_t maxTaskWorkGroupSize[3]; + uint32_t maxTaskTotalMemorySize; + uint32_t maxTaskOutputCount; + uint32_t maxMeshWorkGroupInvocations; + uint32_t maxMeshWorkGroupSize[3]; + uint32_t maxMeshTotalMemorySize; + uint32_t maxMeshOutputVertices; + uint32_t maxMeshOutputPrimitives; + uint32_t maxMeshMultiviewViewCount; + uint32_t meshOutputPerVertexGranularity; + uint32_t meshOutputPerPrimitiveGranularity; +} VkPhysicalDeviceMeshShaderPropertiesNV; + +typedef struct VkDrawMeshTasksIndirectCommandNV { + uint32_t taskCount; + uint32_t firstTask; +} VkDrawMeshTasksIndirectCommandNV; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksNV)(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksNV( + VkCommandBuffer commandBuffer, + uint32_t taskCount, + uint32_t firstTask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectNV( + VkCommandBuffer commandBuffer, 
+ VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_NV_fragment_shader_barycentric 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_NV_fragment_shader_barycentric" +typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderBarycentric; +} VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV; + + + +#define VK_NV_shader_image_footprint 1 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME "VK_NV_shader_image_footprint" +typedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 imageFootprint; +} VkPhysicalDeviceShaderImageFootprintFeaturesNV; + + + +#define VK_NV_scissor_exclusive 1 +#define VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION 1 +#define VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME "VK_NV_scissor_exclusive" +typedef struct VkPipelineViewportExclusiveScissorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t exclusiveScissorCount; + const VkRect2D* pExclusiveScissors; +} VkPipelineViewportExclusiveScissorStateCreateInfoNV; + +typedef struct VkPhysicalDeviceExclusiveScissorFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 exclusiveScissor; +} VkPhysicalDeviceExclusiveScissorFeaturesNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetExclusiveScissorNV( + VkCommandBuffer commandBuffer, + uint32_t firstExclusiveScissor, + uint32_t exclusiveScissorCount, + const VkRect2D* pExclusiveScissors); +#endif + + +#define VK_NV_device_diagnostic_checkpoints 1 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME "VK_NV_device_diagnostic_checkpoints" +typedef struct VkQueueFamilyCheckpointPropertiesNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags checkpointExecutionStageMask; +} VkQueueFamilyCheckpointPropertiesNV; + +typedef struct VkCheckpointDataNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlagBits stage; + void* pCheckpointMarker; +} VkCheckpointDataNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV( + VkCommandBuffer commandBuffer, + const void* pCheckpointMarker); + +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointDataNV* pCheckpointData); +#endif + + +#define VK_INTEL_shader_integer_functions2 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME "VK_INTEL_shader_integer_functions2" +typedef struct VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + VkStructureType sType; + void* pNext; 
+ VkBool32 shaderIntegerFunctions2; +} VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + + + +#define VK_INTEL_performance_query 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPerformanceConfigurationINTEL) +#define VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION 2 +#define VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME "VK_INTEL_performance_query" + +typedef enum VkPerformanceConfigurationTypeINTEL { + VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL = 0, + VK_PERFORMANCE_CONFIGURATION_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceConfigurationTypeINTEL; + +typedef enum VkQueryPoolSamplingModeINTEL { + VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL = 0, + VK_QUERY_POOL_SAMPLING_MODE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkQueryPoolSamplingModeINTEL; + +typedef enum VkPerformanceOverrideTypeINTEL { + VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL = 0, + VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL = 1, + VK_PERFORMANCE_OVERRIDE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceOverrideTypeINTEL; + +typedef enum VkPerformanceParameterTypeINTEL { + VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL = 0, + VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL = 1, + VK_PERFORMANCE_PARAMETER_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceParameterTypeINTEL; + +typedef enum VkPerformanceValueTypeINTEL { + VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL = 0, + VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL = 1, + VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL = 2, + VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL = 3, + VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL = 4, + VK_PERFORMANCE_VALUE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceValueTypeINTEL; +typedef union VkPerformanceValueDataINTEL { + uint32_t value32; + uint64_t value64; + float valueFloat; + VkBool32 valueBool; + const char* valueString; +} VkPerformanceValueDataINTEL; + +typedef struct VkPerformanceValueINTEL { + VkPerformanceValueTypeINTEL type; + VkPerformanceValueDataINTEL data; +} VkPerformanceValueINTEL; + +typedef struct VkInitializePerformanceApiInfoINTEL { + VkStructureType sType; + const void* pNext; + void* pUserData; +} VkInitializePerformanceApiInfoINTEL; + +typedef struct VkQueryPoolPerformanceQueryCreateInfoINTEL { + VkStructureType sType; + const void* pNext; + VkQueryPoolSamplingModeINTEL performanceCountersSampling; +} VkQueryPoolPerformanceQueryCreateInfoINTEL; + +typedef VkQueryPoolPerformanceQueryCreateInfoINTEL VkQueryPoolCreateInfoINTEL; + +typedef struct VkPerformanceMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint64_t marker; +} VkPerformanceMarkerInfoINTEL; + +typedef struct VkPerformanceStreamMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint32_t marker; +} VkPerformanceStreamMarkerInfoINTEL; + +typedef struct VkPerformanceOverrideInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceOverrideTypeINTEL type; + VkBool32 enable; + uint64_t parameter; +} VkPerformanceOverrideInfoINTEL; + +typedef struct VkPerformanceConfigurationAcquireInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceConfigurationTypeINTEL type; +} VkPerformanceConfigurationAcquireInfoINTEL; + +typedef VkResult (VKAPI_PTR *PFN_vkInitializePerformanceApiINTEL)(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); +typedef void (VKAPI_PTR *PFN_vkUninitializePerformanceApiINTEL)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo); 
+typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceStreamMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceOverrideINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo); +typedef VkResult (VKAPI_PTR *PFN_vkAcquirePerformanceConfigurationINTEL)(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration); +typedef VkResult (VKAPI_PTR *PFN_vkReleasePerformanceConfigurationINTEL)(VkDevice device, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSetPerformanceConfigurationINTEL)(VkQueue queue, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceParameterINTEL)(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkInitializePerformanceApiINTEL( + VkDevice device, + const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); + +VKAPI_ATTR void VKAPI_CALL vkUninitializePerformanceApiINTEL( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceStreamMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceOverrideINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceOverrideInfoINTEL* pOverrideInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquirePerformanceConfigurationINTEL( + VkDevice device, + const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, + VkPerformanceConfigurationINTEL* pConfiguration); + +VKAPI_ATTR VkResult VKAPI_CALL vkReleasePerformanceConfigurationINTEL( + VkDevice device, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSetPerformanceConfigurationINTEL( + VkQueue queue, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceParameterINTEL( + VkDevice device, + VkPerformanceParameterTypeINTEL parameter, + VkPerformanceValueINTEL* pValue); +#endif + + +#define VK_EXT_pci_bus_info 1 +#define VK_EXT_PCI_BUS_INFO_SPEC_VERSION 2 +#define VK_EXT_PCI_BUS_INFO_EXTENSION_NAME "VK_EXT_pci_bus_info" +typedef struct VkPhysicalDevicePCIBusInfoPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t pciDomain; + uint32_t pciBus; + uint32_t pciDevice; + uint32_t pciFunction; +} VkPhysicalDevicePCIBusInfoPropertiesEXT; + + + +#define VK_AMD_display_native_hdr 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME "VK_AMD_display_native_hdr" +typedef struct VkDisplayNativeHdrSurfaceCapabilitiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 localDimmingSupport; +} VkDisplayNativeHdrSurfaceCapabilitiesAMD; + +typedef struct VkSwapchainDisplayNativeHdrCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkBool32 localDimmingEnable; +} VkSwapchainDisplayNativeHdrCreateInfoAMD; + +typedef void (VKAPI_PTR *PFN_vkSetLocalDimmingAMD)(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD( + VkDevice device, + VkSwapchainKHR swapChain, + VkBool32 localDimmingEnable); +#endif + + 
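A minimal sketch of the VK_EXT_calibrated_timestamps entry points declared earlier in this hunk, using the usual two-call enumeration pattern (not part of the vendored header; the instance, physicalDevice, and device handles are assumptions):

/* Illustrative only: enumerate calibrateable time domains, then read one device timestamp. */
#include <vulkan/vulkan.h>
#include <stdlib.h>

static void readDeviceTimestamp(VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device)
{
    PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT getTimeDomains =
        (PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)vkGetInstanceProcAddr(
            instance, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT");
    PFN_vkGetCalibratedTimestampsEXT getCalibratedTimestamps =
        (PFN_vkGetCalibratedTimestampsEXT)vkGetDeviceProcAddr(device, "vkGetCalibratedTimestampsEXT");
    if (!getTimeDomains || !getCalibratedTimestamps) {
        return; /* VK_EXT_calibrated_timestamps is not available */
    }

    /* Standard two-call enumeration: first query the count, then fill the array. */
    uint32_t domainCount = 0;
    getTimeDomains(physicalDevice, &domainCount, NULL);
    if (domainCount == 0) {
        return;
    }
    VkTimeDomainEXT* domains = malloc(domainCount * sizeof(VkTimeDomainEXT));
    if (!domains) {
        return;
    }
    getTimeDomains(physicalDevice, &domainCount, domains);

    VkCalibratedTimestampInfoEXT info = {0};
    info.sType = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT;
    info.timeDomain = VK_TIME_DOMAIN_DEVICE_EXT;

    uint64_t timestamp = 0;
    uint64_t maxDeviation = 0;
    getCalibratedTimestamps(device, 1, &info, &timestamp, &maxDeviation);

    free(domains);
}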
+#define VK_EXT_fragment_density_map 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME "VK_EXT_fragment_density_map" +typedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMap; + VkBool32 fragmentDensityMapDynamic; + VkBool32 fragmentDensityMapNonSubsampledImages; +} VkPhysicalDeviceFragmentDensityMapFeaturesEXT; + +typedef struct VkPhysicalDeviceFragmentDensityMapPropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentDensityTexelSize; + VkExtent2D maxFragmentDensityTexelSize; + VkBool32 fragmentDensityInvocations; +} VkPhysicalDeviceFragmentDensityMapPropertiesEXT; + +typedef struct VkRenderPassFragmentDensityMapCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkAttachmentReference fragmentDensityMapAttachment; +} VkRenderPassFragmentDensityMapCreateInfoEXT; + + + +#define VK_EXT_scalar_block_layout 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME "VK_EXT_scalar_block_layout" +typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLayoutFeaturesEXT; + + + +#define VK_GOOGLE_hlsl_functionality1 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1" + + +#define VK_GOOGLE_decorate_string 1 +#define VK_GOOGLE_DECORATE_STRING_SPEC_VERSION 1 +#define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME "VK_GOOGLE_decorate_string" + + +#define VK_EXT_subgroup_size_control 1 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control" +typedef struct VkPhysicalDeviceSubgroupSizeControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 subgroupSizeControl; + VkBool32 computeFullSubgroups; +} VkPhysicalDeviceSubgroupSizeControlFeaturesEXT; + +typedef struct VkPhysicalDeviceSubgroupSizeControlPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t minSubgroupSize; + uint32_t maxSubgroupSize; + uint32_t maxComputeWorkgroupSubgroups; + VkShaderStageFlags requiredSubgroupSizeStages; +} VkPhysicalDeviceSubgroupSizeControlPropertiesEXT; + +typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + VkStructureType sType; + void* pNext; + uint32_t requiredSubgroupSize; +} VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; + + + +#define VK_AMD_shader_core_properties2 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME "VK_AMD_shader_core_properties2" + +typedef enum VkShaderCorePropertiesFlagBitsAMD { + VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderCorePropertiesFlagBitsAMD; +typedef VkFlags VkShaderCorePropertiesFlagsAMD; +typedef struct VkPhysicalDeviceShaderCoreProperties2AMD { + VkStructureType sType; + void* pNext; + VkShaderCorePropertiesFlagsAMD shaderCoreFeatures; + uint32_t activeComputeUnitCount; +} VkPhysicalDeviceShaderCoreProperties2AMD; + + + +#define VK_AMD_device_coherent_memory 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME "VK_AMD_device_coherent_memory" +typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD { + VkStructureType sType; + void* pNext; + VkBool32 deviceCoherentMemory; +} VkPhysicalDeviceCoherentMemoryFeaturesAMD; + + + +#define VK_EXT_shader_image_atomic_int64 1 +#define 
VK_EXT_SHADER_IMAGE_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME "VK_EXT_shader_image_atomic_int64" +typedef struct VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderImageInt64Atomics; + VkBool32 sparseImageInt64Atomics; +} VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + + + +#define VK_EXT_memory_budget 1 +#define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1 +#define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget" +typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS]; + VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryBudgetPropertiesEXT; + + + +#define VK_EXT_memory_priority 1 +#define VK_EXT_MEMORY_PRIORITY_SPEC_VERSION 1 +#define VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME "VK_EXT_memory_priority" +typedef struct VkPhysicalDeviceMemoryPriorityFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 memoryPriority; +} VkPhysicalDeviceMemoryPriorityFeaturesEXT; + +typedef struct VkMemoryPriorityAllocateInfoEXT { + VkStructureType sType; + const void* pNext; + float priority; +} VkMemoryPriorityAllocateInfoEXT; + + + +#define VK_NV_dedicated_allocation_image_aliasing 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME "VK_NV_dedicated_allocation_image_aliasing" +typedef struct VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 dedicatedAllocationImageAliasing; +} VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + + + +#define VK_EXT_buffer_device_address 1 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 2 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_EXT_buffer_device_address" +typedef struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; +} VkPhysicalDeviceBufferDeviceAddressFeaturesEXT; + +typedef VkPhysicalDeviceBufferDeviceAddressFeaturesEXT VkPhysicalDeviceBufferAddressFeaturesEXT; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoEXT; + +typedef struct VkBufferDeviceAddressCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceAddress deviceAddress; +} VkBufferDeviceAddressCreateInfoEXT; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressEXT)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); +#endif + + +#define VK_EXT_tooling_info 1 +#define VK_EXT_TOOLING_INFO_SPEC_VERSION 1 +#define VK_EXT_TOOLING_INFO_EXTENSION_NAME "VK_EXT_tooling_info" + +typedef enum VkToolPurposeFlagBitsEXT { + VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = 0x00000001, + VK_TOOL_PURPOSE_PROFILING_BIT_EXT = 0x00000002, + VK_TOOL_PURPOSE_TRACING_BIT_EXT = 0x00000004, + VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = 0x00000008, + VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = 0x00000010, + VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020, + VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040, + VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkToolPurposeFlagBitsEXT; +typedef VkFlags VkToolPurposeFlagsEXT; +typedef struct VkPhysicalDeviceToolPropertiesEXT { + 
VkStructureType sType; + void* pNext; + char name[VK_MAX_EXTENSION_NAME_SIZE]; + char version[VK_MAX_EXTENSION_NAME_SIZE]; + VkToolPurposeFlagsEXT purposes; + char description[VK_MAX_DESCRIPTION_SIZE]; + char layer[VK_MAX_EXTENSION_NAME_SIZE]; +} VkPhysicalDeviceToolPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolPropertiesEXT* pToolProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolPropertiesEXT* pToolProperties); +#endif + + +#define VK_EXT_separate_stencil_usage 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME "VK_EXT_separate_stencil_usage" +typedef VkImageStencilUsageCreateInfo VkImageStencilUsageCreateInfoEXT; + + + +#define VK_EXT_validation_features 1 +#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 4 +#define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME "VK_EXT_validation_features" + +typedef enum VkValidationFeatureEnableEXT { + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0, + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1, + VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2, + VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT = 3, + VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT = 4, + VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureEnableEXT; + +typedef enum VkValidationFeatureDisableEXT { + VK_VALIDATION_FEATURE_DISABLE_ALL_EXT = 0, + VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT = 1, + VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT = 2, + VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT = 3, + VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT = 4, + VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT = 5, + VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT = 6, + VK_VALIDATION_FEATURE_DISABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureDisableEXT; +typedef struct VkValidationFeaturesEXT { + VkStructureType sType; + const void* pNext; + uint32_t enabledValidationFeatureCount; + const VkValidationFeatureEnableEXT* pEnabledValidationFeatures; + uint32_t disabledValidationFeatureCount; + const VkValidationFeatureDisableEXT* pDisabledValidationFeatures; +} VkValidationFeaturesEXT; + + + +#define VK_NV_cooperative_matrix 1 +#define VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION 1 +#define VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME "VK_NV_cooperative_matrix" + +typedef enum VkComponentTypeNV { + VK_COMPONENT_TYPE_FLOAT16_NV = 0, + VK_COMPONENT_TYPE_FLOAT32_NV = 1, + VK_COMPONENT_TYPE_FLOAT64_NV = 2, + VK_COMPONENT_TYPE_SINT8_NV = 3, + VK_COMPONENT_TYPE_SINT16_NV = 4, + VK_COMPONENT_TYPE_SINT32_NV = 5, + VK_COMPONENT_TYPE_SINT64_NV = 6, + VK_COMPONENT_TYPE_UINT8_NV = 7, + VK_COMPONENT_TYPE_UINT16_NV = 8, + VK_COMPONENT_TYPE_UINT32_NV = 9, + VK_COMPONENT_TYPE_UINT64_NV = 10, + VK_COMPONENT_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkComponentTypeNV; + +typedef enum VkScopeNV { + VK_SCOPE_DEVICE_NV = 1, + VK_SCOPE_WORKGROUP_NV = 2, + VK_SCOPE_SUBGROUP_NV = 3, + VK_SCOPE_QUEUE_FAMILY_NV = 5, + VK_SCOPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkScopeNV; +typedef struct VkCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t MSize; + uint32_t NSize; + uint32_t KSize; + VkComponentTypeNV AType; + VkComponentTypeNV BType; + VkComponentTypeNV CType; + VkComponentTypeNV DType; + VkScopeNV scope; +} 
VkCooperativeMatrixPropertiesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cooperativeMatrix; + VkBool32 cooperativeMatrixRobustBufferAccess; +} VkPhysicalDeviceCooperativeMatrixFeaturesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + VkShaderStageFlags cooperativeMatrixSupportedStages; +} VkPhysicalDeviceCooperativeMatrixPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixPropertiesNV* pProperties); +#endif + + +#define VK_NV_coverage_reduction_mode 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME "VK_NV_coverage_reduction_mode" + +typedef enum VkCoverageReductionModeNV { + VK_COVERAGE_REDUCTION_MODE_MERGE_NV = 0, + VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV = 1, + VK_COVERAGE_REDUCTION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageReductionModeNV; +typedef VkFlags VkPipelineCoverageReductionStateCreateFlagsNV; +typedef struct VkPhysicalDeviceCoverageReductionModeFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 coverageReductionMode; +} VkPhysicalDeviceCoverageReductionModeFeaturesNV; + +typedef struct VkPipelineCoverageReductionStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageReductionStateCreateFlagsNV flags; + VkCoverageReductionModeNV coverageReductionMode; +} VkPipelineCoverageReductionStateCreateInfoNV; + +typedef struct VkFramebufferMixedSamplesCombinationNV { + VkStructureType sType; + void* pNext; + VkCoverageReductionModeNV coverageReductionMode; + VkSampleCountFlagBits rasterizationSamples; + VkSampleCountFlags depthStencilSamples; + VkSampleCountFlags colorSamples; +} VkFramebufferMixedSamplesCombinationNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)(VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + VkPhysicalDevice physicalDevice, + uint32_t* pCombinationCount, + VkFramebufferMixedSamplesCombinationNV* pCombinations); +#endif + + +#define VK_EXT_fragment_shader_interlock 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME "VK_EXT_fragment_shader_interlock" +typedef struct VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderSampleInterlock; + VkBool32 fragmentShaderPixelInterlock; + VkBool32 fragmentShaderShadingRateInterlock; +} VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT; + + + +#define VK_EXT_ycbcr_image_arrays 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME "VK_EXT_ycbcr_image_arrays" +typedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 ycbcrImageArrays; +} VkPhysicalDeviceYcbcrImageArraysFeaturesEXT; + + + +#define VK_EXT_headless_surface 1 +#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1 +#define 
VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME "VK_EXT_headless_surface" +typedef VkFlags VkHeadlessSurfaceCreateFlagsEXT; +typedef struct VkHeadlessSurfaceCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkHeadlessSurfaceCreateFlagsEXT flags; +} VkHeadlessSurfaceCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateHeadlessSurfaceEXT)(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT( + VkInstance instance, + const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_EXT_line_rasterization 1 +#define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization" + +typedef enum VkLineRasterizationModeEXT { + VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 0, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = 1, + VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = 2, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = 3, + VK_LINE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkLineRasterizationModeEXT; +typedef struct VkPhysicalDeviceLineRasterizationFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 rectangularLines; + VkBool32 bresenhamLines; + VkBool32 smoothLines; + VkBool32 stippledRectangularLines; + VkBool32 stippledBresenhamLines; + VkBool32 stippledSmoothLines; +} VkPhysicalDeviceLineRasterizationFeaturesEXT; + +typedef struct VkPhysicalDeviceLineRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t lineSubPixelPrecisionBits; +} VkPhysicalDeviceLineRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationLineStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkLineRasterizationModeEXT lineRasterizationMode; + VkBool32 stippledLineEnable; + uint32_t lineStippleFactor; + uint16_t lineStipplePattern; +} VkPipelineRasterizationLineStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern); +#endif + + +#define VK_EXT_shader_atomic_float 1 +#define VK_EXT_SHADER_ATOMIC_FLOAT_SPEC_VERSION 1 +#define VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME "VK_EXT_shader_atomic_float" +typedef struct VkPhysicalDeviceShaderAtomicFloatFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderBufferFloat32Atomics; + VkBool32 shaderBufferFloat32AtomicAdd; + VkBool32 shaderBufferFloat64Atomics; + VkBool32 shaderBufferFloat64AtomicAdd; + VkBool32 shaderSharedFloat32Atomics; + VkBool32 shaderSharedFloat32AtomicAdd; + VkBool32 shaderSharedFloat64Atomics; + VkBool32 shaderSharedFloat64AtomicAdd; + VkBool32 shaderImageFloat32Atomics; + VkBool32 shaderImageFloat32AtomicAdd; + VkBool32 sparseImageFloat32Atomics; + VkBool32 sparseImageFloat32AtomicAdd; +} VkPhysicalDeviceShaderAtomicFloatFeaturesEXT; + + + +#define VK_EXT_host_query_reset 1 +#define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1 +#define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME "VK_EXT_host_query_reset" +typedef VkPhysicalDeviceHostQueryResetFeatures VkPhysicalDeviceHostQueryResetFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkResetQueryPoolEXT)(VkDevice device, VkQueryPool queryPool, uint32_t 
firstQuery, uint32_t queryCount); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); +#endif + + +#define VK_EXT_index_type_uint8 1 +#define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1 +#define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8" +typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 indexTypeUint8; +} VkPhysicalDeviceIndexTypeUint8FeaturesEXT; + + + +#define VK_EXT_extended_dynamic_state 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_extended_dynamic_state" +typedef struct VkPhysicalDeviceExtendedDynamicStateFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 extendedDynamicState; +} VkPhysicalDeviceExtendedDynamicStateFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCullModeEXT)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode); +typedef void (VKAPI_PTR *PFN_vkCmdSetFrontFaceEXT)(VkCommandBuffer commandBuffer, VkFrontFace frontFace); +typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopologyEXT)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2EXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOpEXT)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilOpEXT)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCullModeEXT( + VkCommandBuffer commandBuffer, + VkCullModeFlags cullMode); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFaceEXT( + VkCommandBuffer commandBuffer, + VkFrontFace frontFace); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopologyEXT( + VkCommandBuffer commandBuffer, + VkPrimitiveTopology primitiveTopology); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2EXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes, + const VkDeviceSize* 
pStrides); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthTestEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthWriteEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOpEXT( + VkCommandBuffer commandBuffer, + VkCompareOp depthCompareOp); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthBoundsTestEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 stencilTestEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOpEXT( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + VkStencilOp failOp, + VkStencilOp passOp, + VkStencilOp depthFailOp, + VkCompareOp compareOp); +#endif + + +#define VK_EXT_shader_demote_to_helper_invocation 1 +#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1 +#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation" +typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderDemoteToHelperInvocation; +} VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT; + + + +#define VK_NV_device_generated_commands 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNV) +#define VK_NV_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3 +#define VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NV_device_generated_commands" + +typedef enum VkIndirectCommandsTokenTypeNV { + VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV = 0, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV = 1, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV = 2, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV = 3, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV = 4, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV = 5, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV = 6, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV = 7, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkIndirectCommandsTokenTypeNV; + +typedef enum VkIndirectStateFlagBitsNV { + VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV = 0x00000001, + VK_INDIRECT_STATE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkIndirectStateFlagBitsNV; +typedef VkFlags VkIndirectStateFlagsNV; + +typedef enum VkIndirectCommandsLayoutUsageFlagBitsNV { + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV = 0x00000001, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV = 0x00000002, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV = 0x00000004, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkIndirectCommandsLayoutUsageFlagBitsNV; +typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNV; +typedef struct VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxGraphicsShaderGroupCount; + uint32_t maxIndirectSequenceCount; + uint32_t maxIndirectCommandsTokenCount; + uint32_t maxIndirectCommandsStreamCount; + uint32_t maxIndirectCommandsTokenOffset; + uint32_t maxIndirectCommandsStreamStride; + uint32_t minSequencesCountBufferOffsetAlignment; + uint32_t minSequencesIndexBufferOffsetAlignment; + uint32_t minIndirectCommandsBufferOffsetAlignment; +} VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV; + +typedef struct VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 deviceGeneratedCommands; +} 
VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV; + +typedef struct VkGraphicsShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; +} VkGraphicsShaderGroupCreateInfoNV; + +typedef struct VkGraphicsPipelineShaderGroupsCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t groupCount; + const VkGraphicsShaderGroupCreateInfoNV* pGroups; + uint32_t pipelineCount; + const VkPipeline* pPipelines; +} VkGraphicsPipelineShaderGroupsCreateInfoNV; + +typedef struct VkBindShaderGroupIndirectCommandNV { + uint32_t groupIndex; +} VkBindShaderGroupIndirectCommandNV; + +typedef struct VkBindIndexBufferIndirectCommandNV { + VkDeviceAddress bufferAddress; + uint32_t size; + VkIndexType indexType; +} VkBindIndexBufferIndirectCommandNV; + +typedef struct VkBindVertexBufferIndirectCommandNV { + VkDeviceAddress bufferAddress; + uint32_t size; + uint32_t stride; +} VkBindVertexBufferIndirectCommandNV; + +typedef struct VkSetStateFlagsIndirectCommandNV { + uint32_t data; +} VkSetStateFlagsIndirectCommandNV; + +typedef struct VkIndirectCommandsStreamNV { + VkBuffer buffer; + VkDeviceSize offset; +} VkIndirectCommandsStreamNV; + +typedef struct VkIndirectCommandsLayoutTokenNV { + VkStructureType sType; + const void* pNext; + VkIndirectCommandsTokenTypeNV tokenType; + uint32_t stream; + uint32_t offset; + uint32_t vertexBindingUnit; + VkBool32 vertexDynamicStride; + VkPipelineLayout pushconstantPipelineLayout; + VkShaderStageFlags pushconstantShaderStageFlags; + uint32_t pushconstantOffset; + uint32_t pushconstantSize; + VkIndirectStateFlagsNV indirectStateFlags; + uint32_t indexTypeCount; + const VkIndexType* pIndexTypes; + const uint32_t* pIndexTypeValues; +} VkIndirectCommandsLayoutTokenNV; + +typedef struct VkIndirectCommandsLayoutCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkIndirectCommandsLayoutUsageFlagsNV flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t tokenCount; + const VkIndirectCommandsLayoutTokenNV* pTokens; + uint32_t streamCount; + const uint32_t* pStreamStrides; +} VkIndirectCommandsLayoutCreateInfoNV; + +typedef struct VkGeneratedCommandsInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineBindPoint pipelineBindPoint; + VkPipeline pipeline; + VkIndirectCommandsLayoutNV indirectCommandsLayout; + uint32_t streamCount; + const VkIndirectCommandsStreamNV* pStreams; + uint32_t sequencesCount; + VkBuffer preprocessBuffer; + VkDeviceSize preprocessOffset; + VkDeviceSize preprocessSize; + VkBuffer sequencesCountBuffer; + VkDeviceSize sequencesCountOffset; + VkBuffer sequencesIndexBuffer; + VkDeviceSize sequencesIndexOffset; +} VkGeneratedCommandsInfoNV; + +typedef struct VkGeneratedCommandsMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineBindPoint pipelineBindPoint; + VkPipeline pipeline; + VkIndirectCommandsLayoutNV indirectCommandsLayout; + uint32_t maxSequencesCount; +} VkGeneratedCommandsMemoryRequirementsInfoNV; + +typedef void (VKAPI_PTR *PFN_vkGetGeneratedCommandsMemoryRequirementsNV)(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkCmdPreprocessGeneratedCommandsNV)(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); +typedef void (VKAPI_PTR 
*PFN_vkCmdExecuteGeneratedCommandsNV)(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipelineShaderGroupNV)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex); +typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNV)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNV)(VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetGeneratedCommandsMemoryRequirementsNV( + VkDevice device, + const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkCmdPreprocessGeneratedCommandsNV( + VkCommandBuffer commandBuffer, + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteGeneratedCommandsNV( + VkCommandBuffer commandBuffer, + VkBool32 isPreprocessed, + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipelineShaderGroupNV( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline, + uint32_t groupIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNV( + VkDevice device, + const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectCommandsLayoutNV* pIndirectCommandsLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNV( + VkDevice device, + VkIndirectCommandsLayoutNV indirectCommandsLayout, + const VkAllocationCallbacks* pAllocator); +#endif + + +#define VK_NV_inherited_viewport_scissor 1 +#define VK_NV_INHERITED_VIEWPORT_SCISSOR_SPEC_VERSION 1 +#define VK_NV_INHERITED_VIEWPORT_SCISSOR_EXTENSION_NAME "VK_NV_inherited_viewport_scissor" +typedef struct VkPhysicalDeviceInheritedViewportScissorFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 inheritedViewportScissor2D; +} VkPhysicalDeviceInheritedViewportScissorFeaturesNV; + +typedef struct VkCommandBufferInheritanceViewportScissorInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportScissor2D; + uint32_t viewportDepthCount; + const VkViewport* pViewportDepths; +} VkCommandBufferInheritanceViewportScissorInfoNV; + + + +#define VK_EXT_texel_buffer_alignment 1 +#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION 1 +#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME "VK_EXT_texel_buffer_alignment" +typedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 texelBufferAlignment; +} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT; + +typedef struct VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize storageTexelBufferOffsetAlignmentBytes; + VkBool32 storageTexelBufferOffsetSingleTexelAlignment; + VkDeviceSize uniformTexelBufferOffsetAlignmentBytes; + VkBool32 uniformTexelBufferOffsetSingleTexelAlignment; +} VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT; + + + +#define VK_QCOM_render_pass_transform 1 +#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 2 +#define VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME "VK_QCOM_render_pass_transform" +typedef 
struct VkRenderPassTransformBeginInfoQCOM { + VkStructureType sType; + void* pNext; + VkSurfaceTransformFlagBitsKHR transform; +} VkRenderPassTransformBeginInfoQCOM; + +typedef struct VkCommandBufferInheritanceRenderPassTransformInfoQCOM { + VkStructureType sType; + void* pNext; + VkSurfaceTransformFlagBitsKHR transform; + VkRect2D renderArea; +} VkCommandBufferInheritanceRenderPassTransformInfoQCOM; + + + +#define VK_EXT_device_memory_report 1 +#define VK_EXT_DEVICE_MEMORY_REPORT_SPEC_VERSION 2 +#define VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME "VK_EXT_device_memory_report" + +typedef enum VkDeviceMemoryReportEventTypeEXT { + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT = 0, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT = 1, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT = 2, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT = 3, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT = 4, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceMemoryReportEventTypeEXT; +typedef VkFlags VkDeviceMemoryReportFlagsEXT; +typedef struct VkPhysicalDeviceDeviceMemoryReportFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 deviceMemoryReport; +} VkPhysicalDeviceDeviceMemoryReportFeaturesEXT; + +typedef struct VkDeviceMemoryReportCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDeviceMemoryReportFlagsEXT flags; + VkDeviceMemoryReportEventTypeEXT type; + uint64_t memoryObjectId; + VkDeviceSize size; + VkObjectType objectType; + uint64_t objectHandle; + uint32_t heapIndex; +} VkDeviceMemoryReportCallbackDataEXT; + +typedef void (VKAPI_PTR *PFN_vkDeviceMemoryReportCallbackEXT)( + const VkDeviceMemoryReportCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDeviceDeviceMemoryReportCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceMemoryReportFlagsEXT flags; + PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback; + void* pUserData; +} VkDeviceDeviceMemoryReportCreateInfoEXT; + + + +#define VK_EXT_robustness2 1 +#define VK_EXT_ROBUSTNESS_2_SPEC_VERSION 1 +#define VK_EXT_ROBUSTNESS_2_EXTENSION_NAME "VK_EXT_robustness2" +typedef struct VkPhysicalDeviceRobustness2FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 robustBufferAccess2; + VkBool32 robustImageAccess2; + VkBool32 nullDescriptor; +} VkPhysicalDeviceRobustness2FeaturesEXT; + +typedef struct VkPhysicalDeviceRobustness2PropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize robustStorageBufferAccessSizeAlignment; + VkDeviceSize robustUniformBufferAccessSizeAlignment; +} VkPhysicalDeviceRobustness2PropertiesEXT; + + + +#define VK_EXT_custom_border_color 1 +#define VK_EXT_CUSTOM_BORDER_COLOR_SPEC_VERSION 12 +#define VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME "VK_EXT_custom_border_color" +typedef struct VkSamplerCustomBorderColorCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkClearColorValue customBorderColor; + VkFormat format; +} VkSamplerCustomBorderColorCreateInfoEXT; + +typedef struct VkPhysicalDeviceCustomBorderColorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxCustomBorderColorSamplers; +} VkPhysicalDeviceCustomBorderColorPropertiesEXT; + +typedef struct VkPhysicalDeviceCustomBorderColorFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 customBorderColors; + VkBool32 customBorderColorWithoutFormat; +} VkPhysicalDeviceCustomBorderColorFeaturesEXT; + + + +#define VK_GOOGLE_user_type 1 +#define VK_GOOGLE_USER_TYPE_SPEC_VERSION 1 +#define 
VK_GOOGLE_USER_TYPE_EXTENSION_NAME "VK_GOOGLE_user_type" + + +#define VK_EXT_private_data 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlotEXT) +#define VK_EXT_PRIVATE_DATA_SPEC_VERSION 1 +#define VK_EXT_PRIVATE_DATA_EXTENSION_NAME "VK_EXT_private_data" + +typedef enum VkPrivateDataSlotCreateFlagBitsEXT { + VK_PRIVATE_DATA_SLOT_CREATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPrivateDataSlotCreateFlagBitsEXT; +typedef VkFlags VkPrivateDataSlotCreateFlagsEXT; +typedef struct VkPhysicalDevicePrivateDataFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 privateData; +} VkPhysicalDevicePrivateDataFeaturesEXT; + +typedef struct VkDevicePrivateDataCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t privateDataSlotRequestCount; +} VkDevicePrivateDataCreateInfoEXT; + +typedef struct VkPrivateDataSlotCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPrivateDataSlotCreateFlagsEXT flags; +} VkPrivateDataSlotCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlotEXT* pPrivateDataSlot); +typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data); +typedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlotEXT( + VkDevice device, + const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPrivateDataSlotEXT* pPrivateDataSlot); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlotEXT( + VkDevice device, + VkPrivateDataSlotEXT privateDataSlot, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateDataEXT( + VkDevice device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t data); + +VKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT( + VkDevice device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t* pData); +#endif + + +#define VK_EXT_pipeline_creation_cache_control 1 +#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION 3 +#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME "VK_EXT_pipeline_creation_cache_control" +typedef struct VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 pipelineCreationCacheControl; +} VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT; + + + +#define VK_NV_device_diagnostics_config 1 +#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 1 +#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME "VK_NV_device_diagnostics_config" + +typedef enum VkDeviceDiagnosticsConfigFlagBitsNV { + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV = 0x00000001, + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV = 0x00000002, + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV = 0x00000004, + VK_DEVICE_DIAGNOSTICS_CONFIG_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkDeviceDiagnosticsConfigFlagBitsNV; +typedef VkFlags VkDeviceDiagnosticsConfigFlagsNV; 
+typedef struct VkPhysicalDeviceDiagnosticsConfigFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 diagnosticsConfig; +} VkPhysicalDeviceDiagnosticsConfigFeaturesNV; + +typedef struct VkDeviceDiagnosticsConfigCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceDiagnosticsConfigFlagsNV flags; +} VkDeviceDiagnosticsConfigCreateInfoNV; + + + +#define VK_QCOM_render_pass_store_ops 1 +#define VK_QCOM_render_pass_store_ops_SPEC_VERSION 2 +#define VK_QCOM_render_pass_store_ops_EXTENSION_NAME "VK_QCOM_render_pass_store_ops" + + +#define VK_NV_fragment_shading_rate_enums 1 +#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME "VK_NV_fragment_shading_rate_enums" + +typedef enum VkFragmentShadingRateTypeNV { + VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV = 0, + VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV = 1, + VK_FRAGMENT_SHADING_RATE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkFragmentShadingRateTypeNV; + +typedef enum VkFragmentShadingRateNV { + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV = 0, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV = 1, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV = 4, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV = 5, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV = 6, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV = 10, + VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV = 11, + VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV = 12, + VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV = 13, + VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV = 14, + VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV = 15, + VK_FRAGMENT_SHADING_RATE_MAX_ENUM_NV = 0x7FFFFFFF +} VkFragmentShadingRateNV; +typedef struct VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShadingRateEnums; + VkBool32 supersampleFragmentShadingRates; + VkBool32 noInvocationFragmentShadingRates; +} VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + +typedef struct VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV { + VkStructureType sType; + void* pNext; + VkSampleCountFlagBits maxFragmentShadingRateInvocationCount; +} VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + +typedef struct VkPipelineFragmentShadingRateEnumStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkFragmentShadingRateTypeNV shadingRateType; + VkFragmentShadingRateNV shadingRate; + VkFragmentShadingRateCombinerOpKHR combinerOps[2]; +} VkPipelineFragmentShadingRateEnumStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateEnumNV)(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateEnumNV( + VkCommandBuffer commandBuffer, + VkFragmentShadingRateNV shadingRate, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); +#endif + + +#define VK_EXT_ycbcr_2plane_444_formats 1 +#define VK_EXT_YCBCR_2PLANE_444_FORMATS_SPEC_VERSION 1 +#define VK_EXT_YCBCR_2PLANE_444_FORMATS_EXTENSION_NAME "VK_EXT_ycbcr_2plane_444_formats" +typedef struct VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 ycbcr2plane444Formats; +} VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT; + + + +#define VK_EXT_fragment_density_map2 1 +#define 
VK_EXT_FRAGMENT_DENSITY_MAP_2_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME "VK_EXT_fragment_density_map2" +typedef struct VkPhysicalDeviceFragmentDensityMap2FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMapDeferred; +} VkPhysicalDeviceFragmentDensityMap2FeaturesEXT; + +typedef struct VkPhysicalDeviceFragmentDensityMap2PropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 subsampledLoads; + VkBool32 subsampledCoarseReconstructionEarlyAccess; + uint32_t maxSubsampledArrayLayers; + uint32_t maxDescriptorSetSubsampledSamplers; +} VkPhysicalDeviceFragmentDensityMap2PropertiesEXT; + + + +#define VK_QCOM_rotated_copy_commands 1 +#define VK_QCOM_ROTATED_COPY_COMMANDS_SPEC_VERSION 1 +#define VK_QCOM_ROTATED_COPY_COMMANDS_EXTENSION_NAME "VK_QCOM_rotated_copy_commands" +typedef struct VkCopyCommandTransformInfoQCOM { + VkStructureType sType; + const void* pNext; + VkSurfaceTransformFlagBitsKHR transform; +} VkCopyCommandTransformInfoQCOM; + + + +#define VK_EXT_image_robustness 1 +#define VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION 1 +#define VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_image_robustness" +typedef struct VkPhysicalDeviceImageRobustnessFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 robustImageAccess; +} VkPhysicalDeviceImageRobustnessFeaturesEXT; + + + +#define VK_EXT_4444_formats 1 +#define VK_EXT_4444_FORMATS_SPEC_VERSION 1 +#define VK_EXT_4444_FORMATS_EXTENSION_NAME "VK_EXT_4444_formats" +typedef struct VkPhysicalDevice4444FormatsFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 formatA4R4G4B4; + VkBool32 formatA4B4G4R4; +} VkPhysicalDevice4444FormatsFeaturesEXT; + + + +#define VK_NV_acquire_winrt_display 1 +#define VK_NV_ACQUIRE_WINRT_DISPLAY_SPEC_VERSION 1 +#define VK_NV_ACQUIRE_WINRT_DISPLAY_EXTENSION_NAME "VK_NV_acquire_winrt_display" +typedef VkResult (VKAPI_PTR *PFN_vkAcquireWinrtDisplayNV)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); +typedef VkResult (VKAPI_PTR *PFN_vkGetWinrtDisplayNV)(VkPhysicalDevice physicalDevice, uint32_t deviceRelativeId, VkDisplayKHR* pDisplay); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireWinrtDisplayNV( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetWinrtDisplayNV( + VkPhysicalDevice physicalDevice, + uint32_t deviceRelativeId, + VkDisplayKHR* pDisplay); +#endif + + +#define VK_VALVE_mutable_descriptor_type 1 +#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION 1 +#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME "VK_VALVE_mutable_descriptor_type" +typedef struct VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE { + VkStructureType sType; + void* pNext; + VkBool32 mutableDescriptorType; +} VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE; + +typedef struct VkMutableDescriptorTypeListVALVE { + uint32_t descriptorTypeCount; + const VkDescriptorType* pDescriptorTypes; +} VkMutableDescriptorTypeListVALVE; + +typedef struct VkMutableDescriptorTypeCreateInfoVALVE { + VkStructureType sType; + const void* pNext; + uint32_t mutableDescriptorTypeListCount; + const VkMutableDescriptorTypeListVALVE* pMutableDescriptorTypeLists; +} VkMutableDescriptorTypeCreateInfoVALVE; + + + +#define VK_EXT_vertex_input_dynamic_state 1 +#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_SPEC_VERSION 2 +#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_vertex_input_dynamic_state" +typedef struct VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT { + VkStructureType sType; + 
void* pNext; + VkBool32 vertexInputDynamicState; +} VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT; + +typedef struct VkVertexInputBindingDescription2EXT { + VkStructureType sType; + void* pNext; + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; + uint32_t divisor; +} VkVertexInputBindingDescription2EXT; + +typedef struct VkVertexInputAttributeDescription2EXT { + VkStructureType sType; + void* pNext; + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription2EXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetVertexInputEXT)(VkCommandBuffer commandBuffer, uint32_t vertexBindingDescriptionCount, const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions, uint32_t vertexAttributeDescriptionCount, const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetVertexInputEXT( + VkCommandBuffer commandBuffer, + uint32_t vertexBindingDescriptionCount, + const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions, + uint32_t vertexAttributeDescriptionCount, + const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions); +#endif + + +#define VK_EXT_extended_dynamic_state2 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME "VK_EXT_extended_dynamic_state2" +typedef struct VkPhysicalDeviceExtendedDynamicState2FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 extendedDynamicState2; + VkBool32 extendedDynamicState2LogicOp; + VkBool32 extendedDynamicState2PatchControlPoints; +} VkPhysicalDeviceExtendedDynamicState2FeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetPatchControlPointsEXT)(VkCommandBuffer commandBuffer, uint32_t patchControlPoints); +typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetLogicOpEXT)(VkCommandBuffer commandBuffer, VkLogicOp logicOp); +typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetPatchControlPointsEXT( + VkCommandBuffer commandBuffer, + uint32_t patchControlPoints); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizerDiscardEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 rasterizerDiscardEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthBiasEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLogicOpEXT( + VkCommandBuffer commandBuffer, + VkLogicOp logicOp); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 primitiveRestartEnable); +#endif + + +#define VK_EXT_color_write_enable 1 +#define VK_EXT_COLOR_WRITE_ENABLE_SPEC_VERSION 1 +#define VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME "VK_EXT_color_write_enable" +typedef struct VkPhysicalDeviceColorWriteEnableFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 colorWriteEnable; +} VkPhysicalDeviceColorWriteEnableFeaturesEXT; + +typedef struct VkPipelineColorWriteCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentCount; + const VkBool32* pColorWriteEnables; +} VkPipelineColorWriteCreateInfoEXT; + +typedef void (VKAPI_PTR 
*PFN_vkCmdSetColorWriteEnableEXT)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkBool32* pColorWriteEnables); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetColorWriteEnableEXT( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkBool32* pColorWriteEnables); +#endif + + +#define VK_KHR_acceleration_structure 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR) +#define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 11 +#define VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_KHR_acceleration_structure" + +typedef enum VkBuildAccelerationStructureModeKHR { + VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR = 0, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR = 1, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureModeKHR; + +typedef enum VkAccelerationStructureBuildTypeKHR { + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureBuildTypeKHR; + +typedef enum VkAccelerationStructureCompatibilityKHR { + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR = 0, + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR = 1, + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureCompatibilityKHR; + +typedef enum VkAccelerationStructureCreateFlagBitsKHR { + VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = 0x00000001, + VK_ACCELERATION_STRUCTURE_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureCreateFlagBitsKHR; +typedef VkFlags VkAccelerationStructureCreateFlagsKHR; +typedef union VkDeviceOrHostAddressKHR { + VkDeviceAddress deviceAddress; + void* hostAddress; +} VkDeviceOrHostAddressKHR; + +typedef union VkDeviceOrHostAddressConstKHR { + VkDeviceAddress deviceAddress; + const void* hostAddress; +} VkDeviceOrHostAddressConstKHR; + +typedef struct VkAccelerationStructureBuildRangeInfoKHR { + uint32_t primitiveCount; + uint32_t primitiveOffset; + uint32_t firstVertex; + uint32_t transformOffset; +} VkAccelerationStructureBuildRangeInfoKHR; + +typedef struct VkAccelerationStructureGeometryTrianglesDataKHR { + VkStructureType sType; + const void* pNext; + VkFormat vertexFormat; + VkDeviceOrHostAddressConstKHR vertexData; + VkDeviceSize vertexStride; + uint32_t maxVertex; + VkIndexType indexType; + VkDeviceOrHostAddressConstKHR indexData; + VkDeviceOrHostAddressConstKHR transformData; +} VkAccelerationStructureGeometryTrianglesDataKHR; + +typedef struct VkAccelerationStructureGeometryAabbsDataKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR data; + VkDeviceSize stride; +} VkAccelerationStructureGeometryAabbsDataKHR; + +typedef struct VkAccelerationStructureGeometryInstancesDataKHR { + VkStructureType sType; + const void* pNext; + VkBool32 arrayOfPointers; + VkDeviceOrHostAddressConstKHR data; +} VkAccelerationStructureGeometryInstancesDataKHR; + +typedef union VkAccelerationStructureGeometryDataKHR { + VkAccelerationStructureGeometryTrianglesDataKHR triangles; + VkAccelerationStructureGeometryAabbsDataKHR aabbs; + VkAccelerationStructureGeometryInstancesDataKHR instances; +} VkAccelerationStructureGeometryDataKHR; + +typedef struct VkAccelerationStructureGeometryKHR { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + 
VkAccelerationStructureGeometryDataKHR geometry; + VkGeometryFlagsKHR flags; +} VkAccelerationStructureGeometryKHR; + +typedef struct VkAccelerationStructureBuildGeometryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeKHR type; + VkBuildAccelerationStructureFlagsKHR flags; + VkBuildAccelerationStructureModeKHR mode; + VkAccelerationStructureKHR srcAccelerationStructure; + VkAccelerationStructureKHR dstAccelerationStructure; + uint32_t geometryCount; + const VkAccelerationStructureGeometryKHR* pGeometries; + const VkAccelerationStructureGeometryKHR* const* ppGeometries; + VkDeviceOrHostAddressKHR scratchData; +} VkAccelerationStructureBuildGeometryInfoKHR; + +typedef struct VkAccelerationStructureCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureCreateFlagsKHR createFlags; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; + VkAccelerationStructureTypeKHR type; + VkDeviceAddress deviceAddress; +} VkAccelerationStructureCreateInfoKHR; + +typedef struct VkWriteDescriptorSetAccelerationStructureKHR { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureKHR* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureKHR; + +typedef struct VkPhysicalDeviceAccelerationStructureFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 accelerationStructure; + VkBool32 accelerationStructureCaptureReplay; + VkBool32 accelerationStructureIndirectBuild; + VkBool32 accelerationStructureHostCommands; + VkBool32 descriptorBindingAccelerationStructureUpdateAfterBind; +} VkPhysicalDeviceAccelerationStructureFeaturesKHR; + +typedef struct VkPhysicalDeviceAccelerationStructurePropertiesKHR { + VkStructureType sType; + void* pNext; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxPrimitiveCount; + uint32_t maxPerStageDescriptorAccelerationStructures; + uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures; + uint32_t maxDescriptorSetAccelerationStructures; + uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures; + uint32_t minAccelerationStructureScratchOffsetAlignment; +} VkPhysicalDeviceAccelerationStructurePropertiesKHR; + +typedef struct VkAccelerationStructureDeviceAddressInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR accelerationStructure; +} VkAccelerationStructureDeviceAddressInfoKHR; + +typedef struct VkAccelerationStructureVersionInfoKHR { + VkStructureType sType; + const void* pNext; + const uint8_t* pVersionData; +} VkAccelerationStructureVersionInfoKHR; + +typedef struct VkCopyAccelerationStructureToMemoryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkDeviceOrHostAddressKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureToMemoryInfoKHR; + +typedef struct VkCopyMemoryToAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyMemoryToAccelerationStructureInfoKHR; + +typedef struct VkCopyAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureInfoKHR; + +typedef struct VkAccelerationStructureBuildSizesInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize 
accelerationStructureSize; + VkDeviceSize updateScratchSize; + VkDeviceSize buildScratchSize; +} VkAccelerationStructureBuildSizesInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresIndirectKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts); +typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructuresKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef void (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureBuildSizesKHR)(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* 
pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR( + VkDevice device, + const VkAccelerationStructureCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureKHR* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresIndirectKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkDeviceAddress* pIndirectDeviceAddresses, + const uint32_t* pIndirectStrides, + const uint32_t* const* ppMaxPrimitiveCounts); + +VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructuresKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR( + VkDevice device, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + size_t dataSize, + void* pData, + size_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR( + VkDevice device, + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR( + VkDevice device, + const VkAccelerationStructureVersionInfoKHR* pVersionInfo, + VkAccelerationStructureCompatibilityKHR* pCompatibility); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureBuildSizesKHR( + VkDevice device, + VkAccelerationStructureBuildTypeKHR buildType, + const 
VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, + const uint32_t* pMaxPrimitiveCounts, + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +#endif + + +#define VK_KHR_ray_tracing_pipeline 1 +#define VK_KHR_RAY_TRACING_PIPELINE_SPEC_VERSION 1 +#define VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME "VK_KHR_ray_tracing_pipeline" + +typedef enum VkShaderGroupShaderKHR { + VK_SHADER_GROUP_SHADER_GENERAL_KHR = 0, + VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR = 1, + VK_SHADER_GROUP_SHADER_ANY_HIT_KHR = 2, + VK_SHADER_GROUP_SHADER_INTERSECTION_KHR = 3, + VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR = 0x7FFFFFFF +} VkShaderGroupShaderKHR; +typedef struct VkRayTracingShaderGroupCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; + const void* pShaderGroupCaptureReplayHandle; +} VkRayTracingShaderGroupCreateInfoKHR; + +typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxPipelineRayPayloadSize; + uint32_t maxPipelineRayHitAttributeSize; +} VkRayTracingPipelineInterfaceCreateInfoKHR; + +typedef struct VkRayTracingPipelineCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoKHR* pGroups; + uint32_t maxPipelineRayRecursionDepth; + const VkPipelineLibraryCreateInfoKHR* pLibraryInfo; + const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelineFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingPipeline; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplay; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed; + VkBool32 rayTracingPipelineTraceRaysIndirect; + VkBool32 rayTraversalPrimitiveCulling; +} VkPhysicalDeviceRayTracingPipelineFeaturesKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelinePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRayRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint32_t shaderGroupHandleCaptureReplaySize; + uint32_t maxRayDispatchInvocationCount; + uint32_t shaderGroupHandleAlignment; + uint32_t maxRayHitAttributeSize; +} VkPhysicalDeviceRayTracingPipelinePropertiesKHR; + +typedef struct VkStridedDeviceAddressRegionKHR { + VkDeviceAddress deviceAddress; + VkDeviceSize stride; + VkDeviceSize size; +} VkStridedDeviceAddressRegionKHR; + +typedef struct VkTraceRaysIndirectCommandKHR { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkTraceRaysIndirectCommandKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache 
pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress); +typedef VkDeviceSize (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupStackSizeKHR)(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader); +typedef void (VKAPI_PTR *PFN_vkCmdSetRayTracingPipelineStackSizeKHR)(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + VkDeviceAddress indirectDeviceAddress); + +VKAPI_ATTR VkDeviceSize VKAPI_CALL vkGetRayTracingShaderGroupStackSizeKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t group, + VkShaderGroupShaderKHR groupShader); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRayTracingPipelineStackSizeKHR( + VkCommandBuffer commandBuffer, + uint32_t pipelineStackSize); +#endif + + +#define VK_KHR_ray_query 1 +#define VK_KHR_RAY_QUERY_SPEC_VERSION 1 +#define VK_KHR_RAY_QUERY_EXTENSION_NAME "VK_KHR_ray_query" +typedef struct VkPhysicalDeviceRayQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayQuery; +} VkPhysicalDeviceRayQueryFeaturesKHR; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_ios.h b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_ios.h new file mode 100644 index 000000000..6e7e6afea --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_ios.h @@ -0,0 +1,47 @@ +#ifndef VULKAN_IOS_H_ +#define VULKAN_IOS_H_ 1 + +/* +** Copyright 2015-2021 The Khronos Group Inc. 
+** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_MVK_ios_surface 1 +#define VK_MVK_IOS_SURFACE_SPEC_VERSION 3 +#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface" +typedef VkFlags VkIOSSurfaceCreateFlagsMVK; +typedef struct VkIOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkIOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkIOSSurfaceCreateInfoMVK; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK( + VkInstance instance, + const VkIOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_macos.h b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_macos.h new file mode 100644 index 000000000..c49b123d0 --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_macos.h @@ -0,0 +1,47 @@ +#ifndef VULKAN_MACOS_H_ +#define VULKAN_MACOS_H_ 1 + +/* +** Copyright 2015-2021 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_MVK_macos_surface 1 +#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 3 +#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface" +typedef VkFlags VkMacOSSurfaceCreateFlagsMVK; +typedef struct VkMacOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkMacOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkMacOSSurfaceCreateInfoMVK; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK( + VkInstance instance, + const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_win32.h b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_win32.h new file mode 100644 index 000000000..1b680f0b1 --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_win32.h @@ -0,0 +1,315 @@ +#ifndef VULKAN_WIN32_H_ +#define VULKAN_WIN32_H_ 1 + +/* +** Copyright 2015-2021 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
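**
** (Editor's note -- not part of the generated header.) The declarations below add the
** VK_KHR_win32_surface entry points. A minimal sketch of creating a presentation
** surface with them, assuming a valid VkInstance `instance` and window handle `hwnd`:
**
**     VkWin32SurfaceCreateInfoKHR createInfo = {};
**     createInfo.sType     = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
**     createInfo.hinstance = GetModuleHandle(nullptr);  // module that owns the window
**     createInfo.hwnd      = hwnd;
**     VkSurfaceKHR surface = VK_NULL_HANDLE;
**     if (vkCreateWin32SurfaceKHR(instance, &createInfo, nullptr, &surface) != VK_SUCCESS) {
**         // handle the error; `surface` is only valid on VK_SUCCESS
**     }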
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_win32_surface 1 +#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface" +typedef VkFlags VkWin32SurfaceCreateFlagsKHR; +typedef struct VkWin32SurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWin32SurfaceCreateFlagsKHR flags; + HINSTANCE hinstance; + HWND hwnd; +} VkWin32SurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR( + VkInstance instance, + const VkWin32SurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex); +#endif + + +#define VK_KHR_external_memory_win32 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32" +typedef struct VkImportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportMemoryWin32HandleInfoKHR; + +typedef struct VkExportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportMemoryWin32HandleInfoKHR; + +typedef struct VkMemoryWin32HandlePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryWin32HandlePropertiesKHR; + +typedef struct VkMemoryGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetWin32HandleInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR( + VkDevice device, + const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + HANDLE handle, + VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); +#endif + + +#define VK_KHR_win32_keyed_mutex 1 +#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1 +#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex" +typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeouts; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoKHR; + + + +#define VK_KHR_external_semaphore_win32 1 +#define 
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32" +typedef struct VkImportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportSemaphoreWin32HandleInfoKHR; + +typedef struct VkExportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportSemaphoreWin32HandleInfoKHR; + +typedef struct VkD3D12FenceSubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValuesCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValuesCount; + const uint64_t* pSignalSemaphoreValues; +} VkD3D12FenceSubmitInfoKHR; + +typedef struct VkSemaphoreGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetWin32HandleInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR( + VkDevice device, + const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR( + VkDevice device, + const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + + +#define VK_KHR_external_fence_win32 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32" +typedef struct VkImportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportFenceWin32HandleInfoKHR; + +typedef struct VkExportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportFenceWin32HandleInfoKHR; + +typedef struct VkFenceGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetWin32HandleInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR( + VkDevice device, + const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR( + VkDevice device, + const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + + +#define VK_NV_external_memory_win32 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32" +typedef struct VkImportMemoryWin32HandleInfoNV { + VkStructureType 
sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleType; + HANDLE handle; +} VkImportMemoryWin32HandleInfoNV; + +typedef struct VkExportMemoryWin32HandleInfoNV { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; +} VkExportMemoryWin32HandleInfoNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV( + VkDevice device, + VkDeviceMemory memory, + VkExternalMemoryHandleTypeFlagsNV handleType, + HANDLE* pHandle); +#endif + + +#define VK_NV_win32_keyed_mutex 1 +#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2 +#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex" +typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeoutMilliseconds; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoNV; + + + +#define VK_EXT_full_screen_exclusive 1 +#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4 +#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive" + +typedef enum VkFullScreenExclusiveEXT { + VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0, + VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1, + VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2, + VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3, + VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkFullScreenExclusiveEXT; +typedef struct VkSurfaceFullScreenExclusiveInfoEXT { + VkStructureType sType; + void* pNext; + VkFullScreenExclusiveEXT fullScreenExclusive; +} VkSurfaceFullScreenExclusiveInfoEXT; + +typedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT { + VkStructureType sType; + void* pNext; + VkBool32 fullScreenExclusiveSupported; +} VkSurfaceCapabilitiesFullScreenExclusiveEXT; + +typedef struct VkSurfaceFullScreenExclusiveWin32InfoEXT { + VkStructureType sType; + const void* pNext; + HMONITOR hmonitor; +} VkSurfaceFullScreenExclusiveWin32InfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain); +typedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireFullScreenExclusiveModeEXT( + VkDevice device, + VkSwapchainKHR swapchain); + +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT( + VkDevice device, + VkSwapchainKHR swapchain); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT( + VkDevice device, + const 
VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkDeviceGroupPresentModeFlagsKHR* pModes); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_xcb.h b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_xcb.h new file mode 100644 index 000000000..5ba2ad850 --- /dev/null +++ b/src/deps/skia/include/third_party/vulkan/vulkan/vulkan_xcb.h @@ -0,0 +1,55 @@ +#ifndef VULKAN_XCB_H_ +#define VULKAN_XCB_H_ 1 + +/* +** Copyright 2015-2021 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_xcb_surface 1 +#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface" +typedef VkFlags VkXcbSurfaceCreateFlagsKHR; +typedef struct VkXcbSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXcbSurfaceCreateFlagsKHR flags; + xcb_connection_t* connection; + xcb_window_t window; +} VkXcbSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR( + VkInstance instance, + const VkXcbSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t* connection, + xcb_visualid_t visual_id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/deps/skia/include/utils/BUILD.bazel b/src/deps/skia/include/utils/BUILD.bazel new file mode 100644 index 000000000..a1fbb0f07 --- /dev/null +++ b/src/deps/skia/include/utils/BUILD.bazel @@ -0,0 +1,155 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkAnimCodecPlayer_hdr", + hdrs = ["SkAnimCodecPlayer.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/codec:SkCodec_hdr"], +) + +generated_cc_atom( + name = "SkBase64_hdr", + hdrs = ["SkBase64.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( + name = "SkCamera_hdr", + hdrs = ["SkCamera.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkM44_hdr", + "//include/core:SkMatrix_hdr", + "//include/private:SkNoncopyable_hdr", + ], +) + +generated_cc_atom( + name = "SkCanvasStateUtils_hdr", + hdrs = ["SkCanvasStateUtils.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkCanvas_hdr"], +) + +generated_cc_atom( + name = "SkCustomTypeface_hdr", + hdrs = ["SkCustomTypeface.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkFontMetrics_hdr", + "//include/core:SkFontStyle_hdr", + "//include/core:SkImage_hdr", + "//include/core:SkPaint_hdr", + "//include/core:SkPath_hdr", + "//include/core:SkPicture_hdr", + "//include/core:SkTypeface_hdr", + ], +) + +generated_cc_atom( + name = "SkEventTracer_hdr", + hdrs = ["SkEventTracer.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkTypes_hdr"], +) + +generated_cc_atom( 
+ name = "SkNWayCanvas_hdr", + hdrs = ["SkNWayCanvas.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkNoDrawCanvas_hdr", + "//include/core:SkCanvasVirtualEnforcer_hdr", + "//include/private:SkTDArray_hdr", + ], +) + +generated_cc_atom( + name = "SkNoDrawCanvas_hdr", + hdrs = ["SkNoDrawCanvas.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkCanvasVirtualEnforcer_hdr", + "//include/core:SkCanvas_hdr", + ], +) + +generated_cc_atom( + name = "SkNullCanvas_hdr", + hdrs = ["SkNullCanvas.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkCanvas_hdr"], +) + +generated_cc_atom( + name = "SkOrderedFontMgr_hdr", + hdrs = ["SkOrderedFontMgr.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkFontMgr_hdr"], +) + +generated_cc_atom( + name = "SkPaintFilterCanvas_hdr", + hdrs = ["SkPaintFilterCanvas.h"], + visibility = ["//:__subpackages__"], + deps = [ + ":SkNWayCanvas_hdr", + "//include/core:SkCanvasVirtualEnforcer_hdr", + ], +) + +generated_cc_atom( + name = "SkParsePath_hdr", + hdrs = ["SkParsePath.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkPath_hdr"], +) + +generated_cc_atom( + name = "SkParse_hdr", + hdrs = ["SkParse.h"], + visibility = ["//:__subpackages__"], + deps = ["//include/core:SkColor_hdr"], +) + +generated_cc_atom( + name = "SkRandom_hdr", + hdrs = ["SkRandom.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkScalar_hdr", + "//include/private:SkFixed_hdr", + "//include/private:SkFloatBits_hdr", + ], +) + +generated_cc_atom( + name = "SkShadowUtils_hdr", + hdrs = ["SkShadowUtils.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkColor_hdr", + "//include/core:SkPoint3_hdr", + "//include/core:SkScalar_hdr", + "//include/private:SkShadowFlags_hdr", + ], +) + +generated_cc_atom( + name = "SkTextUtils_hdr", + hdrs = ["SkTextUtils.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkCanvas_hdr", + "//include/core:SkFont_hdr", + "//include/core:SkPaint_hdr", + "//include/core:SkString_hdr", + ], +) + +generated_cc_atom( + name = "SkTraceEventPhase_hdr", + hdrs = ["SkTraceEventPhase.h"], + visibility = ["//:__subpackages__"], +) diff --git a/src/deps/skia/include/utils/SkAnimCodecPlayer.h b/src/deps/skia/include/utils/SkAnimCodecPlayer.h new file mode 100644 index 000000000..c8c98a483 --- /dev/null +++ b/src/deps/skia/include/utils/SkAnimCodecPlayer.h @@ -0,0 +1,60 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkAnimCodecPlayer_DEFINED +#define SkAnimCodecPlayer_DEFINED + +#include "include/codec/SkCodec.h" + +class SkImage; + +class SkAnimCodecPlayer { +public: + SkAnimCodecPlayer(std::unique_ptr<SkCodec> codec); + ~SkAnimCodecPlayer(); + + /** + * Returns the current frame of the animation. This defaults to the first frame for + * animated codecs (i.e. msec = 0). Calling this multiple times (without calling seek()) + * will always return the same image object (or null if there was an error). + */ + sk_sp<SkImage> getFrame(); + + /** + * Return the size of the image(s) that will be returned by getFrame(). + */ + SkISize dimensions() const; + + /** + * Returns the total duration of the animation in milliseconds. Returns 0 for a single-frame + * image. 
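     *
     * (Editor's note) A minimal end-to-end sketch of driving the player, assuming
     * `data` is an sk_sp<SkData> holding an encoded animated image:
     *
     *     SkAnimCodecPlayer player(SkCodec::MakeFromData(data));
     *     player.seek(500);                          // jump to the frame covering 500 ms
     *     sk_sp<SkImage> frame = player.getFrame();  // may be null on decode failure
     *     uint32_t totalMs = player.duration();      // 0 for a single-frame image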
+ */ + uint32_t duration() const { return fTotalDuration; } + + /** + * Finds the closest frame associated with the time code (in milliseconds) and sets that + * to be the current frame (call getFrame() to retrieve that image). + * Returns true iff this call to seek() changed the "current frame" for the animation. + * Thus if seek() returns false, then getFrame() will return the same image as it did + * before this call to seek(). + */ + bool seek(uint32_t msec); + + +private: + std::unique_ptr<SkCodec> fCodec; + SkImageInfo fImageInfo; + std::vector<SkCodec::FrameInfo> fFrameInfos; + std::vector<sk_sp<SkImage> > fImages; + int fCurrIndex = 0; + uint32_t fTotalDuration; + + sk_sp<SkImage> getFrameAt(int index); +}; + +#endif + diff --git a/src/deps/skia/include/utils/SkBase64.h b/src/deps/skia/include/utils/SkBase64.h new file mode 100644 index 000000000..ed76638b6 --- /dev/null +++ b/src/deps/skia/include/utils/SkBase64.h @@ -0,0 +1,51 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkBase64_DEFINED +#define SkBase64_DEFINED + +#include "include/core/SkTypes.h" + +struct SkBase64 { +public: + enum Error { + kNoError, + kPadError, + kBadCharError + }; + + /** + Base64 encodes src into dst. + + Normally this is called once with 'dst' nullptr to get the required size, then again with an + allocated 'dst' pointer to do the actual encoding. + + @param dst nullptr or a pointer to a buffer large enough to receive the result + + @param encode nullptr for default encoding or a pointer to at least 65 chars. + encode[64] will be used as the pad character. + Encodings other than the default encoding cannot be decoded. + + @return the required length of dst for encoding. + */ + static size_t Encode(const void* src, size_t length, void* dst, const char* encode = nullptr); + + /** + Base64 decodes src into dst. + + Normally this is called once with 'dst' nullptr to get the required size, then again with an + allocated 'dst' pointer to do the actual encoding. + + @param dst nullptr or a pointer to a buffer large enough to receive the result + + @param dstLength assigned the length dst is required to be. Must not be nullptr. + */ + static Error SK_WARN_UNUSED_RESULT Decode(const void* src, size_t srcLength, + void* dst, size_t* dstLength); +}; + +#endif // SkBase64_DEFINED diff --git a/src/deps/skia/include/utils/SkCamera.h b/src/deps/skia/include/utils/SkCamera.h new file mode 100644 index 000000000..51a7d4d6b --- /dev/null +++ b/src/deps/skia/include/utils/SkCamera.h @@ -0,0 +1,107 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +// Inspired by Rob Johnson's most excellent QuickDraw GX sample code + +#ifndef SkCamera_DEFINED +#define SkCamera_DEFINED + +#include "include/core/SkM44.h" +#include "include/core/SkMatrix.h" +#include "include/private/SkNoncopyable.h" + +// NOTE -- This entire header / impl is deprecated, and will be removed from Skia soon. +// +// Skia now has support for a 4x matrix (SkM44) in SkCanvas. 
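//
// (Editor's note) As an illustration of the replacement path, a rotation that used to
// go through Sk3DView can be written directly against SkCanvas and SkM44 -- a sketch
// only, assuming a valid SkCanvas* `canvas` and an SkPaint `paint`:
//
//     canvas->save();
//     canvas->concat(SkM44::Rotate({0, 1, 0}, SkDegreesToRadians(30)));  // 30 degrees about Y
//     canvas->drawRect(SkRect::MakeWH(100, 100), paint);
//     canvas->restore();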
+// + +class SkCanvas; + +// DEPRECATED +class SkPatch3D { +public: + SkPatch3D(); + + void reset(); + void transform(const SkM44&, SkPatch3D* dst = nullptr) const; + + // dot a unit vector with the patch's normal + SkScalar dotWith(SkScalar dx, SkScalar dy, SkScalar dz) const; + SkScalar dotWith(const SkV3& v) const { + return this->dotWith(v.x, v.y, v.z); + } + + // deprecated, but still here for animator (for now) + void rotate(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {} + void rotateDegrees(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {} + +private: +public: // make public for SkDraw3D for now + SkV3 fU, fV; + SkV3 fOrigin; + + friend class SkCamera3D; +}; + +// DEPRECATED +class SkCamera3D { +public: + SkCamera3D(); + + void reset(); + void update(); + void patchToMatrix(const SkPatch3D&, SkMatrix* matrix) const; + + SkV3 fLocation; // origin of the camera's space + SkV3 fAxis; // view direction + SkV3 fZenith; // up direction + SkV3 fObserver; // eye position (may not be the same as the origin) + +private: + mutable SkMatrix fOrientation; + mutable bool fNeedToUpdate; + + void doUpdate() const; +}; + +// DEPRECATED +class SK_API Sk3DView : SkNoncopyable { +public: + Sk3DView(); + ~Sk3DView(); + + void save(); + void restore(); + + void translate(SkScalar x, SkScalar y, SkScalar z); + void rotateX(SkScalar deg); + void rotateY(SkScalar deg); + void rotateZ(SkScalar deg); + +#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK + void setCameraLocation(SkScalar x, SkScalar y, SkScalar z); + SkScalar getCameraLocationX() const; + SkScalar getCameraLocationY() const; + SkScalar getCameraLocationZ() const; +#endif + + void getMatrix(SkMatrix*) const; + void applyToCanvas(SkCanvas*) const; + + SkScalar dotWithNormal(SkScalar dx, SkScalar dy, SkScalar dz) const; + +private: + struct Rec { + Rec* fNext; + SkM44 fMatrix; + }; + Rec* fRec; + Rec fInitialRec; + SkCamera3D fCamera; +}; + +#endif diff --git a/src/deps/skia/include/utils/SkCanvasStateUtils.h b/src/deps/skia/include/utils/SkCanvasStateUtils.h new file mode 100644 index 000000000..8b5c65ec0 --- /dev/null +++ b/src/deps/skia/include/utils/SkCanvasStateUtils.h @@ -0,0 +1,78 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkCanvasStateUtils_DEFINED +#define SkCanvasStateUtils_DEFINED + +#include "include/core/SkCanvas.h" + +class SkCanvasState; + +/** + * A set of functions that are useful for copying the state of an SkCanvas + * across a library boundary where the Skia library on the other side of the + * boundary may be newer. The expected usage is outline below... + * + * Lib Boundary + * CaptureCanvasState(...) ||| + * SkCanvas --> SkCanvasState ||| + * ||| CreateFromCanvasState(...) + * ||| SkCanvasState --> SkCanvas` + * ||| Draw into SkCanvas` + * ||| Unref SkCanvas` + * ReleaseCanvasState(...) ||| + * + */ +class SK_API SkCanvasStateUtils { +public: + /** + * Captures the current state of the canvas into an opaque ptr that is safe + * to pass to a different instance of Skia (which may be the same version, + * or may be newer). The function will return NULL in the event that one of the + * following conditions are true. 
+ * 1) the canvas device type is not supported (currently only raster is supported) + * 2) the canvas clip type is not supported (currently only non-AA clips are supported) + * + * It is recommended that the original canvas also not be used until all + * canvases that have been created using its captured state have been dereferenced. + * + * Finally, it is important to note that any draw filters attached to the + * canvas are NOT currently captured. + * + * @param canvas The canvas you wish to capture the current state of. + * @return NULL or an opaque ptr that can be passed to CreateFromCanvasState + * to reconstruct the canvas. The caller is responsible for calling + * ReleaseCanvasState to free the memory associated with this state. + */ + static SkCanvasState* CaptureCanvasState(SkCanvas* canvas); + + /** + * Create a new SkCanvas from the captured state of another SkCanvas. The + * function will return NULL in the event that one of the + * following conditions are true. + * 1) the captured state is in an unrecognized format + * 2) the captured canvas device type is not supported + * + * @param state Opaque object created by CaptureCanvasState. + * @return NULL or an SkCanvas* whose devices and matrix/clip state are + * identical to the captured canvas. The caller is responsible for + * calling unref on the SkCanvas. + */ + static std::unique_ptr<SkCanvas> MakeFromCanvasState(const SkCanvasState* state); + + /** + * Free the memory associated with the captured canvas state. The state + * should not be released until all SkCanvas objects created using that + * state have been dereferenced. Must be called from the same library + * instance that created the state via CaptureCanvasState. + * + * @param state The captured state you wish to dispose of. + */ + static void ReleaseCanvasState(SkCanvasState* state); +}; + +#endif diff --git a/src/deps/skia/include/utils/SkCustomTypeface.h b/src/deps/skia/include/utils/SkCustomTypeface.h new file mode 100644 index 000000000..8430e6f26 --- /dev/null +++ b/src/deps/skia/include/utils/SkCustomTypeface.h @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Google LLC + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkCustomTypeface_DEFINED +#define SkCustomTypeface_DEFINED + +#include "include/core/SkFontMetrics.h" +#include "include/core/SkFontStyle.h" +#include "include/core/SkImage.h" +#include "include/core/SkPaint.h" +#include "include/core/SkPath.h" +#include "include/core/SkPicture.h" +#include "include/core/SkTypeface.h" + +#include <vector> + +class SkStream; + +class SkCustomTypefaceBuilder { +public: + SkCustomTypefaceBuilder(); + + void setGlyph(SkGlyphID, float advance, const SkPath&); + void setGlyph(SkGlyphID, float advance, const SkPath&, const SkPaint&); + void setGlyph(SkGlyphID, float advance, sk_sp<SkImage>, float scale); + void setGlyph(SkGlyphID, float advance, sk_sp<SkPicture>); + + void setMetrics(const SkFontMetrics& fm, float scale = 1); + void setFontStyle(SkFontStyle); + + sk_sp<SkTypeface> detach(); + +private: + std::vector<SkPath> fPaths; + std::vector<float> fAdvances; + SkFontMetrics fMetrics; + SkFontStyle fStyle; + + static sk_sp<SkTypeface> Deserialize(SkStream*); + + friend class SkTypeface; +}; + +#endif diff --git a/src/deps/skia/include/utils/SkEventTracer.h b/src/deps/skia/include/utils/SkEventTracer.h new file mode 100644 index 000000000..4ef6160ea --- /dev/null +++ b/src/deps/skia/include/utils/SkEventTracer.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2014 Google Inc. All rights reserved. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkEventTracer_DEFINED +#define SkEventTracer_DEFINED + +// The class in this header defines the interface between Skia's internal +// tracing macros and an external entity (e.g., Chrome) that will consume them. +// Such an entity should subclass SkEventTracer and provide an instance of +// that event to SkEventTracer::SetInstance. + +// If you're looking for the tracing macros to instrument Skia itself, those +// live in src/core/SkTraceEvent.h + +#include "include/core/SkTypes.h" + +class SK_API SkEventTracer { +public: + + typedef uint64_t Handle; + + /** + * If this is the first call to SetInstance or GetInstance then the passed instance is + * installed and true is returned. Otherwise, false is returned. In either case ownership of the + * tracer is transferred and it will be deleted when no longer needed. + */ + static bool SetInstance(SkEventTracer*); + + /** + * Gets the event tracer. If this is the first call to SetInstance or GetIntance then a default + * event tracer is installed and returned. + */ + static SkEventTracer* GetInstance(); + + virtual ~SkEventTracer() = default; + + // The pointer returned from GetCategoryGroupEnabled() points to a + // value with zero or more of the following bits. Used in this class only. + // The TRACE_EVENT macros should only use the value as a bool. + // These values must be in sync with macro values in trace_event.h in chromium. + enum CategoryGroupEnabledFlags { + // Category group enabled for the recording mode. + kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0, + // Category group enabled for the monitoring mode. + kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1, + // Category group enabled by SetEventCallbackEnabled(). 
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2, + }; + + virtual const uint8_t* getCategoryGroupEnabled(const char* name) = 0; + virtual const char* getCategoryGroupName(const uint8_t* categoryEnabledFlag) = 0; + + virtual SkEventTracer::Handle + addTraceEvent(char phase, + const uint8_t* categoryEnabledFlag, + const char* name, + uint64_t id, + int32_t numArgs, + const char** argNames, + const uint8_t* argTypes, + const uint64_t* argValues, + uint8_t flags) = 0; + + virtual void + updateTraceEventDuration(const uint8_t* categoryEnabledFlag, + const char* name, + SkEventTracer::Handle handle) = 0; + +protected: + SkEventTracer() = default; + SkEventTracer(const SkEventTracer&) = delete; + SkEventTracer& operator=(const SkEventTracer&) = delete; +}; + +#endif // SkEventTracer_DEFINED diff --git a/src/deps/skia/include/utils/SkNWayCanvas.h b/src/deps/skia/include/utils/SkNWayCanvas.h new file mode 100644 index 000000000..4470567de --- /dev/null +++ b/src/deps/skia/include/utils/SkNWayCanvas.h @@ -0,0 +1,92 @@ + +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkNWayCanvas_DEFINED +#define SkNWayCanvas_DEFINED + +#include "include/core/SkCanvasVirtualEnforcer.h" +#include "include/private/SkTDArray.h" +#include "include/utils/SkNoDrawCanvas.h" + +class SK_API SkNWayCanvas : public SkCanvasVirtualEnforcer<SkNoDrawCanvas> { +public: + SkNWayCanvas(int width, int height); + ~SkNWayCanvas() override; + + virtual void addCanvas(SkCanvas*); + virtual void removeCanvas(SkCanvas*); + virtual void removeAll(); + +protected: + SkTDArray<SkCanvas*> fList; + + void willSave() override; + SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override; + bool onDoSaveBehind(const SkRect*) override; + void willRestore() override; + + void didConcat44(const SkM44&) override; + void didSetM44(const SkM44&) override; + void didScale(SkScalar, SkScalar) override; + void didTranslate(SkScalar, SkScalar) override; + + void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override; + void onDrawGlyphRunList(const SkGlyphRunList&, const SkPaint&) override; + void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, + const SkPaint& paint) override; + void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4], + const SkPoint texCoords[4], SkBlendMode, const SkPaint& paint) override; + + void onDrawPaint(const SkPaint&) override; + void onDrawBehind(const SkPaint&) override; + void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override; + void onDrawRect(const SkRect&, const SkPaint&) override; + void onDrawRegion(const SkRegion&, const SkPaint&) override; + void onDrawOval(const SkRect&, const SkPaint&) override; + void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override; + void onDrawRRect(const SkRRect&, const SkPaint&) override; + void onDrawPath(const SkPath&, const SkPaint&) override; + + void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&, + const SkPaint*) override; + void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&, + const SkPaint*, SrcRectConstraint) override; + void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode, + const SkPaint*) override; + void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int, + SkBlendMode, const SkSamplingOptions&, const SkRect*, 
const SkPaint*) override; + + void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override; + void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override; + + void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override; + void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override; + void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override; + void onClipShader(sk_sp<SkShader>, SkClipOp) override; + void onClipRegion(const SkRegion&, SkClipOp) override; + void onResetClip() override; + + void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override; + void onDrawDrawable(SkDrawable*, const SkMatrix*) override; + void onDrawAnnotation(const SkRect&, const char[], SkData*) override; + + void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&, + SkBlendMode) override; + void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[], + const SkSamplingOptions&,const SkPaint*, SrcRectConstraint) override; + + void onFlush() override; + + class Iter; + +private: + using INHERITED = SkCanvasVirtualEnforcer<SkNoDrawCanvas>; +}; + + +#endif diff --git a/src/deps/skia/include/utils/SkNoDrawCanvas.h b/src/deps/skia/include/utils/SkNoDrawCanvas.h new file mode 100644 index 000000000..3f2563873 --- /dev/null +++ b/src/deps/skia/include/utils/SkNoDrawCanvas.h @@ -0,0 +1,80 @@ +/* + * Copyright 2016 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkNoDrawCanvas_DEFINED +#define SkNoDrawCanvas_DEFINED + +#include "include/core/SkCanvas.h" +#include "include/core/SkCanvasVirtualEnforcer.h" + +struct SkIRect; + +// SkNoDrawCanvas is a helper for SkCanvas subclasses which do not need to +// actually rasterize (e.g., analysis of the draw calls). +// +// It provides the following simplifications: +// +// * not backed by any device/pixels +// * conservative clipping (clipping calls only use rectangles) +// +class SK_API SkNoDrawCanvas : public SkCanvasVirtualEnforcer<SkCanvas> { +public: + SkNoDrawCanvas(int width, int height); + SkNoDrawCanvas(const SkIRect&); + + explicit SkNoDrawCanvas(sk_sp<SkBaseDevice> device); + + // Optimization to reset state to be the same as after construction. + void resetCanvas(int w, int h) { this->resetForNextPicture(SkIRect::MakeWH(w, h)); } + void resetCanvas(const SkIRect& rect) { this->resetForNextPicture(rect); } + +protected: + SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& rec) override; + bool onDoSaveBehind(const SkRect*) override; + + // No-op overrides for aborting rasterization earlier than SkNullBlitter. 
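    // (Editor's note) A subclass usually layers its analysis on top of these no-ops,
    // e.g. counting the draw calls a client issues -- a minimal sketch using only what
    // this header declares:
    //
    //     class DrawCounter : public SkNoDrawCanvas {
    //     public:
    //         DrawCounter(int w, int h) : SkNoDrawCanvas(w, h) {}
    //         int count() const { return fCount; }
    //     protected:
    //         void onDrawRect(const SkRect&, const SkPaint&) override { ++fCount; }
    //         void onDrawPath(const SkPath&, const SkPaint&) override { ++fCount; }
    //     private:
    //         int fCount = 0;
    //     };
    //
    // The no-op draw overrides themselves follow.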
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override {} + void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override {} + void onDrawDrawable(SkDrawable*, const SkMatrix*) override {} + void onDrawTextBlob(const SkTextBlob*, SkScalar, SkScalar, const SkPaint&) override {} + void onDrawPatch(const SkPoint[12], const SkColor[4], const SkPoint[4], SkBlendMode, + const SkPaint&) override {} + + void onDrawPaint(const SkPaint&) override {} + void onDrawBehind(const SkPaint&) override {} + void onDrawPoints(PointMode, size_t, const SkPoint[], const SkPaint&) override {} + void onDrawRect(const SkRect&, const SkPaint&) override {} + void onDrawRegion(const SkRegion&, const SkPaint&) override {} + void onDrawOval(const SkRect&, const SkPaint&) override {} + void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override {} + void onDrawRRect(const SkRRect&, const SkPaint&) override {} + void onDrawPath(const SkPath&, const SkPaint&) override {} + + void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&, + const SkPaint*) override {} + void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&, + const SkPaint*, SrcRectConstraint) override {} + void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode, + const SkPaint*) override {} + void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int, + SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override {} + + void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override {} + void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override {} + void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override {} + + void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&, + SkBlendMode) override {} + void onDrawEdgeAAImageSet2(const ImageSetEntry[], int, const SkPoint[], const SkMatrix[], + const SkSamplingOptions&, const SkPaint*, + SrcRectConstraint) override {} + +private: + using INHERITED = SkCanvasVirtualEnforcer<SkCanvas>; +}; + +#endif // SkNoDrawCanvas_DEFINED diff --git a/src/deps/skia/include/utils/SkNullCanvas.h b/src/deps/skia/include/utils/SkNullCanvas.h new file mode 100644 index 000000000..d63bf5c41 --- /dev/null +++ b/src/deps/skia/include/utils/SkNullCanvas.h @@ -0,0 +1,18 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkNullCanvas_DEFINED +#define SkNullCanvas_DEFINED + +#include "include/core/SkCanvas.h" + +/** + * Creates a canvas that draws nothing. This is useful for performance testing. + */ +SK_API std::unique_ptr<SkCanvas> SkMakeNullCanvas(); + +#endif diff --git a/src/deps/skia/include/utils/SkOrderedFontMgr.h b/src/deps/skia/include/utils/SkOrderedFontMgr.h new file mode 100644 index 000000000..8d6152df3 --- /dev/null +++ b/src/deps/skia/include/utils/SkOrderedFontMgr.h @@ -0,0 +1,53 @@ +/* + * Copyright 2021 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkOrderedFontMgr_DEFINED +#define SkOrderedFontMgr_DEFINED + +#include "include/core/SkFontMgr.h" +#include <vector> +/** + * Collects an order list of other font managers, and visits them in order + * when a request to find or match is issued. 
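 *
 * (Editor's note) A minimal usage sketch; the appended managers and the family name
 * are placeholders and depend on which font ports are compiled in:
 *
 *     sk_sp<SkOrderedFontMgr> mgr = sk_make_sp<SkOrderedFontMgr>();
 *     mgr->append(myCustomFontMgr);            // consulted first
 *     mgr->append(SkFontMgr::RefDefault());    // platform fallback, consulted last
 *     sk_sp<SkTypeface> face(mgr->matchFamilyStyle("Roboto", SkFontStyle::Normal()));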
+ * + * Note: this explicitly fails on any attempt to Make a typeface: all of + * those requests will return null. + */ +class SK_API SkOrderedFontMgr : public SkFontMgr { +public: + SkOrderedFontMgr(); + ~SkOrderedFontMgr() override; + + void append(sk_sp<SkFontMgr>); + +protected: + int onCountFamilies() const override; + void onGetFamilyName(int index, SkString* familyName) const override; + SkFontStyleSet* onCreateStyleSet(int index)const override; + + SkFontStyleSet* onMatchFamily(const char familyName[]) const override; + + SkTypeface* onMatchFamilyStyle(const char familyName[], const SkFontStyle&) const override; + SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&, + const char* bcp47[], int bcp47Count, + SkUnichar character) const override; + + // Note: all of these always return null + sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override; + sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, + int ttcIndex) const override; + sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>, + const SkFontArguments&) const override; + sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override; + + sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override; + +private: + std::vector<sk_sp<SkFontMgr>> fList; +}; + +#endif diff --git a/src/deps/skia/include/utils/SkPaintFilterCanvas.h b/src/deps/skia/include/utils/SkPaintFilterCanvas.h new file mode 100644 index 000000000..43b729106 --- /dev/null +++ b/src/deps/skia/include/utils/SkPaintFilterCanvas.h @@ -0,0 +1,107 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkPaintFilterCanvas_DEFINED +#define SkPaintFilterCanvas_DEFINED + +#include "include/core/SkCanvasVirtualEnforcer.h" +#include "include/utils/SkNWayCanvas.h" + +class SkAndroidFrameworkUtils; + +/** \class SkPaintFilterCanvas + + A utility proxy base class for implementing draw/paint filters. +*/ +class SK_API SkPaintFilterCanvas : public SkCanvasVirtualEnforcer<SkNWayCanvas> { +public: + /** + * The new SkPaintFilterCanvas is configured for forwarding to the + * specified canvas. Also copies the target canvas matrix and clip bounds. + */ + SkPaintFilterCanvas(SkCanvas* canvas); + + enum Type { + kPicture_Type, + }; + + // Forwarded to the wrapped canvas. + SkISize getBaseLayerSize() const override { return proxy()->getBaseLayerSize(); } + GrRecordingContext* recordingContext() override { return proxy()->recordingContext(); } + +protected: + /** + * Called with the paint that will be used to draw the specified type. + * The implementation may modify the paint as they wish. + * + * The result bool is used to determine whether the draw op is to be + * executed (true) or skipped (false). + * + * Note: The base implementation calls onFilter() for top-level/explicit paints only. + * To also filter encapsulated paints (e.g. SkPicture, SkTextBlob), clients may need to + * override the relevant methods (i.e. drawPicture, drawTextBlob). 
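     *
     * (Editor's note) A minimal concrete filter, shown as a sketch only: it halves the
     * opacity of every draw and never skips one.
     *
     *     class HalfAlphaCanvas : public SkPaintFilterCanvas {
     *     public:
     *         HalfAlphaCanvas(SkCanvas* target) : SkPaintFilterCanvas(target) {}
     *     protected:
     *         bool onFilter(SkPaint& paint) const override {
     *             paint.setAlphaf(paint.getAlphaf() * 0.5f);
     *             return true;   // keep the draw op
     *         }
     *     };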
+ */ + virtual bool onFilter(SkPaint& paint) const = 0; + + void onDrawPaint(const SkPaint&) override; + void onDrawBehind(const SkPaint&) override; + void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override; + void onDrawRect(const SkRect&, const SkPaint&) override; + void onDrawRRect(const SkRRect&, const SkPaint&) override; + void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override; + void onDrawRegion(const SkRegion&, const SkPaint&) override; + void onDrawOval(const SkRect&, const SkPaint&) override; + void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override; + void onDrawPath(const SkPath&, const SkPaint&) override; + + void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&, + const SkPaint*) override; + void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&, + const SkPaint*, SrcRectConstraint) override; + void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode, + const SkPaint*) override; + void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int, + SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override; + + void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override; + void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4], + const SkPoint texCoords[4], SkBlendMode, + const SkPaint& paint) override; + void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override; + void onDrawDrawable(SkDrawable*, const SkMatrix*) override; + + void onDrawGlyphRunList(const SkGlyphRunList&, const SkPaint&) override; + void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, + const SkPaint& paint) override; + void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) override; + void onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) override; + + void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&, + SkBlendMode) override; + void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[], + const SkSamplingOptions&,const SkPaint*, SrcRectConstraint) override; + + // Forwarded to the wrapped canvas. + sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override; + bool onPeekPixels(SkPixmap* pixmap) override; + bool onAccessTopLayerPixels(SkPixmap* pixmap) override; + SkImageInfo onImageInfo() const override; + bool onGetProps(SkSurfaceProps* props) const override; + +private: + class AutoPaintFilter; + + SkCanvas* proxy() const { SkASSERT(fList.count() == 1); return fList[0]; } + + SkPaintFilterCanvas* internal_private_asPaintFilterCanvas() const override { + return const_cast<SkPaintFilterCanvas*>(this); + } + + friend class SkAndroidFrameworkUtils; +}; + +#endif diff --git a/src/deps/skia/include/utils/SkParse.h b/src/deps/skia/include/utils/SkParse.h new file mode 100644 index 000000000..9a738bace --- /dev/null +++ b/src/deps/skia/include/utils/SkParse.h @@ -0,0 +1,32 @@ + +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + + +#ifndef SkParse_DEFINED +#define SkParse_DEFINED + +#include "include/core/SkColor.h" + +class SK_API SkParse { +public: + static int Count(const char str[]); // number of scalars or int values + static int Count(const char str[], char separator); + static const char* FindColor(const char str[], SkColor* value); + static const char* FindHex(const char str[], uint32_t* value); + static const char* FindMSec(const char str[], SkMSec* value); + static const char* FindNamedColor(const char str[], size_t len, SkColor* color); + static const char* FindS32(const char str[], int32_t* value); + static const char* FindScalar(const char str[], SkScalar* value); + static const char* FindScalars(const char str[], SkScalar value[], int count); + + static bool FindBool(const char str[], bool* value); + // return the index of str in list[], or -1 if not found + static int FindList(const char str[], const char list[]); +}; + +#endif diff --git a/src/deps/skia/include/utils/SkParsePath.h b/src/deps/skia/include/utils/SkParsePath.h new file mode 100644 index 000000000..f1c81b2f2 --- /dev/null +++ b/src/deps/skia/include/utils/SkParsePath.h @@ -0,0 +1,25 @@ + +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + + +#ifndef SkParsePath_DEFINED +#define SkParsePath_DEFINED + +#include "include/core/SkPath.h" + +class SkString; + +class SK_API SkParsePath { +public: + static bool FromSVGString(const char str[], SkPath*); + + enum class PathEncoding { Absolute, Relative }; + static void ToSVGString(const SkPath&, SkString*, PathEncoding = PathEncoding::Absolute); +}; + +#endif diff --git a/src/deps/skia/include/utils/SkRandom.h b/src/deps/skia/include/utils/SkRandom.h new file mode 100644 index 000000000..ba40732b9 --- /dev/null +++ b/src/deps/skia/include/utils/SkRandom.h @@ -0,0 +1,169 @@ +/* + * Copyright 2006 The Android Open Source Project + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRandom_DEFINED +#define SkRandom_DEFINED + +#include "include/core/SkScalar.h" +#include "include/private/SkFixed.h" +#include "include/private/SkFloatBits.h" + +/** \class SkRandom + + Utility class that implements pseudo random 32bit numbers using Marsaglia's + multiply-with-carry "mother of all" algorithm. Unlike rand(), this class holds + its own state, so that multiple instances can be used with no side-effects. + + Has a large period and all bits are well-randomized. + */ +class SkRandom { +public: + SkRandom() { init(0); } + SkRandom(uint32_t seed) { init(seed); } + SkRandom(const SkRandom& rand) : fK(rand.fK), fJ(rand.fJ) {} + + SkRandom& operator=(const SkRandom& rand) { + fK = rand.fK; + fJ = rand.fJ; + + return *this; + } + + /** Return the next pseudo random number as an unsigned 32bit value. + */ + uint32_t nextU() { + fK = kKMul*(fK & 0xffff) + (fK >> 16); + fJ = kJMul*(fJ & 0xffff) + (fJ >> 16); + return (((fK << 16) | (fK >> 16)) + fJ); + } + + /** Return the next pseudo random number as a signed 32bit value. 
+ */ + int32_t nextS() { return (int32_t)this->nextU(); } + + /** + * Returns value [0...1) as an IEEE float + */ + float nextF() { + int floatint = 0x3f800000 | (int)(this->nextU() >> 9); + float f = SkBits2Float(floatint) - 1.0f; + return f; + } + + /** + * Returns value [min...max) as a float + */ + float nextRangeF(float min, float max) { + return min + this->nextF() * (max - min); + } + + /** Return the next pseudo random number, as an unsigned value of + at most bitCount bits. + @param bitCount The maximum number of bits to be returned + */ + uint32_t nextBits(unsigned bitCount) { + SkASSERT(bitCount > 0 && bitCount <= 32); + return this->nextU() >> (32 - bitCount); + } + + /** Return the next pseudo random unsigned number, mapped to lie within + [min, max] inclusive. + */ + uint32_t nextRangeU(uint32_t min, uint32_t max) { + SkASSERT(min <= max); + uint32_t range = max - min + 1; + if (0 == range) { + return this->nextU(); + } else { + return min + this->nextU() % range; + } + } + + /** Return the next pseudo random unsigned number, mapped to lie within + [0, count). + */ + uint32_t nextULessThan(uint32_t count) { + SkASSERT(count > 0); + return this->nextRangeU(0, count - 1); + } + + /** Return the next pseudo random number expressed as a SkScalar + in the range [0..SK_Scalar1). + */ + SkScalar nextUScalar1() { return SkFixedToScalar(this->nextUFixed1()); } + + /** Return the next pseudo random number expressed as a SkScalar + in the range [min..max). + */ + SkScalar nextRangeScalar(SkScalar min, SkScalar max) { + return this->nextUScalar1() * (max - min) + min; + } + + /** Return the next pseudo random number expressed as a SkScalar + in the range [-SK_Scalar1..SK_Scalar1). + */ + SkScalar nextSScalar1() { return SkFixedToScalar(this->nextSFixed1()); } + + /** Return the next pseudo random number as a bool. + */ + bool nextBool() { return this->nextU() >= 0x80000000; } + + /** A biased version of nextBool(). + */ + bool nextBiasedBool(SkScalar fractionTrue) { + SkASSERT(fractionTrue >= 0 && fractionTrue <= SK_Scalar1); + return this->nextUScalar1() <= fractionTrue; + } + + /** Reset the random object. + */ + void setSeed(uint32_t seed) { init(seed); } + +private: + // Initialize state variables with LCG. + // We must ensure that both J and K are non-zero, otherwise the + // multiply-with-carry step will forevermore return zero. + void init(uint32_t seed) { + fK = NextLCG(seed); + if (0 == fK) { + fK = NextLCG(fK); + } + fJ = NextLCG(fK); + if (0 == fJ) { + fJ = NextLCG(fJ); + } + SkASSERT(0 != fK && 0 != fJ); + } + static uint32_t NextLCG(uint32_t seed) { return kMul*seed + kAdd; } + + /** Return the next pseudo random number expressed as an unsigned SkFixed + in the range [0..SK_Fixed1). + */ + SkFixed nextUFixed1() { return this->nextU() >> 16; } + + /** Return the next pseudo random number expressed as a signed SkFixed + in the range [-SK_Fixed1..SK_Fixed1). 
+ */ + SkFixed nextSFixed1() { return this->nextS() >> 15; } + + // See "Numerical Recipes in C", 1992 page 284 for these constants + // For the LCG that sets the initial state from a seed + enum { + kMul = 1664525, + kAdd = 1013904223 + }; + // Constants for the multiply-with-carry steps + enum { + kKMul = 30345, + kJMul = 18000, + }; + + uint32_t fK; + uint32_t fJ; +}; + +#endif diff --git a/src/deps/skia/include/utils/SkShadowUtils.h b/src/deps/skia/include/utils/SkShadowUtils.h new file mode 100644 index 000000000..b25770c1b --- /dev/null +++ b/src/deps/skia/include/utils/SkShadowUtils.h @@ -0,0 +1,85 @@ + +/* + * Copyright 2017 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef SkShadowUtils_DEFINED +#define SkShadowUtils_DEFINED + +#include "include/core/SkColor.h" +#include "include/core/SkPoint3.h" +#include "include/core/SkScalar.h" +#include "include/private/SkShadowFlags.h" + +class SkCanvas; +class SkMatrix; +class SkPath; +class SkResourceCache; + +class SK_API SkShadowUtils { +public: + /** + * Draw an offset spot shadow and outlining ambient shadow for the given path using a disc + * light. The shadow may be cached, depending on the path type and canvas matrix. If the + * matrix is perspective or the path is volatile, it will not be cached. + * + * @param canvas The canvas on which to draw the shadows. + * @param path The occluder used to generate the shadows. + * @param zPlaneParams Values for the plane function which returns the Z offset of the + * occluder from the canvas based on local x and y values (the current matrix is not applied). + * @param lightPos Generally, the 3D position of the light relative to the canvas plane. + * If kDirectionalLight_ShadowFlag is set, this specifies a vector pointing + * towards the light. + * @param lightRadius Generally, the radius of the disc light. + * If DirectionalLight_ShadowFlag is set, this specifies the amount of + * blur when the occluder is at Z offset == 1. The blur will grow linearly + * as the Z value increases. + * @param ambientColor The color of the ambient shadow. + * @param spotColor The color of the spot shadow. + * @param flags Options controlling opaque occluder optimizations, shadow appearance, + * and light position. See SkShadowFlags. + */ + static void DrawShadow(SkCanvas* canvas, const SkPath& path, const SkPoint3& zPlaneParams, + const SkPoint3& lightPos, SkScalar lightRadius, + SkColor ambientColor, SkColor spotColor, + uint32_t flags = SkShadowFlags::kNone_ShadowFlag); + + /** + * Generate bounding box for shadows relative to path. Includes both the ambient and spot + * shadow bounds. + * + * @param ctm Current transformation matrix to device space. + * @param path The occluder used to generate the shadows. + * @param zPlaneParams Values for the plane function which returns the Z offset of the + * occluder from the canvas based on local x and y values (the current matrix is not applied). + * @param lightPos Generally, the 3D position of the light relative to the canvas plane. + * If kDirectionalLight_ShadowFlag is set, this specifies a vector pointing + * towards the light. + * @param lightRadius Generally, the radius of the disc light. + * If DirectionalLight_ShadowFlag is set, this specifies the amount of + * blur when the occluder is at Z offset == 1. The blur will grow linearly + * as the Z value increases. + * @param flags Options controlling opaque occluder optimizations, shadow appearance, + * and light position. 
See SkShadowFlags. + * @param bounds Return value for shadow bounding box. + * @return Returns true if successful, false otherwise. + */ + static bool GetLocalBounds(const SkMatrix& ctm, const SkPath& path, + const SkPoint3& zPlaneParams, const SkPoint3& lightPos, + SkScalar lightRadius, uint32_t flags, SkRect* bounds); + + /** + * Helper routine to compute color values for one-pass tonal alpha. + * + * @param inAmbientColor Original ambient color + * @param inSpotColor Original spot color + * @param outAmbientColor Modified ambient color + * @param outSpotColor Modified spot color + */ + static void ComputeTonalColors(SkColor inAmbientColor, SkColor inSpotColor, + SkColor* outAmbientColor, SkColor* outSpotColor); +}; + +#endif diff --git a/src/deps/skia/include/utils/SkTextUtils.h b/src/deps/skia/include/utils/SkTextUtils.h new file mode 100644 index 000000000..6cd3771e3 --- /dev/null +++ b/src/deps/skia/include/utils/SkTextUtils.h @@ -0,0 +1,38 @@ +/* + * Copyright 2018 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkTextUtils_DEFINED +#define SkTextUtils_DEFINED + +#include "include/core/SkCanvas.h" +#include "include/core/SkFont.h" +#include "include/core/SkPaint.h" +#include "include/core/SkString.h" + +class SkPath; + +class SK_API SkTextUtils { +public: + enum Align { + kLeft_Align, + kCenter_Align, + kRight_Align, + }; + + static void Draw(SkCanvas*, const void* text, size_t size, SkTextEncoding, + SkScalar x, SkScalar y, const SkFont&, const SkPaint&, Align = kLeft_Align); + + static void DrawString(SkCanvas* canvas, const char text[], SkScalar x, SkScalar y, + const SkFont& font, const SkPaint& paint, Align align = kLeft_Align) { + Draw(canvas, text, strlen(text), SkTextEncoding::kUTF8, x, y, font, paint, align); + } + + static void GetPath(const void* text, size_t length, SkTextEncoding, SkScalar x, SkScalar y, + const SkFont&, SkPath*); +}; + +#endif diff --git a/src/deps/skia/include/utils/SkTraceEventPhase.h b/src/deps/skia/include/utils/SkTraceEventPhase.h new file mode 100644 index 000000000..38457be24 --- /dev/null +++ b/src/deps/skia/include/utils/SkTraceEventPhase.h @@ -0,0 +1,19 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +#ifndef SkTraceEventPhase_DEFINED +#define SkTraceEventPhase_DEFINED + +// Phase indicates the nature of an event entry. E.g. part of a begin/end pair. 
+#define TRACE_EVENT_PHASE_BEGIN ('B') +#define TRACE_EVENT_PHASE_END ('E') +#define TRACE_EVENT_PHASE_COMPLETE ('X') +#define TRACE_EVENT_PHASE_INSTANT ('I') +#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S') +#define TRACE_EVENT_PHASE_ASYNC_END ('F') +#define TRACE_EVENT_PHASE_COUNTER ('C') +#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N') +#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O') +#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D') + +#endif // SkTraceEventPhase_DEFINED diff --git a/src/deps/skia/include/utils/mac/BUILD.bazel b/src/deps/skia/include/utils/mac/BUILD.bazel new file mode 100644 index 000000000..5af933472 --- /dev/null +++ b/src/deps/skia/include/utils/mac/BUILD.bazel @@ -0,0 +1,13 @@ +load("//bazel:macros.bzl", "generated_cc_atom") + +generated_cc_atom( + name = "SkCGUtils_hdr", + hdrs = ["SkCGUtils.h"], + visibility = ["//:__subpackages__"], + deps = [ + "//include/core:SkImageInfo_hdr", + "//include/core:SkImage_hdr", + "//include/core:SkPixmap_hdr", + "//include/core:SkSize_hdr", + ], +) diff --git a/src/deps/skia/include/utils/mac/SkCGUtils.h b/src/deps/skia/include/utils/mac/SkCGUtils.h new file mode 100644 index 000000000..a320dd8d4 --- /dev/null +++ b/src/deps/skia/include/utils/mac/SkCGUtils.h @@ -0,0 +1,78 @@ + +/* + * Copyright 2011 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef SkCGUtils_DEFINED +#define SkCGUtils_DEFINED + +#include "include/core/SkImage.h" +#include "include/core/SkImageInfo.h" +#include "include/core/SkPixmap.h" +#include "include/core/SkSize.h" + +#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS) + +#ifdef SK_BUILD_FOR_MAC +#include <ApplicationServices/ApplicationServices.h> +#endif + +#ifdef SK_BUILD_FOR_IOS +#include <CoreGraphics/CoreGraphics.h> +#endif + +class SkBitmap; +class SkData; +class SkPixmap; +class SkStreamRewindable; + +SK_API CGContextRef SkCreateCGContext(const SkPixmap&); + +/** + * Given a CGImage, allocate an SkBitmap and copy the image's pixels into it. If scaleToFit is not + * null, use it to determine the size of the bitmap, and scale the image to fill the bitmap. + * Otherwise use the image's width/height. + * + * On failure, return false, and leave bitmap unchanged. + */ +SK_API bool SkCreateBitmapFromCGImage(SkBitmap* dst, CGImageRef src); + +SK_API sk_sp<SkImage> SkMakeImageFromCGImage(CGImageRef); + +/** + * Copy the pixels from src into the memory specified by info/rowBytes/dstPixels. On failure, + * return false (e.g. ImageInfo incompatible with src). + */ +SK_API bool SkCopyPixelsFromCGImage(const SkImageInfo& info, size_t rowBytes, void* dstPixels, + CGImageRef src); +static inline bool SkCopyPixelsFromCGImage(const SkPixmap& dst, CGImageRef src) { + return SkCopyPixelsFromCGImage(dst.info(), dst.rowBytes(), dst.writable_addr(), src); +} + +/** + * Create an imageref from the specified bitmap using the specified colorspace. + * If space is NULL, then CGColorSpaceCreateDeviceRGB() is used. + */ +SK_API CGImageRef SkCreateCGImageRefWithColorspace(const SkBitmap& bm, + CGColorSpaceRef space); + +/** + * Create an imageref from the specified bitmap using the colorspace returned + * by CGColorSpaceCreateDeviceRGB() + */ +static inline CGImageRef SkCreateCGImageRef(const SkBitmap& bm) { + return SkCreateCGImageRefWithColorspace(bm, nil); +} + +/** + * Draw the bitmap into the specified CG context. The bitmap will be converted + * to a CGImage using the generic RGB colorspace. 
(x,y) specifies the position + * of the top-left corner of the bitmap. The bitmap is converted using the + * colorspace returned by CGColorSpaceCreateDeviceRGB() + */ +void SkCGDrawBitmap(CGContextRef, const SkBitmap&, float x, float y); + +#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS) +#endif // SkCGUtils_DEFINED diff --git a/src/javascript/jsc/bindings/Blob.h b/src/javascript/jsc/bindings/Blob.h new file mode 100644 index 000000000..b6840415d --- /dev/null +++ b/src/javascript/jsc/bindings/Blob.h @@ -0,0 +1,8 @@ +#pragma once + +#include "root.h" + +namespace WebCore { +using Blob = Bun__Blob; + +}
\ No newline at end of file
diff --git a/src/javascript/jsc/bindings/headers-handwritten.h b/src/javascript/jsc/bindings/headers-handwritten.h index a190d694a..b9c5f04cc 100644 --- a/src/javascript/jsc/bindings/headers-handwritten.h +++ b/src/javascript/jsc/bindings/headers-handwritten.h @@ -176,6 +176,8 @@ typedef struct { uint64_t _value; } Bun__ArrayBuffer; +typedef struct Bun__Blob Bun__Blob; + #ifndef STRING_POINTER #define STRING_POINTER typedef struct StringPointer {
diff --git a/src/javascript/jsc/bindings/webcore/CanvasImageSource.h b/src/javascript/jsc/bindings/webcore/CanvasImageSource.h new file mode 100644 index 000000000..465542e72 --- /dev/null +++ b/src/javascript/jsc/bindings/webcore/CanvasImageSource.h @@ -0,0 +1,15 @@ +#pragma once + +#include "root.h" +#include "include/core/SkImage.h" + +namespace WebCore { + +class CanvasImageSource : public RefCounted<CanvasImageSource> { + WTF_MAKE_ISO_ALLOCATED(CanvasImageSource); + +public: + Blob* m_blob; // the Blob backing this image source (WIP) +}; + +} // namespace WebCore
\ No newline at end of file
diff --git a/src/javascript/jsc/bindings/webcore/DOMMatrix.h b/src/javascript/jsc/bindings/webcore/DOMMatrix.h new file mode 100644 index 000000000..168db3d3b --- /dev/null +++ b/src/javascript/jsc/bindings/webcore/DOMMatrix.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2017 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "DOMMatrix2DInit.h" + +namespace WebCore { + +struct DOMMatrixInit : DOMMatrix2DInit { + double m13 { 0 }; + double m14 { 0 }; + double m23 { 0 }; + double m24 { 0 }; + double m31 { 0 }; + double m32 { 0 }; + double m33 { 1 }; + double m34 { 0 }; + double m43 { 0 }; + double m44 { 1 }; + std::optional<bool> is2D; +}; + +} // namespace WebCore
diff --git a/src/javascript/jsc/bindings/webcore/OffscreenCanvasRenderingContext2D.h b/src/javascript/jsc/bindings/webcore/OffscreenCanvasRenderingContext2D.h index cdf5fa78a..3d6edaf16 100644 --- a/src/javascript/jsc/bindings/webcore/OffscreenCanvasRenderingContext2D.h +++ b/src/javascript/jsc/bindings/webcore/OffscreenCanvasRenderingContext2D.h @@ -7,6 +7,19 @@ #include "CanvasRenderingContext2DSettings.h" #include "CanvasDirection.h" #include "CanvasPath.h" +#include "CanvasTextAlign.h" +#include "CanvasLineCap.h" +#include "CanvasLineJoin.h" +#include "CanvasGradient.h" +#include "CanvasPattern.h" +#include "CanvasTextBaseline.h" +#include "ImageSmoothingQuality.h" +#include "CanvasFillRule.h" +#include "ImageData.h" +#include "CanvasImageSource.h" + +#include "include/core/SkPaint.h" +#include "include/core/SkColor.h" namespace WebCore { @@ -154,8 +167,6 @@ public: void setPath(Path2D&); Ref<Path2D> getPath() const; - void setUsesDisplayListDrawing(bool flag) { m_usesDisplayListDrawing = flag; }; - String font() const { return state().fontString(); } CanvasTextAlign textAlign() const { return state().canvasTextAlign(); } @@ -168,10 +179,55 @@ public: void setDirection(Direction); private: + using LineCap = SkPaint::Cap; + using LineJoin = SkPaint::Join; + using CanvasStyle = SkPaint::Style; + using Color = SkColor; + + struct State final { + State(); + + String unparsedStrokeColor; + String unparsedFillColor; + CanvasStyle strokeStyle; + CanvasStyle fillStyle; + double lineWidth; + LineCap lineCap; + LineJoin 
lineJoin; + double miterLimit; + FloatSize shadowOffset; + float shadowBlur; + Color shadowColor; + double globalAlpha; + CompositeOperator globalComposite; + SkBlendMode globalBlend; + // AffineTransform transform; + bool hasInvertibleTransform; + Vector<double> lineDash; + double lineDashOffset; + bool imageSmoothingEnabled; + ImageSmoothingQuality imageSmoothingQuality; + TextAlign textAlign; + TextBaseline textBaseline; + Direction direction; + + String unparsedFont; + FontProxy font; + + CanvasLineCap canvasLineCap() const; + CanvasLineJoin canvasLineJoin() const; + CanvasTextAlign canvasTextAlign() const; + CanvasTextBaseline canvasTextBaseline() const; + String fontString() const; + String globalCompositeOperationString() const; + String shadowColorString() const; + }; + State state() const { return m_state; } bool isOffscreen2d() const { return true; } // const FontProxy* fontProxy() final; Ref<OffscreenCanvas> m_canvas; + State m_state; }; } // namespace WebCore diff --git a/types/bun/bun.d.ts b/types/bun/bun.d.ts index bb7872d59..ddd645acf 100644 --- a/types/bun/bun.d.ts +++ b/types/bun/bun.d.ts @@ -1652,33 +1652,6 @@ declare global { type EventListenerOrEventListenerObject = EventListener | EventListenerObject; } -declare global { - interface Console { - assert(condition?: boolean, ...data: any[]): void; - clear(): void; - count(label?: string): void; - countReset(label?: string): void; - debug(...data: any[]): void; - dir(item?: any, options?: any): void; - dirxml(...data: any[]): void; - error(...data: any[]): void; - group(...data: any[]): void; - groupCollapsed(...data: any[]): void; - groupEnd(): void; - info(...data: any[]): void; - log(...data: any[]): void; - table(tabularData?: any, properties?: string[]): void; - time(label?: string): void; - timeEnd(label?: string): void; - timeLog(label?: string, ...data: any[]): void; - timeStamp(label?: string): void; - trace(...data: any[]): void; - warn(...data: any[]): void; - } - - var console: Console; -} - export {}; export interface SystemError extends Error { |
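
Note on the State struct added to OffscreenCanvasRenderingContext2D above: it stores stroke geometry directly in Skia's native enum types (SkPaint::Cap, SkPaint::Join), while the accessors it declares (canvasLineCap(), canvasLineJoin()) return the WebCore-facing canvas enums. The sketch below shows one way that translation could be implemented. It is not part of this commit; the CanvasLineCap/CanvasLineJoin enumerators are assumed here to follow the usual WebCore spelling (Butt/Round/Square and Round/Bevel/Miter), and the helper names are hypothetical.

// Hypothetical sketch, not part of this commit: mapping between the canvas-level
// enums and the SkPaint enums that OffscreenCanvasRenderingContext2D::State stores.
// CanvasLineCap/CanvasLineJoin are assumed to be plain enum classes as in WebCore.
#include "include/core/SkPaint.h"

enum class CanvasLineCap { Butt, Round, Square };
enum class CanvasLineJoin { Round, Bevel, Miter };

// Convert the stored SkPaint cap back to the value exposed through the canvas API.
static CanvasLineCap canvasLineCapFromSk(SkPaint::Cap cap)
{
    switch (cap) {
    case SkPaint::kRound_Cap:  return CanvasLineCap::Round;
    case SkPaint::kSquare_Cap: return CanvasLineCap::Square;
    default:                   return CanvasLineCap::Butt;
    }
}

// Convert an incoming canvas join into the SkPaint join stored in State.
static SkPaint::Join skJoinFromCanvas(CanvasLineJoin join)
{
    switch (join) {
    case CanvasLineJoin::Round: return SkPaint::kRound_Join;
    case CanvasLineJoin::Bevel: return SkPaint::kBevel_Join;
    default:                    return SkPaint::kMiter_Join;
    }
}

If the stored representation stays in Skia's types like this, the stroke state can be copied straight into an SkPaint at draw time, with enum/string conversion confined to the getters and setters.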