diff --git a/.ci/vulkan_prebuilt_helpers.sh b/.ci/vulkan_prebuilt_helpers.sh new file mode 100644 index 0000000000..293c9a1e76 --- /dev/null +++ b/.ci/vulkan_prebuilt_helpers.sh @@ -0,0 +1,103 @@ +# MIT License + +# Copyright (c) 2022 humbletim + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# helper functions for downloading/installing platform-specific Vulkan SDKs +# originally meant for use from GitHub Actions +# see: https://github.com/humbletim/install-vulkan-sdk +# -- humbletim 2022.02 + +# example of running manually: +# $ . vulkan_prebuilt_helpers. 
+# $ VULKAN_SDK_VERSION=1.3.204.0 download_linux # fetches vulkan_sdk.tar.gz +# $ VULKAN_SDK=$PWD/VULKAN_SDK install_linux # installs + +function _os_filename() { + case $1 in + mac) echo vulkan_sdk.zip ;; + linux) echo vulkan_sdk.tar.gz ;; + windows) echo vulkan_sdk.exe ;; + *) echo "unknown $1" >&2 ; exit 9 ;; + esac +} + +function download_vulkan_installer() { + local os=$1 + local filename=$(_os_filename $os) + local url=https://sdk.lunarg.com/sdk/download/$VULKAN_SDK_VERSION/$os/$filename?Human=true + echo "_download_os_installer $os $filename $url" >&2 + if [[ -f $filename ]] ; then + echo "using cached: $filename" >&2 + else + curl --fail-with-body -s -L -o $filename.tmp $url || { echo "curl failed with error code: $?" >&2 ; curl -s -L --head $url >&2 ; exit 32 ; } + test -f $filename.tmp + mv -v $filename.tmp $filename + fi + ls -lh $filename >&2 +} + +function unpack_vulkan_installer() { + local os=$1 + local filename=$(_os_filename $os) + test -f $filename + install_${os} +} + +function install_linux() { + test -d $VULKAN_SDK && test -f vulkan_sdk.tar.gz + echo "extract just the SDK's prebuilt binaries ($VULKAN_SDK_VERSION/x86_64) from vulkan_sdk.tar.gz into $VULKAN_SDK" >&2 + tar -C "$VULKAN_SDK" --strip-components 2 -xf vulkan_sdk.tar.gz $VULKAN_SDK_VERSION/x86_64 +} + +function install_windows() { + test -d $VULKAN_SDK && test -f vulkan_sdk.exe + ./vulkan_sdk.exe in --al --am -c -t $VULKAN_SDK +} + +function install_mac() { + test -d $VULKAN_SDK && test -f vulkan_sdk.zip + unzip vulkan_sdk.zip + local InstallVulkan + if [[ -d InstallVulkan-${VULKAN_SDK_VERSION}.app/Contents ]] ; then + InstallVulkan=InstallVulkan-${VULKAN_SDK_VERSION} + elif [[ -d vulkansdk-macOS-${VULKAN_SDK_VERSION}.app/Contents ]] ; then + InstallVulkan=vulkansdk-macOS-${VULKAN_SDK_VERSION} + elif [[ -d InstallVulkan.app/Contents ]] ; then + InstallVulkan=InstallVulkan + else + echo "expecting ..vulkan.app/Contents folder (perhaps lunarg changed the archive layout again?): 
vulkan_sdk.zip" >&2 + echo "file vulkan_sdk.zip" >&2 + file vulkan_sdk.zip + echo "unzip -t vulkan_sdk.zip" >&2 + unzip -t vulkan_sdk.zip + exit 7 + fi + echo "recognized zip layout 'vulkan_sdk.zip' ${InstallVulkan}.app/Contents" >&2 + local sdk_temp=${VULKAN_SDK}.tmp + sudo ${InstallVulkan}.app/Contents/MacOS/${InstallVulkan} --root "$sdk_temp" --accept-licenses --default-answer --confirm-command install + du -hs $sdk_temp + test -d $sdk_temp/macOS || { echo "unrecognized dmg folder layout: $sdk_temp" ; ls -l $sdk_temp ; exit 10 ; } + cp -r $sdk_temp/macOS/* $VULKAN_SDK/ + if [[ -d ${InstallVulkan}.app/Contents ]] ; then + sudo rm -rf "$sdk_temp" + rm -rf ${InstallVulkan}.app + fi +} diff --git a/.ci/vulkan_setup_env.sh b/.ci/vulkan_setup_env.sh new file mode 100644 index 0000000000..fbd6bb8202 --- /dev/null +++ b/.ci/vulkan_setup_env.sh @@ -0,0 +1,55 @@ +# MIT License + +# Copyright (c) 2022 humbletim + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# helper functions for downloading/installing platform-specific Vulkan SDKs +# originally meant for use from GitHub Actions +# see: https://github.com/humbletim/install-vulkan-sdk +# -- humbletim 2022.02 + +function preset_env() { + basedir=$PWD + runner_os=${RUNNER_OS:-`uname -s`} + case $runner_os in + macOS|Darwin) os=mac ;; + Linux) os=linux ;; + Windows|MINGW*) os=windows ; basedir=$(pwd -W) ;; + *) echo "unknown runner_os: $runner_os" ; exit 7 ; ;; + esac + version='latest' # hi hello + sdk_dir=${VULKAN_SDK:-$basedir/VULKAN_SDK} + test -d $sdk_dir || mkdir -pv $sdk_dir + if [[ $version == 'latest' ]] ; then + url=https://vulkan.lunarg.com/sdk/latest/$os.txt + echo "note: resolving '$version' for '$os' via webservices lookup: $url" >&2 + version=$(curl -sL $url) + test -n "$version" || { echo "could not resolve latest version" ; exit 9 ; } + echo "::notice title=Using Vulkan SDK $version::resolved via '$url'" + fi + + export VULKAN_SDK="$sdk_dir" + export VULKAN_SDK_VERSION="$version" + export VULKAN_SDK_PLATFORM="$os" + + echo "VULKAN_SDK=$sdk_dir" >> "$GITHUB_ENV" + echo "VULKAN_SDK_VERSION=$version" >> "$GITHUB_ENV" + echo "VULKAN_SDK_PLATFORM=$os" >> "$GITHUB_ENV" +} \ No newline at end of file diff --git a/.github/workflows/ci-workflow.yml b/.github/workflows/ci-workflow.yml index 40f5b69def..769fc2938f 100644 --- a/.github/workflows/ci-workflow.yml +++ b/.github/workflows/ci-workflow.yml @@ -35,13 +35,22 @@ jobs: with: repository: etternagame/CrashpadTools path: tools + + - name: Install Vulkan SDK + shell: bash + run: | + . main/.ci/vulkan_setup_env.sh + preset_env + . 
main/.ci/vulkan_prebuilt_helpers.sh + download_vulkan_installer ${VULKAN_SDK_PLATFORM} + unpack_vulkan_installer ${VULKAN_SDK_PLATFORM} - name: Install apt packages run: sudo apt update && sudo apt install ${{ matrix.cfg.cpp-version }} nasm ninja-build libglew-dev libxrandr-dev libxtst-dev libpulse-dev libasound-dev libogg-dev libvorbis-dev xorg-dev libcurl4-openssl-dev - name: Install OpenSSL 3.5 run: mkdir -p tmp/openssl && cd tmp/openssl && wget https://github.com/openssl/openssl/releases/download/openssl-3.5.1/openssl-3.5.1.tar.gz && tar -xf openssl-3.5.1.tar.gz && cd openssl-3.5.1 && ./Configure --prefix="${HOME}/toolchain" && make -j `nproc` && make test -j `nproc` && sudo make install - name: Generate CMake - run: mkdir main/build && cd main/build && cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTS=FALSE -DBUILD_EXAMPLES=FALSE -DOPENSSL_ROOT_DIR=~/toolchain/ .. + run: mkdir main/build && cd main/build && cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTS=FALSE -DBUILD_EXAMPLES=FALSE -DOPENSSL_ROOT_DIR=~/toolchain/ -DCMAKE_CXX_FLAGS="-D_DEBUG" .. env: CXX: ${{matrix.cfg.cpp-version}} @@ -123,6 +132,15 @@ jobs: repository: etternagame/CrashpadTools path: tools + - name: Install Vulkan SDK + shell: bash + run: | + . main/.ci/vulkan_setup_env.sh + preset_env + . main/.ci/vulkan_prebuilt_helpers.sh + download_vulkan_installer ${VULKAN_SDK_PLATFORM} + unpack_vulkan_installer ${VULKAN_SDK_PLATFORM} + - name: Install homebrew packages run: brew install cmake nasm ninja openssl @@ -234,6 +252,15 @@ jobs: uses: actions/setup-python@v6 with: python-version: '3.12' + + - name: Install Vulkan SDK + shell: bash + run: | + . main/.ci/vulkan_setup_env.sh + preset_env + . 
main/.ci/vulkan_prebuilt_helpers.sh + download_vulkan_installer ${VULKAN_SDK_PLATFORM} + unpack_vulkan_installer ${VULKAN_SDK_PLATFORM} - name: Update CPack Environment if: ${{matrix.cfg.dist}} @@ -325,6 +352,15 @@ jobs: repository: etternagame/CrashpadTools path: tools + - name: Install Vulkan SDK + shell: bash + run: | + . main/.ci/vulkan_setup_env.sh + preset_env + . main/.ci/vulkan_prebuilt_helpers.sh + download_vulkan_installer ${VULKAN_SDK_PLATFORM} + unpack_vulkan_installer ${VULKAN_SDK_PLATFORM} + - name: Install homebrew packages run: brew install cmake nasm ninja openssl && brew tap etternagame/etterna && brew update && HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 @@ -420,6 +456,15 @@ jobs: repository: nico-abram/dxsdk path: dxsdk + - name: Install Vulkan SDK + shell: bash + run: | + . main/.ci/vulkan_setup_env.sh + preset_env + . main/.ci/vulkan_prebuilt_helpers.sh + download_vulkan_installer ${VULKAN_SDK_PLATFORM} + unpack_vulkan_installer ${VULKAN_SDK_PLATFORM} + - name: Update Environment Variables run: | echo "${{github.workspace}}/tools/win/" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append diff --git a/CMakeLists.txt b/CMakeLists.txt index 08c0195fb6..41d1b7601f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,7 +15,8 @@ set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) # Tell CMake to use the new policy for CMP ## We statically link the CRT by default. If you would like to dynamically ## link the CRT, you can append "DLL" to the end of the variable below. 
## More Info: https://cmake.org/cmake/help/latest/variable/CMAKE_MSVC_RUNTIME_LIBRARY.html -set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>") + +set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL") # PROJECT WIDE SETUP project(Etterna @@ -42,6 +43,7 @@ add_executable(Etterna) # Project Compile Options set(WITH_CRASHPAD TRUE CACHE BOOL "Compile with Crash Handler (Requires depot_tools installed)") +set(WITH_VULKAN TRUE CACHE BOOL "Compile Vulkan support (Requires Vulkan SDK to be installed and VULKAN_SDK environment variable to be set to SDK root dir)") ## Setting Target Properties ### Set a different name for each output binary depending on what build configuration is. @@ -115,8 +117,19 @@ target_link_libraries(Etterna PRIVATE plog::plog) target_link_libraries(Etterna PRIVATE nowide::nowide) target_link_libraries(Etterna PRIVATE ghc_filesystem) -if(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "arm64") - target_link_libraries(Etterna PRIVATE sse2neon) +if(WITH_VULKAN) + add_compile_definitions(Etterna PRIVATE WITH_VULKAN) + target_link_libraries(Etterna PRIVATE vk-bootstrap::vk-bootstrap) + target_link_libraries(Etterna PRIVATE VulkanMemoryAllocator) + + find_package(Vulkan REQUIRED COMPONENTS shaderc_combined) + target_link_libraries(Etterna PRIVATE + Vulkan::Vulkan + Vulkan::shaderc_combined) + + if(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "arm64") + target_link_libraries(Etterna PRIVATE sse2neon) + endif() endif() # If the user wants crashpad, and the target exists (in-case diff --git a/Data/Shaders/Vulkan/fragment.glsl b/Data/Shaders/Vulkan/fragment.glsl new file mode 100644 index 0000000000..07a9a46a24 --- /dev/null +++ b/Data/Shaders/Vulkan/fragment.glsl @@ -0,0 +1,23 @@ +#version 460 +#extension GL_EXT_nonuniform_qualifier : enable +#extension GL_EXT_samplerless_texture_functions : enable + +layout(set = 0, binding = 2) uniform texture2D textures[]; +layout(set = 0, binding = 3) uniform sampler samplers[]; + +layout(location = 0) in vec4 vertexColor; 
+layout(location = 1) flat in uint textureIndex; +layout (location = 2) flat in uint samplerIndex; +layout(location = 3) in vec2 vertexUV; + +layout(location = 0) out vec4 fragmentColor; + +void main() { + if(textureIndex == 0){ + fragmentColor = vertexColor; + return; + } + + vec4 textureColor = texture(sampler2D(textures[nonuniformEXT(textureIndex)], samplers[nonuniformEXT(samplerIndex)]), vertexUV); + fragmentColor = vertexColor * textureColor; +} \ No newline at end of file diff --git a/Data/Shaders/Vulkan/vertex.glsl b/Data/Shaders/Vulkan/vertex.glsl new file mode 100644 index 0000000000..ba75352a7a --- /dev/null +++ b/Data/Shaders/Vulkan/vertex.glsl @@ -0,0 +1,70 @@ +#version 460 +#extension GL_EXT_nonuniform_qualifier : enable + +struct Vertex { + float pos[3]; + float normal[3]; + uint color; + float uv[2]; + uint MatrixIndex; + uint TextureIndex; + uint SamplerIndex; +}; + +layout(std430, set = 0, binding = 0) readonly buffer VertexBuffer { + Vertex vertices[]; +}; + +struct MatrixState { + mat4 wvp; + mat4 texture; +}; + +layout(std430, set = 0, binding = 1) readonly buffer MatrixStateBuffer { + MatrixState matrices[]; +}; + +layout(location = 0) out vec4 vertexColor; +layout(location = 1) out uint textureIndex; +layout(location = 2) out uint samplerIndex; +layout(location = 3) out vec2 vertexUV; + +vec2 unpackVec2(float array[2]){ + return vec2(array[0], array[1]); +} + +vec3 unpackVec3(float array[3]){ + return vec3(array[0], array[1], array[2]); +} + +vec4 unpackColor(uint c) +{ + float b = float(c & 0xFFu); + float g = float((c >> 8) & 0xFFu); + float r = float((c >> 16) & 0xFFu); + float a = float((c >> 24) & 0xFFu); + + return vec4(r, g, b, a) / 255.0; +} + +void main() { + Vertex currentVertex = vertices[gl_VertexIndex]; + + vertexColor = unpackColor(currentVertex.color); + + textureIndex = currentVertex.TextureIndex; + samplerIndex = currentVertex.SamplerIndex; + + uint matrixIndex = currentVertex.MatrixIndex; + mat4 wvp = 
matrices[matrixIndex].wvp; + mat4 tex = matrices[matrixIndex].texture; + + vec4 pos = wvp * vec4(unpackVec3(currentVertex.pos), 1.0); + pos.z = pos.z * 0.5 + 0.5; // [-1; 1] -> [0; 1] because reasons + + gl_Position = pos; + + vertexUV = unpackVec2(currentVertex.uv); + vertexUV.x += tex[3][0]; + vertexUV.y += tex[3][1]; +} \ No newline at end of file diff --git a/Docs/Building.md b/Docs/Building.md index 965706a7fb..65947bd72e 100644 --- a/Docs/Building.md +++ b/Docs/Building.md @@ -52,6 +52,8 @@ cmake -DOPENSSL_ROOT_DIR="/usr/local/opt/openssl" -G "Xcode" .. # macOS - macOS: `brew install openssl` - Windows: A CMake compatible version of OpenSSL is available at [Shining Light Productions](https://slproweb.com/products/Win32OpenSSL.html) website. You will need the 32bit and 64bit installers if you plan on building both versions. It's recommended to uninstall old versions to make sure CMake can find the correct latest version. Direct links: [32bit](https://slproweb.com/download/Win32OpenSSL-1_1_1t.exe), [64bit](https://slproweb.com/download/Win64OpenSSL-1_1_1t.exe). If these links are dead, look for the OpenSSL v1.1.1 install links (EXE or MSI) on the Shining Light Productions site. Typically we use the full versions rather than the Light versions. - [depot_tools](https://dev.chromium.org/developers/how-tos/install-depot-tools) - Installation is platform specific. To skip installing this, follow the relevant instructions in [CLI Project Generation](CLI-Project-Generation). +- [Vulkan SDK](https://vulkan.lunarg.com/sdk/home) - note that you might need to set the `VULKAN_SDK` environment variable +to the SDK's root directory [for CMake to find](https://cmake.org/cmake/help/latest/module/FindVulkan.html#hints). 
### Linux Dependencies diff --git a/extern/CMakeLists.txt b/extern/CMakeLists.txt index cfab5a57f0..911feb3f5a 100644 --- a/extern/CMakeLists.txt +++ b/extern/CMakeLists.txt @@ -73,6 +73,12 @@ add_subdirectory(newogg) add_subdirectory(newvorbis) target_link_libraries(vorbis ogg) +if(WITH_VULKAN) + add_subdirectory(vk-bootstrap) + add_subdirectory(VulkanMemoryAllocator) + list(APPEND EXT_TARGETS vk-bootstrap VulkanMemoryAllocator) +endif() + # Only enable crashpad if the user wants it. if(WITH_CRASHPAD) add_subdirectory(crashpad) diff --git a/extern/VulkanMemoryAllocator/CMakeLists.txt b/extern/VulkanMemoryAllocator/CMakeLists.txt new file mode 100644 index 0000000000..1547f472df --- /dev/null +++ b/extern/VulkanMemoryAllocator/CMakeLists.txt @@ -0,0 +1,72 @@ +# +# Copyright (c) 2017-2026 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# + +cmake_minimum_required(VERSION 3.15...3.26) + +project(VMA VERSION 3.3.0 LANGUAGES CXX) + +add_library(VulkanMemoryAllocator INTERFACE) +add_library(GPUOpen::VulkanMemoryAllocator ALIAS VulkanMemoryAllocator) +add_library(VulkanMemoryAllocator::Headers ALIAS VulkanMemoryAllocator) + +target_include_directories(VulkanMemoryAllocator INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>) + +if (CMAKE_VERSION VERSION_LESS "3.21") + # https://cmake.org/cmake/help/latest/variable/PROJECT_IS_TOP_LEVEL.html + string(COMPARE EQUAL ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_SOURCE_DIR} PROJECT_IS_TOP_LEVEL) +endif() + +option(VMA_ENABLE_INSTALL "Install VulkanMemoryAllocator" ${PROJECT_IS_TOP_LEVEL}) +if (VMA_ENABLE_INSTALL) + include(GNUInstallDirs) + include(CMakePackageConfigHelpers) + + install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + install(TARGETS VulkanMemoryAllocator EXPORT VulkanMemoryAllocatorConfig INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + install(EXPORT VulkanMemoryAllocatorConfig NAMESPACE "GPUOpen::" DESTINATION "share/cmake/VulkanMemoryAllocator") + + write_basic_package_version_file(VulkanMemoryAllocatorConfigVersion.cmake COMPATIBILITY SameMajorVersion ARCH_INDEPENDENT) + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/VulkanMemoryAllocatorConfigVersion.cmake" DESTINATION "share/cmake/VulkanMemoryAllocator") + + option(VMA_BUILD_DOCUMENTATION "Create and install the HTML based API documentation") + if(VMA_BUILD_DOCUMENTATION) + find_package(Doxygen REQUIRED) + # set input and output files + set(DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile) + set(DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) + # request to configure the file + configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY) + # note the option ALL which allows to build the docs together with the application + add_custom_target(doc_doxygen ALL + COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating API documentation with 
Doxygen" + VERBATIM + ) + install(DIRECTORY docs/ DESTINATION "${CMAKE_INSTALL_DATADIR}/doc/VulkanMemoryAllocator" PATTERN ".nojekyll" EXCLUDE) + endif() + + option(VMA_BUILD_SAMPLES "Build samples") + if (VMA_BUILD_SAMPLES) + add_subdirectory(src) + endif() +endif() diff --git a/extern/VulkanMemoryAllocator/LICENSE.txt b/extern/VulkanMemoryAllocator/LICENSE.txt new file mode 100644 index 0000000000..be95175d6a --- /dev/null +++ b/extern/VulkanMemoryAllocator/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017-2026 Advanced Micro Devices, Inc. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/extern/VulkanMemoryAllocator/README.md b/extern/VulkanMemoryAllocator/README.md new file mode 100644 index 0000000000..aa0c4c3fa3 --- /dev/null +++ b/extern/VulkanMemoryAllocator/README.md @@ -0,0 +1,191 @@ +# Vulkan Memory Allocator + +Easy to integrate Vulkan memory allocation library. 
+ +**Documentation:** Browse online: [Vulkan Memory Allocator](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/) (generated from Doxygen-style comments in [include/vk_mem_alloc.h](include/vk_mem_alloc.h)) + +**License:** MIT. See [LICENSE.txt](LICENSE.txt) + +**Changelog:** See [CHANGELOG.md](CHANGELOG.md) + +**Product page:** [Vulkan Memory Allocator on GPUOpen](https://gpuopen.com/vulkan-memory-allocator/) + +[![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.svg)](http://isitmaintained.com/project/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator "Average time to resolve an issue") + +# Problem + +Memory allocation and resource (buffer and image) creation in Vulkan is difficult (comparing to older graphics APIs, like D3D11 or OpenGL) for several reasons: + +- It requires a lot of boilerplate code, just like everything else in Vulkan, because it is a low-level and high-performance API. +- There is additional level of indirection: `VkDeviceMemory` is allocated separately from creating `VkBuffer`/`VkImage` and they must be bound together. +- Driver must be queried for supported memory heaps and memory types. Different GPU vendors provide different types of it. +- It is recommended to allocate bigger chunks of memory and assign parts of them to particular resources, as there is a limit on maximum number of memory blocks that can be allocated. + +# Features + +This library can help game developers to manage memory allocations and resource creation by offering some higher-level functions: + +1. Functions that help to choose correct and optimal memory type based on intended usage of the memory. + - Required or preferred traits of the memory are expressed using higher-level description comparing to Vulkan flags. +2. Functions that allocate memory blocks, reserve and return parts of them (`VkDeviceMemory` + offset + size) to the user. 
+ - Library keeps track of allocated memory blocks, used and unused ranges inside them, finds best matching unused ranges for new allocations, respects all the rules of alignment and buffer/image granularity. +3. Functions that can create an image/buffer, allocate memory for it and bind them together - all in one call. + +Additional features: + +- Well-documented - description of all functions and structures provided, along with chapters that contain general description and example code. +- Thread-safety: Library is designed to be used in multithreaded code. Access to a single device memory block referred by different buffers and textures (binding, mapping) is synchronized internally. Memory mapping is reference-counted. +- Configuration: Fill optional members of `VmaAllocatorCreateInfo` structure to provide custom CPU memory allocator, pointers to Vulkan functions and other parameters. +- Customization and integration with custom engines: Predefine appropriate macros to provide your own implementation of all external facilities used by the library like assert, mutex, atomic. +- Support for memory mapping, reference-counted internally. Support for persistently mapped memory: Just allocate with appropriate flag and access the pointer to already mapped memory. +- Support for non-coherent memory. Functions that flush/invalidate memory. `nonCoherentAtomSize` is respected automatically. +- Support for resource aliasing (overlap). +- Support for sparse binding and sparse residency: Convenience functions that allocate or free multiple memory pages at once. +- Custom memory pools: Create a pool with desired parameters (e.g. fixed or limited maximum size) and allocate memory out of it. +- Linear allocator: Create a pool with linear algorithm and use it for much faster allocations and deallocations in free-at-once, stack, double stack, or ring buffer fashion. +- Support for Vulkan 1.0...1.4. 
+- Support for extensions (and equivalent functionality included in new core Vulkan versions): + - VK_KHR_dedicated_allocation: Just enable it and it will be used automatically by the library. + - VK_KHR_bind_memory2. + - VK_KHR_maintenance4. + - VK_KHR_maintenance5, including `VkBufferUsageFlags2CreateInfoKHR`. + - VK_EXT_memory_budget: Used internally if available to query for current usage and budget. If not available, it falls back to an estimation based on memory heap sizes. + - VK_KHR_buffer_device_address: Flag `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR` is automatically added to memory allocations where needed. + - VK_EXT_memory_priority: Set `priority` of allocations or custom pools and it will be set automatically using this extension. + - VK_AMD_device_coherent_memory. + - VK_KHR_external_memory_win32. +- Defragmentation of GPU and CPU memory: Let the library move data around to free some memory blocks and make your allocations better compacted. +- Statistics: Obtain brief or detailed statistics about the amount of memory used, unused, number of allocated blocks, number of allocations etc. - globally, per memory heap, and per memory type. +- Debug annotations: Associate custom `void* pUserData` and debug `char* pName` with each allocation. +- JSON dump: Obtain a string in JSON format with detailed map of internal state, including list of allocations, their string names, and gaps between them. +- Convert this JSON dump into a picture to visualize your memory. See [tools/GpuMemDumpVis](tools/GpuMemDumpVis/README.md). +- Debugging incorrect memory usage: Enable initialization of all allocated memory with a bit pattern to detect usage of uninitialized or freed memory. Enable validation of a magic number after every allocation to detect out-of-bounds memory corruption. +- Support for interoperability with OpenGL, Direct3D, and other graphics APIs through external memory export. 
+- Virtual allocator: Interface for using core allocation algorithm to allocate any custom data, e.g. pieces of one large buffer. + +# Prerequisites + +- Self-contained C++ library in single header file. No external dependencies other than standard C and C++ library and of course Vulkan. Some features of C++14 used. STL containers, RTTI, or C++ exceptions are not used. +- Public interface in C, in same convention as Vulkan API. Implementation in C++. +- Error handling implemented by returning `VkResult` error codes - same way as in Vulkan. +- Interface documented using Doxygen-style comments. +- Platform-independent, but developed and tested on Windows using Visual Studio. Continuous integration setup for Windows and Linux. Used also on Android, MacOS, and other platforms. + +# Example + +Basic usage of this library is very simple. Advanced features are optional. After you created global `VmaAllocator` object, a complete code needed to create a buffer may look like this: + +```cpp +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +``` + +With this one function call: + +1. `VkBuffer` is created. +2. `VkDeviceMemory` block is allocated if needed. +3. An unused region of the memory block is bound to this buffer. + +`VmaAllocation` is an object that represents memory assigned to this buffer. It can be queried for parameters like `VkDeviceMemory` handle and offset. + +# How to build + +On Windows it is recommended to use [CMake GUI](https://cmake.org/runningcmake/). 
+ +Alternatively you can generate/open a Visual Studio from the command line: + +```sh +# By default CMake picks the newest version of Visual Studio it can use +cmake -S . -B build -D VMA_BUILD_SAMPLES=ON +cmake --open build +``` + +On Linux: + +```sh +cmake -S . -B build +# Since VMA has no source files, you can skip to installation immediately +cmake --install build --prefix build/install +``` + +## How to use + +After calling either `find_package` or `add_subdirectory` simply link the library. +This automatically handles configuring the include directory. Example: + +```cmake +find_package(VulkanMemoryAllocator CONFIG REQUIRED) +target_link_libraries(YourGameEngine PRIVATE GPUOpen::VulkanMemoryAllocator) +``` + +For more info on using CMake visit the official [CMake documentation](https://cmake.org/cmake/help/latest/index.html). + +## Building using vcpkg + +You can download and install VulkanMemoryAllocator using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + ./vcpkg install vulkan-memory-allocator + +The VulkanMemoryAllocator port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. + +# Binaries + +The release comes with precompiled binary executable for "VulkanSample" application which contains test suite. It is compiled using Visual Studio 2022, so it requires appropriate libraries to work, including "MSVCP140.dll", "VCRUNTIME140.dll", "VCRUNTIME140_1.dll". If the launch fails with error message telling about those files missing, please download and install [Microsoft Visual C++ Redistributable](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads), "X64" version. 
+ +# Read more + +See **[Documentation](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/)**. + +# Software using this library + +- **[Blender](https://www.blender.org)** +- **[Qt Project](https://github.com/qt)** +- **[Baldur's Gate III](https://www.mobygames.com/game/150689/baldurs-gate-iii/credits/windows/?autoplatform=true)** +- **[Cyberpunk 2077](https://www.mobygames.com/game/128136/cyberpunk-2077/credits/windows/?autoplatform=true)** +- **[X-Plane](https://x-plane.com/)** +- **[Detroit: Become Human](https://gpuopen.com/learn/porting-detroit-3/)** +- **[Vulkan Samples](https://github.com/LunarG/VulkanSamples)** - official Khronos Vulkan samples. License: Apache-style. +- **[GFXReconstruct](https://github.com/LunarG/gfxreconstruct)** - a tools for the capture and replay of graphics API calls. License: MIT. +- **[Anvil](https://github.com/GPUOpen-LibrariesAndSDKs/Anvil)** - cross-platform framework for Vulkan. License: MIT. +- **[Filament](https://github.com/google/filament)** - physically based rendering engine for Android, Windows, Linux and macOS, from Google. Apache License 2.0. +- **[Atypical Games - proprietary game engine](https://developer.samsung.com/galaxy-gamedev/gamedev-blog/infinitejet.html)** +- **[Flax Engine](https://flaxengine.com/)** +- **[Godot Engine](https://github.com/godotengine/godot/)** - multi-platform 2D and 3D game engine. License: MIT. +- **[Lightweight Java Game Library (LWJGL)](https://www.lwjgl.org/)** - includes binding of the library for Java. License: BSD. +- **[LightweightVK](https://github.com/corporateshark/lightweightvk)** - lightweight C++ bindless Vulkan 1.3 wrapper. License: MIT. +- **[PowerVR SDK](https://github.com/powervr-graphics/Native_SDK)** - C++ cross-platform 3D graphics SDK, from Imagination. License: MIT. +- **[Skia](https://github.com/google/skia)** - complete 2D graphic library for drawing Text, Geometries, and Images, from Google. 
+- **[The Forge](https://github.com/ConfettiFX/The-Forge)** - cross-platform rendering framework. Apache License 2.0. +- **[VK9](https://github.com/disks86/VK9)** - Direct3D 9 compatibility layer using Vulkan. Zlib license. +- **[vkDOOM3](https://github.com/DustinHLand/vkDOOM3)** - Vulkan port of GPL DOOM 3 BFG Edition. License: GNU GPL. +- **[vkQuake2](https://github.com/kondrak/vkQuake2)** - vanilla Quake 2 with Vulkan support. License: GNU GPL. +- **[Vulkan Best Practice for Mobile Developers](https://github.com/ARM-software/vulkan_best_practice_for_mobile_developers)** from ARM. License: MIT. +- **[RPCS3](https://github.com/RPCS3/rpcs3)** - PlayStation 3 emulator/debugger. License: GNU GPLv2. +- **[PPSSPP](https://github.com/hrydgard/ppsspp)** - Playstation Portable emulator/debugger. License: GNU GPLv2+. +- **[Wicked Engine](https://github.com/turanszkij/WickedEngine)** - 3D engine with modern graphics + +[Many other projects on GitHub](https://github.com/search?q=AMD_VULKAN_MEMORY_ALLOCATOR_H&type=Code) and some game development studios that use Vulkan in their games. + +# See also + +- **[D3D12 Memory Allocator](https://github.com/GPUOpen-LibrariesAndSDKs/D3D12MemoryAllocator)** - equivalent library for Direct3D 12. License: MIT. +- **[Awesome Vulkan](https://github.com/vinjn/awesome-vulkan)** - a curated list of awesome Vulkan libraries, debuggers and resources. +- **[vcpkg](https://github.com/Microsoft/vcpkg)** dependency manager from Microsoft also offers a port of this library. +- **[VulkanMemoryAllocator-Hpp](https://github.com/YaaZ/VulkanMemoryAllocator-Hpp)** - C++ binding for this library. License: CC0-1.0. +- **[PyVMA](https://github.com/realitix/pyvma)** - Python wrapper for this library. Author: Jean-Sébastien B. (@realitix). License: Apache 2.0. +- **[vk-mem](https://github.com/gwihlidal/vk-mem-rs)** - Rust binding for this library. Author: Graham Wihlidal. License: Apache 2.0 or MIT. 
+- **[Haskell bindings](https://hackage.haskell.org/package/VulkanMemoryAllocator)**, **[github](https://github.com/expipiplus1/vulkan/tree/master/VulkanMemoryAllocator)** - Haskell bindings for this library. Author: Ellie Hermaszewska (@expipiplus1). License BSD-3-Clause. +- **[vma_sample_sdl](https://github.com/rextimmy/vma_sample_sdl)** - SDL port of the sample app of this library (with the goal of running it on multiple platforms, including MacOS). Author: @rextimmy. License: MIT. +- **[vulkan-malloc](https://github.com/dylanede/vulkan-malloc)** - Vulkan memory allocation library for Rust. Based on version 1 of this library. Author: Dylan Ede (@dylanede). License: MIT / Apache 2.0. diff --git a/extern/VulkanMemoryAllocator/include/vk_mem_alloc.h b/extern/VulkanMemoryAllocator/include/vk_mem_alloc.h new file mode 100644 index 0000000000..2008f1ddb9 --- /dev/null +++ b/extern/VulkanMemoryAllocator/include/vk_mem_alloc.h @@ -0,0 +1,19875 @@ +// +// Copyright (c) 2017-2026 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H +#define AMD_VULKAN_MEMORY_ALLOCATOR_H + +/** \mainpage Vulkan Memory Allocator + +Version 3.4.0-development + +Copyright (c) 2017-2026 Advanced Micro Devices, Inc. All rights reserved. \n +License: MIT \n +See also: [product page on GPUOpen](https://gpuopen.com/vulkan-memory-allocator/), +[repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) + + +API documentation divided into groups: [Topics](topics.html) + +General documentation chapters: + +- \subpage faq +- \subpage quick_start + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) +- \subpage choosing_memory_type + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) +- \subpage memory_mapping + - [Copy functions](@ref memory_mapping_copy_functions) + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) +- \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) +- \subpage resource_aliasing +- \subpage custom_memory_pools + - [Choosing memory type index](@ref 
custom_memory_pools_MemTypeIndex) + - [When not to use custom pools](@ref custom_memory_pools_when_not_use) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) +- \subpage defragmentation +- \subpage statistics + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) +- \subpage allocation_annotation + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) +- \subpage virtual_allocator +- \subpage debugging_memory_usage + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - [Leak detection features](@ref debugging_memory_usage_leak_detection) +- \subpage other_api_interop + - [Exporting memory](@ref other_api_interop_exporting_memory) + - [Importing memory](@ref other_api_interop_importing_memory) +- \subpage usage_patterns + - [GPU-only resource](@ref usage_patterns_gpu_only) + - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) + - [Readback](@ref usage_patterns_readback) + - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) + - [Other use cases](@ref usage_patterns_other_use_cases) +- \subpage configuration + - [Pointers to Vulkan functions](@ref config_Vulkan_functions) + - [Custom host memory allocator](@ref custom_memory_allocator) + - [Device memory allocation callbacks](@ref allocation_callbacks) + - [Device heap memory limit](@ref heap_memory_limit) +- Extension support + - \subpage vk_khr_dedicated_allocation + - \subpage enabling_buffer_device_address + - \subpage vk_ext_memory_priority + - \subpage vk_amd_device_coherent_memory +- \subpage general_considerations + - [Thread 
safety](@ref general_considerations_thread_safety) + - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) + - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) + - [Allocation algorithm](@ref general_considerations_allocation_algorithm) + - [Features not supported](@ref general_considerations_features_not_supported) + +\defgroup group_init Library initialization + +\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. + +\defgroup group_alloc Memory allocation + +\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. +Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). + +\defgroup group_virtual Virtual allocator + +\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm +for user-defined purpose without allocating any real GPU memory. + +\defgroup group_stats Statistics + +\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. +See documentation chapter: \ref statistics. 
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(VULKAN_H_)
+#include <vulkan/vulkan.h>
+#endif
+
+#define VMA_VERSION (VK_MAKE_VERSION(3, 4, 0))
+
+#if !defined(VMA_VULKAN_VERSION)
+    #if defined(VK_VERSION_1_4)
+        #define VMA_VULKAN_VERSION 1004000
+    #elif defined(VK_VERSION_1_3)
+        #define VMA_VULKAN_VERSION 1003000
+    #elif defined(VK_VERSION_1_2)
+        #define VMA_VULKAN_VERSION 1002000
+    #elif defined(VK_VERSION_1_1)
+        #define VMA_VULKAN_VERSION 1001000
+    #else
+        #define VMA_VULKAN_VERSION 1000000
+    #endif
+#endif
+
+#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+    extern PFN_vkAllocateMemory vkAllocateMemory;
+    extern PFN_vkFreeMemory vkFreeMemory;
+    extern PFN_vkMapMemory vkMapMemory;
+    extern PFN_vkUnmapMemory vkUnmapMemory;
+    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+    extern PFN_vkBindBufferMemory vkBindBufferMemory;
+    extern PFN_vkBindImageMemory vkBindImageMemory;
+    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+    extern PFN_vkCreateBuffer vkCreateBuffer;
+    extern PFN_vkDestroyBuffer vkDestroyBuffer;
+    extern PFN_vkCreateImage vkCreateImage;
+    extern PFN_vkDestroyImage vkDestroyImage;
+    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+    #if VMA_VULKAN_VERSION >= 1001000
+        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
+        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
+        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
+        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
+        extern PFN_vkGetPhysicalDeviceMemoryProperties2
vkGetPhysicalDeviceMemoryProperties2; + #endif // #if VMA_VULKAN_VERSION >= 1001000 +#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES + +#if !defined(VMA_DEDICATED_ALLOCATION) + #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation + #define VMA_DEDICATED_ALLOCATION 1 + #else + #define VMA_DEDICATED_ALLOCATION 0 + #endif +#endif + +#if !defined(VMA_BIND_MEMORY2) + #if VK_KHR_bind_memory2 + #define VMA_BIND_MEMORY2 1 + #else + #define VMA_BIND_MEMORY2 0 + #endif +#endif + +#if !defined(VMA_MEMORY_BUDGET) + #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) + #define VMA_MEMORY_BUDGET 1 + #else + #define VMA_MEMORY_BUDGET 0 + #endif +#endif + +// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. +#if !defined(VMA_BUFFER_DEVICE_ADDRESS) + #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 + #define VMA_BUFFER_DEVICE_ADDRESS 1 + #else + #define VMA_BUFFER_DEVICE_ADDRESS 0 + #endif +#endif + +// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. +#if !defined(VMA_MEMORY_PRIORITY) + #if VK_EXT_memory_priority + #define VMA_MEMORY_PRIORITY 1 + #else + #define VMA_MEMORY_PRIORITY 0 + #endif +#endif + +// Defined to 1 when VK_KHR_maintenance4 device extension is defined in Vulkan headers. +#if !defined(VMA_KHR_MAINTENANCE4) + #if VK_KHR_maintenance4 + #define VMA_KHR_MAINTENANCE4 1 + #else + #define VMA_KHR_MAINTENANCE4 0 + #endif +#endif + +// Defined to 1 when VK_KHR_maintenance5 device extension is defined in Vulkan headers. +#if !defined(VMA_KHR_MAINTENANCE5) + #if VK_KHR_maintenance5 + #define VMA_KHR_MAINTENANCE5 1 + #else + #define VMA_KHR_MAINTENANCE5 0 + #endif +#endif + + +// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers. 
+#if !defined(VMA_EXTERNAL_MEMORY) + #if VK_KHR_external_memory + #define VMA_EXTERNAL_MEMORY 1 + #else + #define VMA_EXTERNAL_MEMORY 0 + #endif +#endif + +// Defined to 1 when VK_KHR_external_memory_win32 device extension is defined in Vulkan headers. +#if !defined(VMA_EXTERNAL_MEMORY_WIN32) + #if VK_KHR_external_memory_win32 + #define VMA_EXTERNAL_MEMORY_WIN32 1 + #else + #define VMA_EXTERNAL_MEMORY_WIN32 0 + #endif +#endif + +// Define these macros to decorate all public functions with additional code, +// before and after returned type, appropriately. This may be useful for +// exporting the functions when compiling VMA as a separate library. Example: +// #define VMA_CALL_PRE __declspec(dllexport) +// #define VMA_CALL_POST __cdecl +#ifndef VMA_CALL_PRE + #define VMA_CALL_PRE +#endif +#ifndef VMA_CALL_POST + #define VMA_CALL_POST +#endif + +// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan +// structure that will be extended via the pNext chain. +#ifndef VMA_EXTENDS_VK_STRUCT + #define VMA_EXTENDS_VK_STRUCT(vkStruct) +#endif + +// Define this macro to decorate pointers with an attribute specifying the +// length of the array they point to if they are not null. +// +// The length may be one of +// - The name of another parameter in the argument list where the pointer is declared +// - The name of another member in the struct where the pointer is declared +// - The name of a member of a struct type, meaning the value of that member in +// the context of the call. For example +// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), +// this means the number of memory heaps available in the device associated +// with the VmaAllocator being dealt with. +#ifndef VMA_LEN_IF_NOT_NULL + #define VMA_LEN_IF_NOT_NULL(len) +#endif + +// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. 
+// see: https://clang.llvm.org/docs/AttributeReference.html#nullable +#ifndef VMA_NULLABLE + #ifdef __clang__ + #define VMA_NULLABLE _Nullable + #else + #define VMA_NULLABLE + #endif +#endif + +// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. +// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull +#ifndef VMA_NOT_NULL + #ifdef __clang__ + #define VMA_NOT_NULL _Nonnull + #else + #define VMA_NOT_NULL + #endif +#endif + +// If non-dispatchable handles are represented as pointers then we can give +// then nullability annotations +#ifndef VMA_NOT_NULL_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL + #else + #define VMA_NOT_NULL_NON_DISPATCHABLE + #endif +#endif + +#ifndef VMA_NULLABLE_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE + #else + #define VMA_NULLABLE_NON_DISPATCHABLE + #endif +#endif + +#ifndef VMA_STATS_STRING_ENABLED + #define VMA_STATS_STRING_ENABLED 1 +#endif + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +// +// INTERFACE +// +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. +#ifndef _VMA_ENUM_DECLARATIONS + +/** +\addtogroup group_init +@{ +*/ + +/// Flags for created #VmaAllocator. 
+typedef enum VmaAllocatorCreateFlagBits +{ + /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. + + Using this flag may increase performance because internal mutexes are not used. + */ + VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, + /** \brief Enables usage of VK_KHR_dedicated_allocation extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + Using this extension will automatically allocate dedicated blocks of memory for + some buffers and images instead of suballocating place for them out of bigger + memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT + flag) when it is recommended by the driver. It may improve performance on some + GPUs. + + You may set this flag only if you found out that following device extensions are + supported, you enabled them while creating Vulkan device passed as + VmaAllocatorCreateInfo::device, and you want them to be used internally by this + library: + + - VK_KHR_get_memory_requirements2 (device extension) + - VK_KHR_dedicated_allocation (device extension) + + When this flag is set, you can experience following warnings reported by Vulkan + validation layer. You can ignore them. + + > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. + */ + VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, + /** + Enables usage of VK_KHR_bind_memory2 extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. 
+ + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library. + + The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, + which allow to pass a chain of `pNext` structures while binding. + This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enables usage of VK_AMD_device_coherent_memory extension. + + You may set this flag only if you: + + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. + + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. 
+ + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, + /** + Enables usage of "buffer device address" feature, which allows you to use function + `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. + + You may set this flag only if you: + + 1. (For Vulkan version < 1.2) Found as available and enabled device extension + VK_KHR_buffer_device_address. + This extension is promoted to core Vulkan 1.2. + 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. + + When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. + The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to + allocated memory blocks wherever it might be needed. + + For more information, see documentation chapter \ref enabling_buffer_device_address. + */ + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, + /** + Enables usage of VK_EXT_memory_priority extension in the library. + + You may set this flag only if you found available and enabled this device extension, + along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + + When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority + are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + + A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + Larger values are higher priority. 
The granularity of the priorities is implementation-dependent. + It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. + The value to be used for default priority is 0.5. + For more details, see the documentation of the VK_EXT_memory_priority extension. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, + /** + Enables usage of VK_KHR_maintenance4 extension in the library. + + You may set this flag only if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + */ + VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT = 0x00000080, + /** + Enables usage of VK_KHR_maintenance5 extension in the library. + + You should set this flag if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + */ + VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT = 0x00000100, + + /** + Enables usage of VK_KHR_external_memory_win32 extension in the library. + + You should set this flag if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + For more information, see \ref other_api_interop. + */ + VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT = 0x00000200, + + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocatorCreateFlagBits; +/// See #VmaAllocatorCreateFlagBits. +typedef VkFlags VmaAllocatorCreateFlags; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// \brief Intended usage of the allocated memory. +typedef enum VmaMemoryUsage +{ + /** No intended memory usage specified. + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ + VMA_MEMORY_USAGE_UNKNOWN = 0, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. 
+ */ + VMA_MEMORY_USAGE_GPU_ONLY = 1, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. + */ + VMA_MEMORY_USAGE_CPU_ONLY = 2, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. + */ + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** + Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. + + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. + + Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + /** + Selects best memory type automatically. + This flag is recommended for most common use cases. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. 
+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO = 7, + /** + Selects best memory type automatically with preference for GPU (device) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, + /** + Selects best memory type automatically with preference for CPU (host) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, + + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF +} VmaMemoryUsage; + +/// Flags to be passed as VmaAllocationCreateInfo::flags. +typedef enum VmaAllocationCreateFlagBits +{ + /** \brief Set this flag if the allocation should have its own memory block. + + Use it for special, big resources, like fullscreen images used as attachments. 
+ + If you use this flag while creating a buffer or an image, `VkMemoryDedicatedAllocateInfo` + structure is applied if possible. + */ + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, + + /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. + + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + */ + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, + /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. + + It is valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is + useful if you need an allocation that is efficient to use on GPU + (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that + support it (e.g. Intel GPU). + */ + VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, + /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. + + Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a + null-terminated string. Instead of copying pointer value, a local copy of the + string is made and stored in allocation's `pName`. The string is automatically + freed together with the allocation. It is also used in vmaBuildStatsString(). + */ + VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, + /** Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. 
+ */ + VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, + /** Create both buffer/image and allocation, but don't bind them together. + It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. + The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). + Otherwise it is ignored. + + If you want to make sure the new buffer/image is not tied to the new memory allocation + through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, + use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. + */ + VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, + /** Create allocation only if additional device memory required for it, if any, won't exceed + memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + */ + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, + /** \brief Set this flag if the allocated memory will have aliasing resources. + + Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. + Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. + */ + VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory will only be written sequentially, e.g. 
using `memcpy()` or a loop writing number-by-number, + never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. + + \warning Violating this declaration may work correctly, but will likely be very slow. + Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` + Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory can be read, written, and accessed in random order, + so a `HOST_CACHED` memory type is preferred. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, + /** + Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected + if it may improve performance. + + By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type + (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and + issue an explicit transfer to write/read your data. + To prepare for this possibility, don't forget to add appropriate flags like + `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. 
+ */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, + /** Allocation strategy that chooses smallest possible free range for the allocation + to minimize memory usage and fragmentation, possibly at the expense of allocation time. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, + /** Allocation strategy that chooses first suitable free range for the allocation - + not necessarily in terms of the smallest offset but the one that is easiest and fastest to find + to minimize allocation time, possibly at the expense of allocation quality. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + Used internally by defragmentation, not recommended in typical usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** A bit mask to extract only `STRATEGY` bits from entire set of flags. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MASK = + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocationCreateFlagBits; +/// See #VmaAllocationCreateFlagBits. +typedef VkFlags VmaAllocationCreateFlags; + +/// Flags to be passed as VmaPoolCreateInfo::flags. +typedef enum VmaPoolCreateFlagBits +{ + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. 
+ + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. + + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). In that case, if you can make sure you always allocate only + buffers and linear images or only optimal images out of this pool, use this flag + to make allocator disregard Buffer-Image Granularity and so make allocations + faster and more optimal. + */ + VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, + + /** \brief Enables alternative, linear allocation algorithm in this pool. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, + + /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_POOL_CREATE_ALGORITHM_MASK = + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, + + VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaPoolCreateFlagBits; +/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. +typedef VkFlags VmaPoolCreateFlags; + +/// Flags to be passed as VmaDefragmentationInfo::flags. 
+typedef enum VmaDefragmentationFlagBits
+{
+    /** \brief Use simple but fast algorithm for defragmentation.
+    May not achieve best results but will require least time to compute and least allocations to copy.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
+    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
+    Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
+    /** \brief Perform full defragmentation of memory.
+    Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
+    /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
+    Only available when bufferImageGranularity is greater than 1, since it aims to reduce
+    alignment issues between different types of resources.
+    Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
+
+    /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
+
+    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+/// See #VmaDefragmentationFlagBits.
+typedef VkFlags VmaDefragmentationFlags;
+
+/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
+typedef enum VmaDefragmentationMoveOperation
+{
+    /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
+    VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
+    /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
+    VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
+    /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
+    VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
+} VmaDefragmentationMoveOperation;
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
+typedef enum VmaVirtualBlockCreateFlagBits
+{
+    /** \brief Enables alternative, linear allocation algorithm in this virtual block.
+
+    Specify this flag to enable linear allocation algorithm, which always creates
+    new allocations after last one and doesn't reuse space from allocations freed in
+    between. It trades memory consumption for simplified algorithm and data
+    structure, which has better performance and uses less memory for metadata.
+
+    By using this flag, you can achieve behavior of free-at-once, stack,
+    ring buffer, and double stack.
+    For details, see documentation chapter \ref linear_algorithm.
+    */
+    VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
+
+    /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    */
+    VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
+        VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
+
+    VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaVirtualBlockCreateFlagBits;
+/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
+typedef VkFlags VmaVirtualBlockCreateFlags;
+
+/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
+typedef enum VmaVirtualAllocationCreateFlagBits +{ + /** \brief Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + /** \brief Allocation strategy that tries to minimize memory usage. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** \brief Allocation strategy that tries to minimize allocation time. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. + + These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, + + VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualAllocationCreateFlagBits; +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. +typedef VkFlags VmaVirtualAllocationCreateFlags; + +/** @} */ + +#endif // _VMA_ENUM_DECLARATIONS + +#ifndef _VMA_DATA_TYPES_DECLARATIONS + +/** +\addtogroup group_init +@{ */ + +/** \struct VmaAllocator +\brief Represents main object of this library initialized. + +Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. +Call function vmaDestroyAllocator() to destroy it. 
+ +It is recommended to create just one object of this type per `VkDevice` object, +right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. +*/ +VK_DEFINE_HANDLE(VmaAllocator) + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \struct VmaPool +\brief Represents custom memory pool + +Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. +Call function vmaDestroyPool() to destroy it. + +For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). +*/ +VK_DEFINE_HANDLE(VmaPool) + +/** \struct VmaAllocation +\brief Represents single memory allocation. + +It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type +plus unique offset. + +There are multiple ways to create such object. +You need to fill structure VmaAllocationCreateInfo. +For more information see [Choosing memory type](@ref choosing_memory_type). + +Although the library provides convenience functions that create Vulkan buffer or image, +allocate memory for it and bind them together, +binding of the allocation to a buffer or an image is out of scope of the allocation itself. +Allocation object can exist without buffer/image bound, +binding can be done manually by the user, and destruction of it can be done +independently of destruction of the allocation. + +The object also remembers its size and some other information. +To retrieve this information, use function vmaGetAllocationInfo() and inspect +returned structure VmaAllocationInfo. +*/ +VK_DEFINE_HANDLE(VmaAllocation) + +/** \struct VmaDefragmentationContext +\brief An opaque object that represents started defragmentation process. + +Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. +Call function vmaEndDefragmentation() to destroy it. 
+*/ +VK_DEFINE_HANDLE(VmaDefragmentationContext) + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualAllocation +\brief Represents single memory allocation done inside VmaVirtualBlock. + +Use it as a unique identifier to virtual allocation within the single block. + +Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. +*/ +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation) + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualBlock +\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. + +Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it. +For more information, see documentation chapter \ref virtual_allocator. + +This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. +*/ +VK_DEFINE_HANDLE(VmaVirtualBlock) + +/** @} */ + +/** +\addtogroup group_init +@{ +*/ + +/// Callback function called after successful vkAllocateMemory. +typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/// Callback function called before vkFreeMemory. +typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. + +Provided for informative purpose, e.g. to gather statistics about number of +allocations or total amount of memory allocated in Vulkan. + +Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. 
+*/ +typedef struct VmaDeviceMemoryCallbacks +{ + /// Optional, can be null. + PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; + /// Optional, can be null. + PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; + /// Optional, can be null. + void* VMA_NULLABLE pUserData; +} VmaDeviceMemoryCallbacks; + +/** \brief Pointers to some Vulkan functions - a subset used by the library. + +Used in VmaAllocatorCreateInfo::pVulkanFunctions. +*/ +typedef struct VmaVulkanFunctions +{ + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; + PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; + PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; + PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; + PFN_vkMapMemory VMA_NULLABLE vkMapMemory; + PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; + PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; + PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; + PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; + PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; + PFN_vkCreateImage VMA_NULLABLE vkCreateImage; + PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; + PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. 
+ PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; + /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; + /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR; +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + /// Fetch from "vkGetPhysicalDeviceMemoryProperties2" on Vulkan >= 1.1, but you can also fetch it from "vkGetPhysicalDeviceMemoryProperties2KHR" if you enabled extension VK_KHR_get_physical_device_properties2. + PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; +#endif +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. + PFN_vkGetDeviceBufferMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceBufferMemoryRequirements; + /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. 
+    PFN_vkGetDeviceImageMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
+#endif
+#if VMA_EXTERNAL_MEMORY_WIN32
+    PFN_vkGetMemoryWin32HandleKHR VMA_NULLABLE vkGetMemoryWin32HandleKHR;
+#else
+    void* VMA_NULLABLE vkGetMemoryWin32HandleKHR;
+#endif
+} VmaVulkanFunctions;
+
+/// Description of an Allocator to be created.
+typedef struct VmaAllocatorCreateInfo
+{
+    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+    VmaAllocatorCreateFlags flags;
+    /// Vulkan physical device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+    /// Vulkan device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkDevice VMA_NOT_NULL device;
+    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+    /** Set to 0 to use default, which is currently 256 MiB. */
+    VkDeviceSize preferredLargeHeapBlockSize;
+    /// Custom CPU memory allocation callbacks. Optional.
+    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+    /** Optional, can be null. */
+    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
+    /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+    If not NULL, it must be a pointer to an array of
+    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
+    maximum number of bytes that can be allocated out of particular Vulkan memory
+    heap.
+
+    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+    heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+    If there is a limit defined for a heap:
+
+    - If user tries to allocate more memory from that heap using this allocator,
+      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+      value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+    Warning! Using this feature may not be equivalent to installing a GPU with
+    smaller amount of memory, because graphics driver doesn't necessarily fail new
+    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+    exceeded. It may return success and just silently migrate some device memory
+    blocks to system RAM. This driver behavior can also be controlled using
+    VK_AMD_memory_overallocation_behavior extension.
+    */
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
+
+    /** \brief Pointers to Vulkan functions. Can be null.
+
+    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
+    */
+    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
+    /** \brief Handle to Vulkan instance object.
+
+    Starting from version 3.0.0 this member is no longer optional, it must be set!
+    */
+    VkInstance VMA_NOT_NULL instance;
+    /** \brief Optional. Vulkan version that the application uses.
+
+    It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
+    The patch version number specified is ignored. Only the major and minor versions are considered.
+    Only versions 1.0...1.4 are supported by the current implementation.
+    Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
+ It must match the Vulkan version used by the application and supported on the selected physical device, + so it must be no higher than `VkApplicationInfo::apiVersion` passed to `vkCreateInstance` + and no higher than `VkPhysicalDeviceProperties::apiVersion` found on the physical device used. + */ + uint32_t vulkanApiVersion; +#if VMA_EXTERNAL_MEMORY + /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. + + If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` + elements, defining external memory handle types of particular Vulkan memory type, + to be passed using `VkExportMemoryAllocateInfoKHR`. + + Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. + This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. + */ + const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; +#endif // #if VMA_EXTERNAL_MEMORY +} VmaAllocatorCreateInfo; + +/// Information about existing #VmaAllocator object. +typedef struct VmaAllocatorInfo +{ + /** \brief Handle to Vulkan instance object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::instance. + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Handle to Vulkan physical device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. + */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /** \brief Handle to Vulkan device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::device. + */ + VkDevice VMA_NOT_NULL device; +} VmaAllocatorInfo; + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. + +These are fast to calculate. 
+See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). +*/ +typedef struct VmaStatistics +{ + /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + */ + uint32_t blockCount; + /** \brief Number of #VmaAllocation objects allocated. + + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. + */ + uint32_t allocationCount; + /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. + + \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object + (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls + "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. + */ + VkDeviceSize blockBytes; + /** \brief Total number of bytes occupied by all #VmaAllocation objects. + + Always less or equal than `blockBytes`. + Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan + but unused by any #VmaAllocation. + */ + VkDeviceSize allocationBytes; +} VmaStatistics; + +/** \brief More detailed statistics than #VmaStatistics. + +These are slower to calculate. Use for debugging purposes. +See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). + +Previous version of the statistics API provided averages, but they have been removed +because they can be easily calculated as: + +\code +VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; +VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; +VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; +\endcode +*/ +typedef struct VmaDetailedStatistics +{ + /// Basic statistics. + VmaStatistics statistics; + /// Number of free ranges of memory between allocations. 
+ uint32_t unusedRangeCount; + /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. + VkDeviceSize allocationSizeMin; + /// Largest allocation size. 0 if there are 0 allocations. + VkDeviceSize allocationSizeMax; + /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMin; + /// Largest empty range size. 0 if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMax; +} VmaDetailedStatistics; + +/** \brief General statistics from current state of the Allocator - +total memory usage across all memory heaps and types. + +These are slower to calculate. Use for debugging purposes. +See function vmaCalculateStatistics(). +*/ +typedef struct VmaTotalStatistics +{ + VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; + VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; + VmaDetailedStatistics total; +} VmaTotalStatistics; + +/** \brief Statistics of current memory usage and available budget for a specific memory heap. + +These are fast to calculate. +See function vmaGetHeapBudgets(). +*/ +typedef struct VmaBudget +{ + /** \brief Statistics fetched from the library. + */ + VmaStatistics statistics; + /** \brief Estimated current memory usage of the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects + also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or + `VkDeviceMemory` blocks allocated outside of this library, if any. + */ + VkDeviceSize usage; + /** \brief Estimated amount of memory available to the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors + external to the program, decided by the operating system. 
+ Difference `budget - usage` is the amount of additional memory that can probably + be allocated without problems. Exceeding the budget may result in various problems. + */ + VkDeviceSize budget; +} VmaBudget; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Parameters of new #VmaAllocation. + +To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. +*/ +typedef struct VmaAllocationCreateInfo +{ + /// Use #VmaAllocationCreateFlagBits enum. + VmaAllocationCreateFlags flags; + /** \brief Intended usage of memory. + + You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. + */ + VmaMemoryUsage usage; + /** \brief Flags that must be set in a Memory Type chosen for an allocation. + + Leave 0 if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored.*/ + VkMemoryPropertyFlags requiredFlags; + /** \brief Flags that preferably should be set in a memory type chosen for an allocation. + + Set to 0 if no additional flags are preferred. \n + If `pool` is not null, this member is ignored. */ + VkMemoryPropertyFlags preferredFlags; + /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. + + Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if + it meets other requirements specified by this structure, with no further + restrictions on memory type index. \n + If `pool` is not null, this member is ignored. + */ + uint32_t memoryTypeBits; + /** \brief Pool that this allocation should be created in. + + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. 
+ */ + VmaPool VMA_NULLABLE pool; + /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). + + If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either + null or pointer to a null-terminated string. The string will be then copied to + internal buffer, so it doesn't need to be valid after allocation call. + */ + void* VMA_NULLABLE pUserData; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object + and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. + */ + float priority; + /** \brief Additional minimum alignment to be used for this allocation. Can be 0. + + Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. + + When creating a buffer or an image, specifying a custom alignment is not needed in most cases, + because Vulkan implementation inspects the `CreateInfo` structure (including intended usage flags) + and returns required alignment through functions like `vkGetBufferMemoryRequirements2`, which VMA automatically + uses and respects. + Extra alignment may be needed in some cases, like when using a buffer for acceleration structure scratch + (`VkPhysicalDeviceAccelerationStructurePropertiesKHR::minAccelerationStructureScratchOffsetAlignment`, see also issue #523) + or when doing interop with OpenGL. + */ + VkDeviceSize minAlignment; +} VmaAllocationCreateInfo; + +/// Describes parameter of created #VmaPool. +typedef struct VmaPoolCreateInfo +{ + /** \brief Vulkan memory type index to allocate this pool from. 
+ */ + uint32_t memoryTypeIndex; + /** \brief Use combination of #VmaPoolCreateFlagBits. + */ + VmaPoolCreateFlags flags; + /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. + + Specify nonzero to set explicit, constant size of memory blocks used by this + pool. + + Leave 0 to use default and let the library manage block sizes automatically. + Sizes of particular blocks may vary. + In this case, the pool will also support dedicated allocations. + */ + VkDeviceSize blockSize; + /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. + + Set to 0 to have no preallocated blocks and allow the pool be completely empty. + */ + size_t minBlockCount; + /** \brief Maximum number of blocks that can be allocated in this pool. Optional. + + Set to 0 to use default, which is `SIZE_MAX`, which means no limit. + + Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated + throughout whole lifetime of this pool. + */ + size_t maxBlockCount; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. + Otherwise, this variable is ignored. + */ + float priority; + /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. + + Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. + + When creating a buffer or an image, specifying a custom alignment is not needed in most cases, + because Vulkan implementation inspects the `CreateInfo` structure (including intended usage flags) + and returns required alignment through functions like `vkGetBufferMemoryRequirements2`, which VMA automatically + uses and respects. 
+ Extra alignment may be needed in some cases, like when using a buffer for acceleration structure scratch + (`VkPhysicalDeviceAccelerationStructurePropertiesKHR::minAccelerationStructureScratchOffsetAlignment`, see also issue #523) + or when doing interop with OpenGL. + */ + VkDeviceSize minAllocationAlignment; + /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. + + Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. + It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. + Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. + + Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, + can be attached automatically by this library when using other, more convenient of its features. + */ + void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext; +} VmaPoolCreateInfo; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** +Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). + +There is also an extended version of this structure that carries additional parameters: #VmaAllocationInfo2. +*/ +typedef struct VmaAllocationInfo +{ + /** \brief Memory type index that this allocation was allocated from. + + It never changes. + */ + uint32_t memoryType; + /** \brief Handle to Vulkan memory object. + + Same memory object can be shared by multiple allocations. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; + /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. + + You usually don't need to use this offset. 
If you create a buffer or an image together with the allocation using e.g. function + vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, + not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation + and apply this offset automatically. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceSize offset; + /** \brief Size of this allocation, in bytes. + + It never changes. + + \note Allocation size returned in this variable may be greater than the size + requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the + allocation is accessible for operations on memory e.g. using a pointer after + mapping with vmaMapMemory(), but operations on the resource e.g. using + `vkCmdCopyBuffer` must be limited to the size of the resource. + */ + VkDeviceSize size; + /** \brief Pointer to the beginning of this allocation as mapped data. + + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. + + It can change after call to vmaMapMemory(), vmaUnmapMemory(). + It can also change after the allocation is moved during \ref defragmentation. + */ + void* VMA_NULLABLE pMappedData; + /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). + + It can change after call to vmaSetAllocationUserData() for this allocation. + */ + void* VMA_NULLABLE pUserData; + /** \brief Custom allocation name that was set with vmaSetAllocationName(). + + It can change after call to vmaSetAllocationName() for this allocation. + + Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with + additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. 
+ */ + const char* VMA_NULLABLE pName; +} VmaAllocationInfo; + +/// Extended parameters of a #VmaAllocation object that can be retrieved using function vmaGetAllocationInfo2(). +typedef struct VmaAllocationInfo2 +{ + /** \brief Basic parameters of the allocation. + + If you need only these, you can use function vmaGetAllocationInfo() and structure #VmaAllocationInfo instead. + */ + VmaAllocationInfo allocationInfo; + /** \brief Size of the `VkDeviceMemory` block that the allocation belongs to. + + In case of an allocation with dedicated memory, it will be equal to `allocationInfo.size`. + */ + VkDeviceSize blockSize; + /** \brief `VK_TRUE` if the allocation has dedicated memory, `VK_FALSE` if it was placed as part of a larger memory block. + + When `VK_TRUE`, it also means `VkMemoryDedicatedAllocateInfo` was used when creating the allocation + (if VK_KHR_dedicated_allocation extension or Vulkan version >= 1.1 is enabled). + */ + VkBool32 dedicatedMemory; +} VmaAllocationInfo2; + +/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass. + +Should return true if the defragmentation needs to stop current pass. +*/ +typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData); + +/** \brief Parameters for defragmentation. + +To be used with function vmaBeginDefragmentation(). +*/ +typedef struct VmaDefragmentationInfo +{ + /// \brief Use combination of #VmaDefragmentationFlagBits. + VmaDefragmentationFlags flags; + /** \brief Custom pool to be defragmented. + + If null then default pools will undergo defragmentation process. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. + + `0` means no limit. + */ + VkDeviceSize maxBytesPerPass; + /** \brief Maximum number of allocations that can be moved during single pass to a different place. 
+ + `0` means no limit. + */ + uint32_t maxAllocationsPerPass; + /** \brief Optional custom callback for stopping vmaBeginDefragmentation(). + + Have to return true for breaking current defragmentation pass. + */ + PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback; + /// \brief Optional data to pass to custom callback for stopping pass of defragmentation. + void* VMA_NULLABLE pBreakCallbackUserData; +} VmaDefragmentationInfo; + +/// Single move of an allocation to be done for defragmentation. +typedef struct VmaDefragmentationMove +{ + /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. + VmaDefragmentationMoveOperation operation; + /// Allocation that should be moved. + VmaAllocation VMA_NOT_NULL srcAllocation; + /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. + + \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, + to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). + vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. + */ + VmaAllocation VMA_NOT_NULL dstTmpAllocation; +} VmaDefragmentationMove; + +/** \brief Parameters for incremental defragmentation steps. + +To be used with function vmaBeginDefragmentationPass(). +*/ +typedef struct VmaDefragmentationPassMoveInfo +{ + /// Number of elements in the `pMoves` array. + uint32_t moveCount; + /** \brief Array of moves to be performed by the user in the current defragmentation pass. + + Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). + + For each element, you should: + + 1. 
Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. + 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. + 3. Make sure these commands finished executing on the GPU. + 4. Destroy the old buffer/image. + + Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). + After this call, the allocation will point to the new place in memory. + + Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. + + Alternatively, if you decide you want to completely remove the allocation: + + 1. Destroy its buffer/image. + 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + + Then, after vmaEndDefragmentationPass() the allocation will be freed. + */ + VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; +} VmaDefragmentationPassMoveInfo; + +/// Statistics returned for defragmentation process in function vmaEndDefragmentation(). +typedef struct VmaDefragmentationStats +{ + /// Total number of bytes that have been copied while moving allocations to different places. + VkDeviceSize bytesMoved; + /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. + VkDeviceSize bytesFreed; + /// Number of allocations that have been moved to different places. + uint32_t allocationsMoved; + /// Number of empty `VkDeviceMemory` objects that have been released to the system. + uint32_t deviceMemoryBlocksFreed; +} VmaDefragmentationStats; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). +typedef struct VmaVirtualBlockCreateInfo +{ + /** \brief Total size of the virtual block. 
+ + Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. + For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. + */ + VkDeviceSize size; + + /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. + */ + VmaVirtualBlockCreateFlags flags; + + /** \brief Custom CPU memory allocation callbacks. Optional. + + Optional, can be null. When specified, they will be used for all CPU-side memory allocations. + */ + const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; +} VmaVirtualBlockCreateInfo; + +/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). +typedef struct VmaVirtualAllocationCreateInfo +{ + /** \brief Size of the allocation. + + Cannot be zero. + */ + VkDeviceSize size; + /** \brief Required alignment of the allocation. Optional. + + Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. + */ + VkDeviceSize alignment; + /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. + */ + VmaVirtualAllocationCreateFlags flags; + /** \brief Custom pointer to be associated with the allocation. Optional. + + It can be any value and can be used for user-defined purposes. It can be fetched or changed later. + */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationCreateInfo; + +/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). +typedef struct VmaVirtualAllocationInfo +{ + /** \brief Offset of the allocation. + + Offset at which the allocation was made. + */ + VkDeviceSize offset; + /** \brief Size of the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::size. + */ + VkDeviceSize size; + /** \brief Custom pointer associated with the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). 
+ */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationInfo; + +/** @} */ + +#endif // _VMA_DATA_TYPES_DECLARATIONS + +#ifndef _VMA_FUNCTION_HEADERS + +/** +\addtogroup group_init +@{ +*/ + +#ifdef VOLK_HEADER_VERSION +/** \brief Fully initializes `pDstVulkanFunctions` structure with Vulkan functions needed by VMA +using [volk library](https://github.com/zeux/volk). + +This function is defined in VMA header only if "volk.h" was included before it. + +To use this function properly: + +-# Initialize volk and Vulkan: + -# Call `volkInitialize()` + -# Create `VkInstance` object + -# Call `volkLoadInstance()` + -# Create `VkDevice` object + -# Call `volkLoadDevice()` +-# Fill in structure #VmaAllocatorCreateInfo, especially members: + - VmaAllocatorCreateInfo::device + - VmaAllocatorCreateInfo::vulkanApiVersion + - VmaAllocatorCreateInfo::flags - set appropriate flags for the Vulkan extensions you enabled +-# Create an instance of the #VmaVulkanFunctions structure. +-# Call vmaImportVulkanFunctionsFromVolk(). + Parameter `pAllocatorCreateInfo` is read to find out which functions should be fetched for + appropriate Vulkan version and extensions. + Parameter `pDstVulkanFunctions` is filled with those function pointers, or null if not applicable. +-# Attach the #VmaVulkanFunctions structure to VmaAllocatorCreateInfo::pVulkanFunctions. +-# Call vmaCreateAllocator() to create the #VmaAllocator object. 
+ +Example: + +\code +VmaAllocatorCreateInfo allocatorCreateInfo = {}; +allocatorCreateInfo.physicalDevice = myPhysicalDevice; +allocatorCreateInfo.device = myDevice; +allocatorCreateInfo.instance = myInstance; +allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_3; +allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT | + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT | + VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT; + +VmaVulkanFunctions vulkanFunctions; +VkResult res = vmaImportVulkanFunctionsFromVolk(&allocatorCreateInfo, &vulkanFunctions); +// Check res... +allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; + +VmaAllocator allocator; +res = vmaCreateAllocator(&allocatorCreateInfo, &allocator); +// Check res... +\endcode + +Internally in this function, pointers to functions related to the entire Vulkan instance are fetched using global function definitions, +while pointers to functions related to the Vulkan device are fetched using `volkLoadDeviceTable()` for given `pAllocatorCreateInfo->device`. + */ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaImportVulkanFunctionsFromVolk( + const VmaAllocatorCreateInfo* VMA_NOT_NULL pAllocatorCreateInfo, + VmaVulkanFunctions* VMA_NOT_NULL pDstVulkanFunctions); +#endif + +/// Creates #VmaAllocator object. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); + +/// Destroys allocator object. +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator VMA_NULLABLE allocator); + +/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. + +It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to +`VkPhysicalDevice`, `VkDevice` etc. every time using this function. 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); + +/** +PhysicalDeviceProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); + +/** +PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); + +/** +\brief Given Memory Type Index, returns Property Flags of this memory type. + +This is just a convenience function. Same information can be obtained using +vmaGetMemoryProperties(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + +/** \brief Sets index of the current frame. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t frameIndex); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics from current state of the Allocator. + +This function is called "calculate" not "get" because it has to traverse all +internal data structures, so it may be quite slow. Use it for debugging purposes. +For faster but more brief statistics suitable to be called every frame or every allocation, +use vmaGetHeapBudgets(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaTotalStatistics* VMA_NOT_NULL pStats); + +/** \brief Retrieves information about current memory usage and budget for all memory heaps. + +\param allocator +\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. + +This function is called "get" not "calculate" because it is very fast, suitable to be called +every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator VMA_NOT_NULL allocator, + VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** +\brief Helps to find `memoryTypeIndex`, given `memoryTypeBits` and #VmaAllocationCreateInfo. + +This algorithm tries to find a memory type that: + +- Is allowed by `memoryTypeBits`. +- Contains all the flags from `pAllocationCreateInfo->requiredFlags`. +- Matches intended usage. +- Has as many flags from `pAllocationCreateInfo->preferredFlags` as possible. + +\return Returns `VK_ERROR_FEATURE_NOT_PRESENT` if not found. Receiving such result +from this function or any other allocating function probably means that your +device doesn't support any memory type with requested features for the specific +type of resource you want to use it for. Please check parameters of your +resource, like image layout (`OPTIMAL` versus `LINEAR`) or mip level count. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find `memoryTypeIndex`, given `VkBufferCreateInfo` and #VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It may need to internally create a temporary, dummy buffer that never has memory bound. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find `memoryTypeIndex`, given `VkImageCreateInfo` and #VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It may need to internally create a temporary, dummy image that never has memory bound. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** \brief Allocates Vulkan device memory and creates #VmaPool object. + +\param allocator Allocator object. +\param pCreateInfo Parameters of pool to create. +\param[out] pPool Handle to created pool. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator VMA_NOT_NULL allocator, + const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); + +/** \brief Destroys #VmaPool object and frees Vulkan device memory. 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NULLABLE pool); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. + +Note that when using the pool from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaStatistics* VMA_NOT_NULL pPoolStats); + +/** \brief Retrieves detailed statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool); + +/** \brief Retrieves name of a custom pool. + +After the call `ppName` is either null or points to an internally-owned null-terminated string +containing name of the pool that was previously set. The pointer becomes invalid when the pool is +destroyed or its name is changed using vmaSetPoolName(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE* VMA_NOT_NULL ppName); + +/** \brief Sets name of a custom pool. + +`pName` can be either null or pointer to a null-terminated string with new name for the pool. +Function makes internal copy of the string, so it can be changed or freed immediately after this call. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE pName); + +/** \brief General purpose memory allocation. + +\param allocator The main allocator object. +\param pVkMemoryRequirements Requirements for the allocated memory. +\param pCreateInfo Allocation creation parameters. +\param[out] pAllocation Handle to allocated memory. +\param[out] pAllocationInfo Optional, can be null. Information about allocated memory. It can be also fetched later using vmaGetAllocationInfo(). + +The function creates a #VmaAllocation object without creating a buffer or an image together with it. + +- It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), + vmaCreateBuffer(), vmaCreateImage() instead whenever possible. +- You can also create a buffer or an image later in an existing allocation using + vmaCreateAliasingBuffer2(), vmaCreateAliasingImage2(). +- You can also create a buffer or an image on your own and bind it to an existing allocation + using vmaBindBufferMemory2(), vmaBindImageMemory2(). 
+
+You must free the returned allocation object using vmaFreeMemory() or vmaFreeMemoryPages().
+
+There is also extended version of this function: vmaAllocateDedicatedMemory()
+that offers additional parameter `pMemoryAllocateNext`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief General purpose allocation of a dedicated memory.
+
+This function is similar to vmaAllocateMemory(), but
+it always allocates dedicated memory - flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is implied.
+It offers additional parameter `pMemoryAllocateNext`,
+which can be used to attach `pNext` chain to the `VkMemoryAllocateInfo` structure.
+It can be useful for importing external memory. For more information, see \ref other_api_interop.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateDedicatedMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief General purpose memory allocation for multiple allocation objects at once.
+
+\param allocator Allocator object.
+\param pVkMemoryRequirements Memory requirements for each allocation.
+\param pCreateInfo Creation parameters for each allocation.
+\param allocationCount Number of allocations to make.
+\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
+\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+ +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. +It is just a general purpose allocation function able to make multiple allocations at once. +It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. + +All allocations are made using same parameters. All of them are created out of the same memory pool and type. +If any allocation fails, all allocations already made within this function call are also freed, so that when +returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, + const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, + size_t allocationCount, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, + VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); + +/** \brief Allocates memory suitable for given `VkBuffer`. + +\param allocator +\param buffer +\param pCreateInfo +\param[out] pAllocation Handle to allocated memory. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory(). + +This is a special-purpose function. In most cases you should use vmaCreateBuffer(). + +You must free the allocation using vmaFreeMemory() when no longer needed. 
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkImage`.
+
+\param allocator
+\param image
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateImage().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times. + +Allocations in `pAllocations` array can come from any memory pools and types. +Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator VMA_NOT_NULL allocator, + size_t allocationCount, + const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations); + +/** \brief Returns current information about specified allocation. + +Current parameters of given allocation are returned in `pAllocationInfo`. + +Although this function doesn't lock any mutex, so it should be quite efficient, +you should avoid calling it too often. +You can retrieve same VmaAllocationInfo structure while creating your resource, from function +vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change +(e.g. due to defragmentation). + +There is also a new function vmaGetAllocationInfo2() that offers extended information +about the allocation, returned using new structure #VmaAllocationInfo2. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo); + +/** \brief Returns extended information about specified allocation. + +Current parameters of given allocation are returned in `pAllocationInfo`. +Extended parameters in structure #VmaAllocationInfo2 include memory block size +and a flag telling whether the allocation has dedicated memory. +It can be useful e.g. for interop with OpenGL. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VmaAllocationInfo2* VMA_NOT_NULL pAllocationInfo); + +/** \brief Sets pUserData in given allocation to new value. 
+ +The value of pointer `pUserData` is copied to allocation's `pUserData`. +It is opaque, so you can use it however you want - e.g. +as a pointer, ordinal number or some handle to you own data. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + void* VMA_NULLABLE pUserData); + +/** \brief Sets pName in given allocation to new value. + +`pName` must be either null, or pointer to a null-terminated string. The function +makes local copy of the string and sets it as allocation's `pName`. String +passed as pName doesn't need to be valid for whole lifetime of the allocation - +you can free it after this call. String previously pointed by allocation's +`pName` is freed from memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName); + +/** +\brief Given an allocation, returns Property Flags of its memory type. + +This is just a convenience function. Same information can be obtained using +vmaGetAllocationInfo() + vmaGetMemoryProperties(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + + +#if VMA_EXTERNAL_MEMORY_WIN32 +/** +\brief Given an allocation, returns Win32 handle that may be imported by other processes or APIs. + +\param allocator The main allocator object. +\param allocation Allocation. +\param hTargetProcess A valid handle to target process or null. If it's null, the function returns + handle for the current process. +\param[out] pHandle Output parameter that returns the handle. + +The function fills `pHandle` with handle that can be used in target process. +The handle is fetched using function `vkGetMemoryWin32HandleKHR`. 
+ +Each call to this function creates a new handle that must be closed using: + +\code +CloseHandle(handle); +\endcode + +You can close it any time, before or after destroying the allocation object. +It is reference-counted internally by Windows. + +Note the handle is returned for the entire `VkDeviceMemory` block that the allocation belongs to. +If the allocation is sub-allocated from a larger block, you may need to consider the offset of the allocation +(VmaAllocationInfo::offset). + +This function always uses `VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT`. +An extended version of this function is available as vmaGetMemoryWin32Handle2() +that allows using other handle type. + +This function is available compile-time only when VK_KHR_external_memory_win32 extension is available. +It can be manually disabled by predefining `VMA_EXTERNAL_MEMORY_WIN32=0` macro. + +If the function fails with `VK_ERROR_FEATURE_NOT_PRESENT` error code, please double-check +that VmaVulkanFunctions::vkGetMemoryWin32HandleKHR function pointer is set, e.g. +either by using macro `VMA_DYNAMIC_VULKAN_FUNCTIONS` +or by manually passing it through VmaAllocatorCreateInfo::pVulkanFunctions. + +For more information, see chapter \ref other_api_interop. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + HANDLE hTargetProcess, + HANDLE* VMA_NOT_NULL pHandle); + +/** +\brief Given an allocation, returns Win32 handle that may be imported by other processes or APIs. + +\param allocator The main allocator object. +\param allocation Allocation. +\param handleType Type of handle to be exported. 
It should be one of: + - `VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR` + - `VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR` + - `VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR` + - `VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR` + - `VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR` + - `VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR` +\param hTargetProcess A valid handle to target process or null. If it's null, the function returns + handle for the current process. +\param[out] pHandle Output parameter that returns the handle. + +The function fills `pHandle` with handle that can be used in target process. +The handle is fetched using function `vkGetMemoryWin32HandleKHR`. + +If `handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR`, +or other NT handle types, +each call to this function creates a new handle that must be closed using: + +\code +CloseHandle(handle); +\endcode + +You can close it any time, before or after destroying the allocation object. +It is reference-counted internally by Windows. + +Note the handle is returned for the entire `VkDeviceMemory` block that the allocation belongs to. +If the allocation is sub-allocated from a larger block, you may need to consider the offset of the allocation +(VmaAllocationInfo::offset). + +This function is available compile-time only when VK_KHR_external_memory_win32 extension is available. +It can be manually disabled by predefining `VMA_EXTERNAL_MEMORY_WIN32=0` macro. + +If the function fails with `VK_ERROR_FEATURE_NOT_PRESENT` error code, please double-check +that VmaVulkanFunctions::vkGetMemoryWin32HandleKHR function pointer is set, e.g. +either by using macro `VMA_DYNAMIC_VULKAN_FUNCTIONS` +or by manually passing it through VmaAllocatorCreateInfo::pVulkanFunctions. + +For more information, see chapter \ref other_api_interop. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkExternalMemoryHandleTypeFlagBits handleType, + HANDLE hTargetProcess, + HANDLE* VMA_NOT_NULL pHandle); +#endif // VMA_EXTERNAL_MEMORY_WIN32 + +/** \brief Maps memory represented by given allocation and returns pointer to it. + +Maps memory represented by given allocation to make it accessible to CPU code. +When succeeded, `*ppData` contains pointer to first byte of this memory. + +\warning +If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is +correctly offsetted to the beginning of region assigned to this particular allocation. +Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block. +You should not add VmaAllocationInfo::offset to it! + +Mapping is internally reference-counted and synchronized, so despite raw Vulkan +function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory` +multiple times simultaneously, it is safe to call this function on allocations +assigned to the same memory block. Actual Vulkan memory will be mapped on first +mapping and unmapped on last unmapping. + +If the function succeeded, you must call vmaUnmapMemory() to unmap the +allocation when mapping is no longer needed or before freeing the allocation, at +the latest. + +It also safe to call this function multiple times on the same allocation. You +must call vmaUnmapMemory() same number of times as you called vmaMapMemory(). + +It is also safe to call this function on allocation created with +#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time. +You must still call vmaUnmapMemory() same number of times as you called +vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the +"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. 
+
+This function fails when used on allocation made in memory type that is not
+`HOST_VISIBLE`.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE* VMA_NOT_NULL ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
+Unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
+Map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Maps the allocation temporarily if needed, copies data from specified host pointer to it, and flushes the memory from the host caches if needed.
+
+\param allocator
+\param pSrcHostPointer Pointer to the host data that becomes the source of the copy.
+\param dstAllocation Handle to the allocation that becomes the destination of the copy.
+\param dstAllocationLocalOffset Offset within `dstAllocation` where to write copied data, in bytes.
+\param size Number of bytes to copy.
+
+This is a convenience function that allows to copy data from a host pointer to an allocation easily.
+Same behavior can be achieved by calling vmaMapMemory(), `memcpy()`, vmaUnmapMemory(), vmaFlushAllocation().
+
+This function can be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Otherwise, the function will fail and generate a Validation Layers error.
+
+`dstAllocationLocalOffset` is relative to the contents of given `dstAllocation`.
+If you mean whole allocation, you should pass 0.
+Do not pass allocation's offset within device memory block as this parameter!
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pSrcHostPointer,
+    VmaAllocation VMA_NOT_NULL dstAllocation,
+    VkDeviceSize dstAllocationLocalOffset,
+    VkDeviceSize size);
+
+/** \brief Invalidates memory in the host caches if needed, maps the allocation temporarily if needed, and copies data from it to a specified host pointer.
+
+\param allocator
+\param srcAllocation Handle to the allocation that becomes the source of the copy.
+\param srcAllocationLocalOffset Offset within `srcAllocation` where to read copied data, in bytes.
+\param pDstHostPointer Pointer to the host memory that becomes the destination of the copy.
+\param size Number of bytes to copy.
+
+This is a convenience function that allows to copy data from an allocation to a host pointer easily.
+Same behavior can be achieved by calling vmaInvalidateAllocation(), vmaMapMemory(), `memcpy()`, vmaUnmapMemory().
+
+This function should be called only for allocations created in a memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT` flag.
+It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Otherwise, the function may fail and generate a Validation Layers error.
+It may also work very slowly when reading from an uncached memory.
+
+`srcAllocationLocalOffset` is relative to the contents of given `srcAllocation`.
+If you mean whole allocation, you should pass 0.
+Do not pass allocation's offset within device memory block as this parameter!
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pDstHostPointer, + VkDeviceSize size); + +/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. + +\param allocator +\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits); + +/** \brief Begins defragmentation process. + +\param allocator Allocator object. +\param pInfo Structure filled with parameters of defragmentation. +\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. +\returns +- `VK_SUCCESS` if defragmentation can begin. +- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. + +For more information about defragmentation, see documentation chapter: +[Defragmentation](@ref defragmentation). 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, + VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); + +/** \brief Ends defragmentation process. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pStats Optional stats for the defragmentation. Can be null. + +Use this function to finish defragmentation started by vmaBeginDefragmentation(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationStats* VMA_NULLABLE pStats); + +/** \brief Starts single defragmentation pass. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pPassInfo Computed information for current pass. +\returns +- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. +- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), + and then preferably try another pass with vmaBeginDefragmentationPass(). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); + +/** \brief Ends single defragmentation pass. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you. + +Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible. 
+ +Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`. +After this call: + +- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY + (which is the default) will be pointing to the new destination place. +- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY + will be freed. + +If no more moves are possible you can end whole defragmentation. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); + +/** \brief Binds buffer to allocation. + +Binds specified buffer to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. +If you want to create a buffer, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindBufferMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateBuffer() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); + +/** \brief Binds buffer to allocation with additional parameters. + +\param allocator +\param allocation +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. +\param buffer +\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. 
Normally it should be null. + +This function is similar to vmaBindBufferMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, + const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext); + +/** \brief Binds image to allocation. + +Binds specified image to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. +If you want to create an image, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindImageMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateImage() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); + +/** \brief Binds image to allocation with additional parameters. + +\param allocator +\param allocation +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. +\param image +\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. 
+
+This function is similar to vmaBindImageMemory(), but it provides additional parameters.
+
+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+    const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
+
+/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
+
+\param allocator The main allocator object.
+\param pBufferCreateInfo Buffer creation parameters.
+\param pAllocationCreateInfo Allocation creation parameters.
+\param[out] pBuffer Buffer that was created.
+\param[out] pAllocation Allocation that was created.
+\param[out] pAllocationInfo Optional, can be null. Information about allocated memory.
+    It can be also fetched later using vmaGetAllocationInfo().
+
+This function automatically:
+
+-# Creates buffer.
+-# Allocates appropriate memory for it.
+-# Binds the buffer with the memory.
+
+If any of these operations fail, buffer and allocation are not created,
+returned value is negative error code, `*pBuffer` and `*pAllocation` are returned as null.
+
+If the function succeeded, you must destroy both buffer and allocation when you
+no longer need them using either convenience function vmaDestroyBuffer() or
+separately, using `vkDestroyBuffer()` and vmaFreeMemory().
+
+If VK_KHR_dedicated_allocation extension or Vulkan version >= 1.1 is used,
+the function queries the driver whether
+it requires or prefers the new buffer to have dedicated allocation.
If yes,
+and if dedicated allocation is possible
+(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
+allocation for this buffer, just like when using
+#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+
+\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
+although recommended as a good practice, is out of scope of this library and could be implemented
+by the user as a higher-level logic on top of VMA.
+
+There is also an extended version of this function available with additional parameter `pMemoryAllocateNext` -
+see vmaCreateDedicatedBuffer().
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a buffer with additional minimum alignment.
+
+Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom,
+minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
+for interop with OpenGL.
+
+\deprecated
+This function is obsolete since new VmaAllocationCreateInfo::minAlignment member allows specifying custom
+alignment while using any allocation function, like the standard vmaCreateBuffer().
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkDeviceSize minAlignment,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a dedicated buffer while offering extra parameter `pMemoryAllocateNext`.
+
+This function is similar to vmaCreateBuffer(), but
+it always allocates dedicated memory for the buffer - flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is implied.
+It offers additional parameter `pMemoryAllocateNext`,
+which can be used to attach `pNext` chain to the `VkMemoryAllocateInfo` structure.
+It can be useful for importing external memory. For more information, see \ref other_api_interop.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateDedicatedBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a new `VkBuffer`, binds already created memory for it.
+
+\param allocator
+\param allocation Allocation that provides memory to be used for binding new buffer to it.
+\param pBufferCreateInfo
+\param[out] pBuffer Buffer that was created.
+
+This function automatically:
+
+-# Creates buffer.
+-# Binds the buffer with the supplied memory.
+
+If any of these operations fail, buffer is not created,
+returned value is negative error code and `*pBuffer` is null.
+ +If the function succeeded, you must destroy the buffer when you +no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding +allocation you can use convenience function vmaDestroyBuffer(). + +\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2(). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + +/** \brief Creates a new `VkBuffer`, binds already created memory for it. + +\param allocator +\param allocation Allocation that provides memory to be used for binding new buffer to it. +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0. +\param pBufferCreateInfo +\param[out] pBuffer Buffer that was created. + +This function automatically: + +-# Creates buffer. +-# Binds the buffer with the supplied memory. + +If any of these operations fail, buffer is not created, +returned value is negative error code and `*pBuffer` is null. + +If the function succeeded, you must destroy the buffer when you +no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding +allocation you can use convenience function vmaDestroyBuffer(). + +\note This is a new version of the function augmented with parameter `allocationLocalOffset`. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + +/** \brief Destroys Vulkan buffer and frees allocated memory. 
+ +This is just a convenience function equivalent to: + +\code +vkDestroyBuffer(device, buffer, allocationCallbacks); +vmaFreeMemory(allocator, allocation); +\endcode + +It is safe to pass null as buffer and/or allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, + VmaAllocation VMA_NULLABLE allocation); + +/** \brief Function similar to vmaCreateBuffer() but for images. + +There is also an extended version of this function available: vmaCreateDedicatedImage() +which offers additional parameter `pMemoryAllocateNext`. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Function similar to vmaCreateDedicatedBuffer() but for images. + +This function is similar vmaCreateImage(), but +it always allocates dedicated memory for the image - flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is implied. +It offers additional parameter `pMemoryAllocateNext`, +which can be used to attach `pNext` chain to the `VkMemoryAllocateInfo` structure. +It can be useful for importing external memory. For more information, see \ref other_api_interop. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateDedicatedImage( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/// Function similar to vmaCreateAliasingBuffer() but for images. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); + +/// Function similar to vmaCreateAliasingBuffer2() but for images. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); + +/** \brief Destroys Vulkan image and frees allocated memory. + +This is just a convenience function equivalent to: + +\code +vkDestroyImage(device, image, allocationCallbacks); +vmaFreeMemory(allocator, allocation); +\endcode + +It is safe to pass null as image and/or allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation); + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \brief Creates new #VmaVirtualBlock object. + +\param pCreateInfo Parameters for creation. +\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed. 
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+    const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
+
+/** \brief Destroys #VmaVirtualBlock object.
+
+Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
+You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
+if you are sure this is what you want. If you do neither, an assert is called.
+
+If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
+don't forget to free them.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
+    VmaVirtualBlock VMA_NULLABLE virtualBlock);
+
+/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+
+/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+
+If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
+(despite the fact that the function doesn't ever allocate actual GPU memory).
+`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
+
+\param virtualBlock Virtual block
+\param pCreateInfo Parameters for the allocation
+\param[out] pAllocation Returned handle of the new allocation
+\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset); + +/** \brief Frees virtual allocation inside given #VmaVirtualBlock. + +It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation); + +/** \brief Frees all virtual allocations inside given #VmaVirtualBlock. + +You must either call this function or free each virtual allocation individually with vmaVirtualFree() +before destroying a virtual block. Otherwise, an assert is called. + +If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`, +don't forget to free it as well. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock( + VmaVirtualBlock VMA_NOT_NULL virtualBlock); + +/** \brief Changes custom pointer associated with given virtual allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, + void* VMA_NULLABLE pUserData); + +/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats); + +/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is slow to call. Use for debugging purposes. 
+For less detailed statistics, see vmaGetVirtualBlockStatistics(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats); + +/** @} */ + +#if VMA_STATS_STRING_ENABLED +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. +\param virtualBlock Virtual block. +\param[out] ppStatsString Returned string. +\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. + +Returned string must be freed using vmaFreeVirtualBlockStatsString(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + +/// Frees a string returned by vmaBuildVirtualBlockStatsString(). +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString); + +/** \brief Builds and returns statistics as a null-terminated string in JSON format. +\param allocator +\param[out] ppStatsString Must be freed using vmaFreeStatsString() function. 
+\param detailedMap +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE pStatsString); + +/** @} */ + +#endif // VMA_STATS_STRING_ENABLED + +#endif // _VMA_FUNCTION_HEADERS + +#ifdef __cplusplus +} +#endif + +#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION +// +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// For Visual Studio IntelliSense. +#if defined(__cplusplus) && defined(__INTELLISENSE__) +#define VMA_IMPLEMENTATION +#endif + +#ifdef VMA_IMPLEMENTATION +#undef VMA_IMPLEMENTATION + +#include +#include +#include +#include +#include +#include + +#if !defined(VMA_CPP20) + #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20 + #define VMA_CPP20 1 + #else + #define VMA_CPP20 0 + #endif +#endif + +#ifdef _MSC_VER + #include // For functions like __popcnt, _BitScanForward etc. +#endif +#if VMA_CPP20 + #include +#endif + +#if VMA_STATS_STRING_ENABLED + #include // For snprintf +#endif + +/******************************************************************************* +CONFIGURATION SECTION + +Define some of these macros before each #include of this header or change them +here if you need other then default behavior depending on your environment. 
+*/ +#ifndef _VMA_CONFIGURATION + +/* +Define this macro to 1 to make the library fetch pointers to Vulkan functions +internally, like: + + vulkanFunctions.vkAllocateMemory = &vkAllocateMemory; +*/ +#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES) + #define VMA_STATIC_VULKAN_FUNCTIONS 1 +#endif + +/* +Define this macro to 1 to make the library fetch pointers to Vulkan functions +internally, like: + + vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory"); + +To use this feature in new versions of VMA you now have to pass +VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as +VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null. +*/ +#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS) + #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 +#endif + +#ifndef VMA_USE_STL_SHARED_MUTEX + #if !defined(__APPLE__) && (__cplusplus >= 201703L || _MSVC_LANG >= 201703L) // C++17 + #define VMA_USE_STL_SHARED_MUTEX 1 + // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus + // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2. + #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L + #define VMA_USE_STL_SHARED_MUTEX 1 + #else + #define VMA_USE_STL_SHARED_MUTEX 0 + #endif +#endif + +/* +Define this macro to include custom header files without having to edit this file directly, e.g.: + + // Inside of "my_vma_configuration_user_includes.h": + + #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT + #include "my_custom_min.h" // for my_custom_min + #include + #include + + // Inside a different file, which includes "vk_mem_alloc.h": + + #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h" + #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr) + #define VMA_MIN(v1, v2) (my_custom_min(v1, v2)) + #include "vk_mem_alloc.h" + ... 
+
+The following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
+    #include <cassert> // for assert
+    #include <algorithm> // for min, max, swap
+    #include <mutex>
+#else
+    #include VMA_CONFIGURATION_USER_INCLUDES_H
+#endif
+
+#ifndef VMA_NULL
+    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+    #define VMA_NULL nullptr
+#endif
+
+#ifndef VMA_FALLTHROUGH
+    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
+        #define VMA_FALLTHROUGH [[fallthrough]]
+    #else
+        #define VMA_FALLTHROUGH
+    #endif
+#endif
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+    #ifdef NDEBUG
+        #define VMA_ASSERT(expr)
+    #else
+        #define VMA_ASSERT(expr) assert(expr)
+    #endif
+#endif
+
+// Assert that will be called very often, like inside data structures e.g. operator[].
+// Making it non-empty can make program slow.
+#ifndef VMA_HEAVY_ASSERT
+    #ifdef NDEBUG
+        #define VMA_HEAVY_ASSERT(expr)
+    #else
+        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+    #endif
+#endif
+
+// Assert used for reporting memory leaks - unfreed allocations.
+#ifndef VMA_ASSERT_LEAK
+    #define VMA_ASSERT_LEAK(expr) VMA_ASSERT(expr)
+#endif
+
+// If your compiler is not compatible with C++17 and definition of
+// aligned_alloc() function is missing, uncommenting following line may help:
+
+//#include <malloc.h>
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <malloc.h> // for memalign
+namespace
+{
+// Old Android (API < 16): no posix_memalign/aligned_alloc, use memalign.
+void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    return memalign(alignment, size);
+}
+} // namespace
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+#include <cstdlib> // for posix_memalign
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+#endif
+
+namespace
+{
+void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
+    // Therefore, for now disable this specific exception until a proper solution is found.
+    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
+    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+    //    // MAC_OS_X_VERSION_10_16), even though the function is marked
+    //    // available for 10.15. That is why the preprocessor checks for 10.16 but
+    //    // the __builtin_available checks for 10.15.
+    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+ // if (__builtin_available(macOS 10.15, iOS 13, *)) + // return aligned_alloc(alignment, size); + //#endif + //#endif + + // alignment must be >= sizeof(void*) + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); + } + + void *pointer; + if(posix_memalign(&pointer, alignment, size) == 0) + return pointer; + return VMA_NULL; +} +} // namespace +#elif defined(_WIN32) +namespace { +void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return _aligned_malloc(size, alignment); +} +} // namespace +#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 +namespace { +void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return aligned_alloc(alignment, size); +} +} // namespace +#else +namespace +{ +void* vma_aligned_alloc(size_t alignment, size_t size) +{ + VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system."); + return VMA_NULL; +} +} // namespace +#endif + +namespace +{ +#if defined(_WIN32) +void vma_aligned_free(void* ptr) +{ + _aligned_free(ptr); +} +#else +void vma_aligned_free(void* VMA_NULLABLE ptr) +{ + free(ptr); +} +#endif +} // namespace + +#ifndef VMA_ALIGN_OF + #define VMA_ALIGN_OF(type) (alignof(type)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_MALLOC + #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_FREE + // VMA_SYSTEM_FREE is the old name, but might have been defined by the user + #if defined(VMA_SYSTEM_FREE) + #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) + #else + #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) + #endif +#endif + +#ifndef VMA_COUNT_BITS_SET + // Returns number of bits set to 1 in (v) + #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) +#endif + +#ifndef VMA_BITSCAN_LSB + // Scans integer for index of first nonzero 
value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) +#endif + +#ifndef VMA_BITSCAN_MSB + // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) +#endif + +#ifndef VMA_MIN + #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) +#endif + +#ifndef VMA_MAX + #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) +#endif + +#ifndef VMA_SORT + #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) +#endif + +#ifndef VMA_DEBUG_LOG_FORMAT + #define VMA_DEBUG_LOG_FORMAT(format, ...) + /* + #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \ + printf((format), __VA_ARGS__); \ + printf("\n"); \ + } while(false) + */ +#endif + +#ifndef VMA_DEBUG_LOG + #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str)) +#endif + +#ifndef VMA_LEAK_LOG_FORMAT + #define VMA_LEAK_LOG_FORMAT(format, ...) VMA_DEBUG_LOG_FORMAT(format, __VA_ARGS__) +#endif + +#ifndef VMA_CLASS_NO_COPY + #define VMA_CLASS_NO_COPY(className) \ + private: \ + className(const className&) = delete; \ + className& operator=(const className&) = delete; +#endif +#ifndef VMA_CLASS_NO_COPY_NO_MOVE + #define VMA_CLASS_NO_COPY_NO_MOVE(className) \ + private: \ + className(const className&) = delete; \ + className(className&&) = delete; \ + className& operator=(const className&) = delete; \ + className& operator=(className&&) = delete; +#endif + +// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. 
+#if VMA_STATS_STRING_ENABLED
+namespace {
+    // Small helpers converting numbers/pointers to decimal/hex strings for the JSON writer.
+    inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+    {
+        snprintf(outStr, strLen, "%" PRIu32, num);
+    }
+    inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+    {
+        snprintf(outStr, strLen, "%" PRIu64, num);
+    }
+    inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
+    {
+        snprintf(outStr, strLen, "%p", ptr);
+    }
+} // namespace
+#endif
+
+#ifndef VMA_MUTEX
+    class VmaMutex
+    {
+        VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
+    public:
+        VmaMutex() = default;
+        void Lock() { m_Mutex.lock(); }
+        void Unlock() { m_Mutex.unlock(); }
+        bool TryLock() { return m_Mutex.try_lock(); }
+    private:
+        std::mutex m_Mutex;
+    };
+    #define VMA_MUTEX VmaMutex
+#endif
+
+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+#ifndef VMA_RW_MUTEX
+    #if VMA_USE_STL_SHARED_MUTEX
+        // Use std::shared_mutex from C++17.
+        #include <shared_mutex>
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.lock_shared(); }
+            void UnlockRead() { m_Mutex.unlock_shared(); }
+            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+            void LockWrite() { m_Mutex.lock(); }
+            void UnlockWrite() { m_Mutex.unlock(); }
+            bool TryLockWrite() { return m_Mutex.try_lock(); }
+        private:
+            std::shared_mutex m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #elif defined(_WIN32) && defined(WINVER) && defined(SRWLOCK_INIT) && WINVER >= 0x0600
+        // Use SRWLOCK from WinAPI.
+        // Minimum supported client = Windows Vista, server = Windows Server 2008.
+ class VmaRWMutex + { + public: + VmaRWMutex() { InitializeSRWLock(&m_Lock); } + void LockRead() { AcquireSRWLockShared(&m_Lock); } + void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } + bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; } + void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } + void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } + bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; } + private: + SRWLOCK m_Lock; + }; + #define VMA_RW_MUTEX VmaRWMutex + #else + // Less efficient fallback: Use normal mutex. + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.Lock(); } + void UnlockRead() { m_Mutex.Unlock(); } + bool TryLockRead() { return m_Mutex.TryLock(); } + void LockWrite() { m_Mutex.Lock(); } + void UnlockWrite() { m_Mutex.Unlock(); } + bool TryLockWrite() { return m_Mutex.TryLock(); } + private: + VMA_MUTEX m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #endif // #if VMA_USE_STL_SHARED_MUTEX +#endif // #ifndef VMA_RW_MUTEX + +/* +If providing your own implementation, you need to implement a subset of std::atomic. +*/ +#ifndef VMA_ATOMIC_UINT32 + #include + #define VMA_ATOMIC_UINT32 std::atomic +#endif + +#ifndef VMA_ATOMIC_UINT64 + #include + #define VMA_ATOMIC_UINT64 std::atomic +#endif + +#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY + /** + Every allocation will have its own memory block. + Define to 1 for debugging purposes only. + */ + #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) +#endif + +#ifndef VMA_MIN_ALIGNMENT + /** + Minimum alignment of all allocations, in bytes. + Set to more than 1 for debugging purposes. Must be power of two. + */ + #ifdef VMA_DEBUG_ALIGNMENT // Old name + #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT + #else + #define VMA_MIN_ALIGNMENT (1) + #endif +#endif + +#ifndef VMA_DEBUG_MARGIN + /** + Minimum margin after every allocation, in bytes. + Set nonzero for debugging purposes only. 
+ */ + #define VMA_DEBUG_MARGIN (0) +#endif + +#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS + /** + Define this macro to 1 to automatically fill new allocations and destroyed + allocations with some bit pattern. + */ + #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) +#endif + +#ifndef VMA_DEBUG_DETECT_CORRUPTION + /** + Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to + enable writing magic value to the margin after every allocation and + validating it, so that memory corruptions (out-of-bounds writes) are detected. + */ + #define VMA_DEBUG_DETECT_CORRUPTION (0) +#endif + +#ifndef VMA_DEBUG_GLOBAL_MUTEX + /** + Set this to 1 for debugging purposes only, to enable single mutex protecting all + entry calls to the library. Can be useful for debugging multithreading issues. + */ + #define VMA_DEBUG_GLOBAL_MUTEX (0) +#endif + +#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY + /** + Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) +#endif + +#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + /* + Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount + and return error instead of leaving up to Vulkan implementation what to do in such cases. + */ + #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (1) +#endif + +#ifndef VMA_DEBUG_DONT_EXCEED_HEAP_SIZE_WITH_ALLOCATION_SIZE + /* + Set this to 1 to make VMA never exceed VkPhysicalDeviceMemoryProperties::memoryHeaps[i].size + with a single allocation size VkMemoryAllocateInfo::allocationSize + and return error instead of leaving up to Vulkan implementation what to do in such cases. + It protects agaist validation error VUID-vkAllocateMemory-pAllocateInfo-01713. + On the other hand, allowing exceeding this size may result in a successful allocation despite the validation error. 
+ */ + #define VMA_DEBUG_DONT_EXCEED_HEAP_SIZE_WITH_ALLOCATION_SIZE (1) +#endif + +#ifndef VMA_SMALL_HEAP_MAX_SIZE + /// Maximum size of a memory heap in Vulkan to consider it "small". + #define VMA_SMALL_HEAP_MAX_SIZE (1024ULL * 1024 * 1024) +#endif + +#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE + /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. + #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ULL * 1024 * 1024) +#endif + +/* +Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called +or a persistently mapped allocation is created and destroyed several times in a row. +It keeps additional +1 mapping of a device memory block to prevent calling actual +vkMapMemory/vkUnmapMemory too many times, which may improve performance and help +tools like RenderDoc. +*/ +#ifndef VMA_MAPPING_HYSTERESIS_ENABLED + #define VMA_MAPPING_HYSTERESIS_ENABLED 1 +#endif + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +/******************************************************************************* +END OF CONFIGURATION +*/ +#endif // _VMA_CONFIGURATION + +namespace +{ +constexpr uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; +constexpr uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; +// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. +constexpr uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; + +// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. 
+constexpr uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; +constexpr uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; +constexpr uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000; +constexpr uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200; +constexpr int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000; +constexpr uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000U; +constexpr uint32_t VMA_ALLOCATION_TRY_COUNT = 32; +constexpr uint32_t VMA_VENDOR_ID_AMD = 4098; + +// This one is tricky. Vulkan specification defines this code as available since +// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131. +// See pull request #207. +#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13) +} // namespace + + +#if VMA_STATS_STRING_ENABLED +// Correspond to values of enum VmaSuballocationType. +const char* const VMA_SUBALLOCATION_TYPE_NAMES[] = +{ + "FREE", + "UNKNOWN", + "BUFFER", + "IMAGE_UNKNOWN", + "IMAGE_LINEAR", + "IMAGE_OPTIMAL", +}; +#endif + +const VkAllocationCallbacks VmaEmptyAllocationCallbacks = + { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL }; + + +#ifndef _VMA_ENUM_DECLARATIONS + +enum VmaSuballocationType +{ + VMA_SUBALLOCATION_TYPE_FREE = 0, + VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, + VMA_SUBALLOCATION_TYPE_BUFFER = 2, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4, + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5, + VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +}; + +enum VMA_CACHE_OPERATION +{ + VMA_CACHE_FLUSH, + VMA_CACHE_INVALIDATE +}; + +enum class VmaAllocationRequestType +{ + Normal, + TLSF, + // Used by "Linear" algorithm. + UpperAddress, + EndOf1st, + EndOf2nd, +}; + +#endif // _VMA_ENUM_DECLARATIONS + +#ifndef _VMA_FORWARD_DECLARATIONS +// Opaque handle used by allocation algorithms to identify single allocation in any conforming way. 
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
+
+struct VmaBufferImageUsage;
+
+struct VmaMutexLock;
+struct VmaMutexLockRead;
+struct VmaMutexLockWrite;
+
+// NOTE(review): template parameter lists below were lost in transit; restored per upstream vk_mem_alloc.h.
+template<typename T>
+struct AtomicTransactionalIncrement;
+
+template<typename T>
+struct VmaStlAllocator;
+
+template<typename T, typename AllocatorT>
+class VmaVector;
+
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector;
+
+template<typename T>
+class VmaPoolAllocator;
+
+template<typename T>
+struct VmaListItem;
+
+template<typename T>
+class VmaRawList;
+
+template<typename T, typename AllocatorT>
+class VmaList;
+
+template<typename ItemTypeTraits>
+class VmaIntrusiveLinkedList;
+
+#if VMA_STATS_STRING_ENABLED
+class VmaStringBuilder;
+class VmaJsonWriter;
+#endif
+
+class VmaDeviceMemoryBlock;
+
+struct VmaDedicatedAllocationListItemTraits;
+class VmaDedicatedAllocationList;
+
+struct VmaSuballocation;
+struct VmaSuballocationOffsetLess;
+struct VmaSuballocationOffsetGreater;
+struct VmaSuballocationItemSizeLess;
+
+typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
+
+struct VmaAllocationRequest;
+
+class VmaBlockMetadata;
+class VmaBlockMetadata_Linear;
+class VmaBlockMetadata_TLSF;
+
+class VmaBlockVector;
+
+struct VmaPoolListItemTraits;
+
+struct VmaCurrentBudgetData;
+
+class VmaAllocationObjectAllocator;
+
+#endif // _VMA_FORWARD_DECLARATIONS
+
+#ifndef _VMA_BUFFER_IMAGE_USAGE
+
+// Finds structure with s->sType == sType in mainStruct->pNext chain.
+// Returns pointer to it. If not found, returns null.
+template<typename FindT, typename MainT>
+inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)
+{
+    for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;
+        s != VMA_NULL; s = s->pNext)
+    {
+        if(s->sType == sType)
+        {
+            return (const FindT*)s;
+        }
+    }
+    return VMA_NULL;
+}
+
+// An abstraction over buffer or image `usage` flags, depending on available extensions.
+struct VmaBufferImageUsage +{ +#if VMA_KHR_MAINTENANCE5 + typedef uint64_t BaseType; // VkFlags64 +#else + typedef uint32_t BaseType; // VkFlags32 +#endif + + static const VmaBufferImageUsage UNKNOWN; + + BaseType Value; + + VmaBufferImageUsage() { *this = UNKNOWN; } + explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { } + VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5); + explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo); + + bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; } + bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; } + + bool Contains(BaseType flag) const { return (Value & flag) != 0; } + bool ContainsDeviceAccess() const + { + // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_IMAGE_TRANSFER*. + return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0; + } +}; + +const VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0); + +VmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, + bool useKhrMaintenance5) +{ +#if VMA_KHR_MAINTENANCE5 + if(useKhrMaintenance5) + { + // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR, + // take usage from it and ignore VkBufferCreateInfo::usage, per specification + // of the VK_KHR_maintenance5 extension. 
+ const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 = + VmaPnextChainFind(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR); + if(usageFlags2 != VMA_NULL) + { + this->Value = usageFlags2->usage; + return; + } + } +#endif + + this->Value = (BaseType)createInfo.usage; +} + +VmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo) + : Value((BaseType)createInfo.usage) +{ + // Maybe in the future there will be VK_KHR_maintenanceN extension with structure + // VkImageUsageFlags2CreateInfoKHR, like the one for buffers... +} + +#endif // _VMA_BUFFER_IMAGE_USAGE + +#ifndef _VMA_FUNCTIONS + +namespace +{ + +/* +Returns number of bits set to 1 in (v). + +On specific platforms and compilers you can use intrinsics like: + +Visual Studio: + return __popcnt(v); +GCC, Clang: + return static_cast(__builtin_popcount(v)); + +Define macro VMA_COUNT_BITS_SET to provide your optimized implementation. +But you need to check in runtime whether user's CPU supports these, as some old processors don't. 
+*/ +inline uint32_t VmaCountBitsSet(uint32_t v) +{ +#if VMA_CPP20 + return std::popcount(v); +#else + uint32_t c = v - ((v >> 1) & 0x55555555); + c = ((c >> 2) & 0x33333333) + (c & 0x33333333); + c = ((c >> 4) + c) & 0x0F0F0F0F; + c = ((c >> 8) + c) & 0x00FF00FF; + c = ((c >> 16) + c) & 0x0000FFFF; + return c; +#endif +} + +inline uint8_t VmaBitScanLSB(uint64_t mask) +{ +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long pos; + if (_BitScanForward64(&pos, mask)) + return static_cast(pos); + return UINT8_MAX; +#elif VMA_CPP20 + if(mask != 0) + return static_cast(std::countr_zero(mask)); + return UINT8_MAX; +#elif defined __GNUC__ || defined __clang__ + return static_cast(__builtin_ffsll(mask)) - 1U; +#else + uint8_t pos = 0; + uint64_t bit = 1; + do + { + if (mask & bit) + return pos; + bit <<= 1; + } while (pos++ < 63); + return UINT8_MAX; +#endif +} + +inline uint8_t VmaBitScanLSB(uint32_t mask) +{ +#ifdef _MSC_VER + unsigned long pos; + if (_BitScanForward(&pos, mask)) + return static_cast(pos); + return UINT8_MAX; +#elif VMA_CPP20 + if(mask != 0) + return static_cast(std::countr_zero(mask)); + return UINT8_MAX; +#elif defined __GNUC__ || defined __clang__ + return static_cast(__builtin_ffs(mask)) - 1U; +#else + uint8_t pos = 0; + uint32_t bit = 1; + do + { + if (mask & bit) + return pos; + bit <<= 1; + } while (pos++ < 31); + return UINT8_MAX; +#endif +} + +inline uint8_t VmaBitScanMSB(uint64_t mask) +{ +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long pos; + if (_BitScanReverse64(&pos, mask)) + return static_cast(pos); +#elif VMA_CPP20 + if(mask != 0) + return 63 - static_cast(std::countl_zero(mask)); +#elif defined __GNUC__ || defined __clang__ + if (mask != 0) + return 63 - static_cast(__builtin_clzll(mask)); +#else + uint8_t pos = 63; + uint64_t bit = 1ULL << 63; + do + { + if (mask & bit) + return pos; + bit >>= 1; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + +inline uint8_t VmaBitScanMSB(uint32_t mask) +{ +#ifdef _MSC_VER + 
unsigned long pos; + if (_BitScanReverse(&pos, mask)) + return static_cast(pos); +#elif VMA_CPP20 + if(mask != 0) + return 31 - static_cast(std::countl_zero(mask)); +#elif defined __GNUC__ || defined __clang__ + if (mask != 0) + return 31 - static_cast(__builtin_clz(mask)); +#else + uint8_t pos = 31; + uint32_t bit = 1UL << 31; + do + { + if (mask & bit) + return pos; + bit >>= 1; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + +/* +Returns true if given number is a power of two. +T must be unsigned integer number or signed integer but always nonnegative. +For 0 returns true. +*/ +template +inline bool VmaIsPow2(T x) +{ + return (x & (x - 1)) == 0; +} + +// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16. +// Use types like uint32_t, uint64_t as T. +template +inline T VmaAlignUp(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return (val + alignment - 1) & ~(alignment - 1); +} + +// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8. +// Use types like uint32_t, uint64_t as T. +template +inline T VmaAlignDown(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return val & ~(alignment - 1); +} + +// Division with mathematical rounding to nearest number. +template +inline T VmaRoundDiv(T x, T y) +{ + return (x + (y / (T)2)) / y; +} + +// Divide by 'y' and round up to nearest integer. +template +inline T VmaDivideRoundingUp(T x, T y) +{ + return (x + y - (T)1) / y; +} + +// Returns smallest power of 2 greater or equal to v. +inline uint32_t VmaNextPow2(uint32_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v++; + return v; +} + +inline uint64_t VmaNextPow2(uint64_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v++; + return v; +} + +// Returns largest power of 2 less or equal to v. 
+inline uint32_t VmaPrevPow2(uint32_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v = v ^ (v >> 1); + return v; +} + +inline uint64_t VmaPrevPow2(uint64_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v = v ^ (v >> 1); + return v; +} + +inline bool VmaStrIsEmpty(const char* pStr) +{ + return pStr == VMA_NULL || *pStr == '\0'; +} + +/* +Returns true if two memory blocks occupy overlapping pages. +ResourceA must be in less memory offset than ResourceB. + +Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)" +chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". +*/ +inline bool VmaBlocksOnSamePage( + VkDeviceSize resourceAOffset, + VkDeviceSize resourceASize, + VkDeviceSize resourceBOffset, + VkDeviceSize pageSize) +{ + VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); + VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; + VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); + VkDeviceSize resourceBStart = resourceBOffset; + VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); + return resourceAEndPage == resourceBStartPage; +} + +/* +Returns true if given suballocation types could conflict and must respect +VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer +or linear image and another one is optimal image. If type is unknown, behave +conservatively. 
+*/ +inline bool VmaIsBufferImageGranularityConflict( + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if (suballocType1 > suballocType2) + { + std::swap(suballocType1, suballocType2); + } + + switch (suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; + } +} + +void VmaWriteMagicValue(void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pDst) + { + *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; + } +#else + // no-op +#endif +} + +bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { + return false; + } + } +#endif + return true; +} + +/* +Fills structure with parameters of an example buffer to be used for transfers +during GPU memory defragmentation. 
+*/ +void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ + memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); + outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. +} + + +/* +Performs binary search and returns iterator to first element that is greater or +equal to (key), according to comparison (cmp). + +Cmp should return true if first argument is less than second argument. + +Returned value is the found element, if present in the collection or place where +new element with value (key) should be inserted. +*/ +template +IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp) +{ + size_t down = 0; + size_t up = size_t(end - beg); + while (down < up) + { + const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation + if (cmp(*(beg + mid), key)) + { + down = mid + 1; + } + else + { + up = mid; + } + } + return beg + down; +} + +template +IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) +{ + IterT it = VmaBinaryFindFirstNotLess( + beg, end, value, cmp); + if (it == end || + (!cmp(*it, value) && !cmp(value, *it))) + { + return it; + } + return end; +} + +/* +Returns true if all pointers in the array are not-null and unique. +Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. +T must be pointer type, e.g. VmaAllocation, VmaPool. 
+*/ +template +bool VmaValidatePointerArray(uint32_t count, const T* arr) +{ + for (uint32_t i = 0; i < count; ++i) + { + const T iPtr = arr[i]; + if (iPtr == VMA_NULL) + { + return false; + } + for (uint32_t j = i + 1; j < count; ++j) + { + if (iPtr == arr[j]) + { + return false; + } + } + } + return true; +} + +template +inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) +{ + newStruct->pNext = mainStruct->pNext; + mainStruct->pNext = newStruct; +} + +// This is the main algorithm that guides the selection of a memory type best for an allocation - +// converts usage to required/preferred/not preferred flags. +bool FindMemoryPreferences( + bool isIntegratedGPU, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaBufferImageUsage bufImgUsage, + VkMemoryPropertyFlags& outRequiredFlags, + VkMemoryPropertyFlags& outPreferredFlags, + VkMemoryPropertyFlags& outNotPreferredFlags) +{ + outRequiredFlags = allocCreateInfo.requiredFlags; + outPreferredFlags = allocCreateInfo.preferredFlags; + outNotPreferredFlags = 0; + + switch(allocCreateInfo.usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + 
case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + case VMA_MEMORY_USAGE_AUTO: + case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: + case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: + { + if(bufImgUsage == VmaBufferImageUsage::UNKNOWN) + { + VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known." + " Maybe you use VkBufferUsageFlags2CreateInfoKHR but forgot to use VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT?" ); + return false; + } + + const bool deviceAccess = bufImgUsage.ContainsDeviceAccess(); + const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; + const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; + const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; + const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; + + // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. + if(hostAccessRandom) + { + // Prefer cached. Cannot require it, because some platforms don't have it (e.g. Raspberry Pi - see #362)! + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + + if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. + // Omitting HOST_VISIBLE here is intentional. + // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one. + // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list. 
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + else + { + if(hostAccessAllowTransferInstead) + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + else + // Always CPU memory. + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + } + // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined. + else if(hostAccessSequentialWrite) + { + // Want uncached and write-combined. + outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + + if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + else + { + if(hostAccessAllowTransferInstead) + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + else + // Always CPU memory. + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + + // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame) + if(deviceAccess) + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU) + else + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory. + if(preferDevice) + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + } + } + // No CPU access + else + { + // if(deviceAccess) + // + // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory, + // unless there is a clear preference from the user not to do so. + // + // else: + // + // No direct GPU access, no CPU access, just transfers. 
+ // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or + // a "swap file" copy to free some GPU memory (then better CPU memory). + // Up to the user to decide. If no preferece, assume the former and choose GPU memory. + + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + } + default: + VMA_ASSERT(0); + } + + // Avoid DEVICE_COHERENT unless explicitly requested. + if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + { + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY; + } + + return true; +} + +//////////////////////////////////////////////////////////////////////////////// +// Memory allocation + +inline void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) +{ + void* result = VMA_NULL; + if ((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnAllocation != VMA_NULL)) + { + result = (*pAllocationCallbacks->pfnAllocation)( + pAllocationCallbacks->pUserData, + size, + alignment, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + else + { + result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment); + } + VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed."); + return result; +} + +inline void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) +{ + if ((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnFree != VMA_NULL)) + { + (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr); + } + else + { + VMA_SYSTEM_ALIGNED_FREE(ptr); + } +} + +template +T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +T* VmaAllocateArray(const VkAllocationCallbacks* 
pAllocationCallbacks, size_t count) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +#define vma_new(allocator, type) new(VmaAllocate(allocator))(type) + +#define vma_new_array(allocator, type, count) new(VmaAllocateArray((allocator), (count)))(type) + +template +void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) +{ + ptr->~T(); + VmaFree(pAllocationCallbacks, ptr); +} + +template +void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) +{ + if (ptr != VMA_NULL) + { + for (size_t i = count; i--; ) + { + ptr[i].~T(); + } + VmaFree(pAllocationCallbacks, ptr); + } +} + +char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) +{ + if (srcStr != VMA_NULL) + { + const size_t len = strlen(srcStr); + char* const result = vma_new_array(allocs, char, len + 1); + memcpy(result, srcStr, len + 1); + return result; + } + return VMA_NULL; +} + +#if VMA_STATS_STRING_ENABLED +char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen) +{ + if (srcStr != VMA_NULL) + { + char* const result = vma_new_array(allocs, char, strLen + 1); + memcpy(result, srcStr, strLen); + result[strLen] = '\0'; + return result; + } + return VMA_NULL; +} +#endif // VMA_STATS_STRING_ENABLED + +void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) +{ + if (str != VMA_NULL) + { + const size_t len = strlen(str); + vma_delete_array(allocs, str, len + 1); + } +} + +template +size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + vector.data(), + vector.data() + vector.size(), + value, + CmpLess()) - vector.data(); + VmaVectorInsert(vector, indexToInsert, value); + return indexToInsert; +} + +template +bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + CmpLess comparator; + typename 
VectorT::iterator it = VmaBinaryFindFirstNotLess( + vector.begin(), + vector.end(), + value, + comparator); + if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) + { + size_t indexToRemove = it - vector.begin(); + VmaVectorRemove(vector, indexToRemove); + return true; + } + return false; +} + +} // namespace + +#endif // _VMA_FUNCTIONS + +#ifndef _VMA_STATISTICS_FUNCTIONS + +namespace +{ + +void VmaClearStatistics(VmaStatistics& outStats) +{ + outStats.blockCount = 0; + outStats.allocationCount = 0; + outStats.blockBytes = 0; + outStats.allocationBytes = 0; +} + +void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) +{ + inoutStats.blockCount += src.blockCount; + inoutStats.allocationCount += src.allocationCount; + inoutStats.blockBytes += src.blockBytes; + inoutStats.allocationBytes += src.allocationBytes; +} + +void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) +{ + VmaClearStatistics(outStats.statistics); + outStats.unusedRangeCount = 0; + outStats.allocationSizeMin = VK_WHOLE_SIZE; + outStats.allocationSizeMax = 0; + outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; + outStats.unusedRangeSizeMax = 0; +} + +void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.statistics.allocationCount++; + inoutStats.statistics.allocationBytes += size; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); +} + +void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.unusedRangeCount++; + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); +} + +void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) +{ + VmaAddStatistics(inoutStats.statistics, src.statistics); + 
inoutStats.unusedRangeCount += src.unusedRangeCount; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); +} + +} // namespace + +#endif // _VMA_STATISTICS_FUNCTIONS + +#ifndef _VMA_MUTEX_LOCK +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock) +public: + explicit VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->Lock(); } + } + ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } + +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockRead(); } + } + ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. +struct VmaMutexLockWrite +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite) +public: + VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) + : m_pMutex(useMutex ? 
&mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockWrite(); } + } + ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +#if VMA_DEBUG_GLOBAL_MUTEX + static VMA_MUTEX gDebugGlobalMutex; + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); +#else + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK +#endif +#endif // _VMA_MUTEX_LOCK + +#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT +// An object that increments given atomic but decrements it back in the destructor unless Commit() is called. +template +struct AtomicTransactionalIncrement +{ +public: + using T = decltype(AtomicT().load()); + + ~AtomicTransactionalIncrement() + { + if(m_Atomic) + --(*m_Atomic); + } + + void Commit() { m_Atomic = VMA_NULL; } + T Increment(AtomicT* atomic) + { + m_Atomic = atomic; + return m_Atomic->fetch_add(1); + } + +private: + AtomicT* m_Atomic = VMA_NULL; +}; +#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT + +#ifndef _VMA_STL_ALLOCATOR +// STL-compatible allocator. 
+template +struct VmaStlAllocator +{ + const VkAllocationCallbacks* const m_pCallbacks; + typedef T value_type; + + explicit VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {} + template + explicit VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) {} + VmaStlAllocator(const VmaStlAllocator&) = default; + VmaStlAllocator& operator=(const VmaStlAllocator&) = delete; + + T* allocate(size_t n); + void deallocate(T* p, size_t n); + + template + bool operator==(const VmaStlAllocator& rhs) const + { + return m_pCallbacks == rhs.m_pCallbacks; + } + template + bool operator!=(const VmaStlAllocator& rhs) const + { + return m_pCallbacks != rhs.m_pCallbacks; + } +}; + +template +T* VmaStlAllocator::allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } + +template +void VmaStlAllocator::deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } +#endif // _VMA_STL_ALLOCATOR + +#ifndef _VMA_VECTOR +/* Class with interface compatible with subset of std::vector. +T must be POD because constructors and destructors are not called and memcpy is +used for these objects. */ +template +class VmaVector +{ +public: + typedef T value_type; + typedef T* iterator; + typedef const T* const_iterator; + + explicit VmaVector(const AllocatorT& allocator); + VmaVector(size_t count, const AllocatorT& allocator); + // This version of the constructor is here for compatibility with pre-C++14 std::vector. + // value is unused. 
+ VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} + VmaVector(const VmaVector& src); + VmaVector& operator=(const VmaVector& rhs); + ~VmaVector(); + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_pArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + const T* data() const { return m_pArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + + iterator begin() { return m_pArray; } + iterator end() { return m_pArray + m_Count; } + const_iterator cbegin() const { return m_pArray; } + const_iterator cend() const { return m_pArray + m_Count; } + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void reserve(size_t newCapacity, bool freeMemory = false); + void resize(size_t newCount); + void clear() { resize(0); } + void shrink_to_fit(); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + +private: + AllocatorT m_Allocator; + T* m_pArray; + size_t m_Count; + size_t m_Capacity; +}; + +#ifndef _VMA_VECTOR_FUNCTIONS +template +VmaVector::~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } + +template +VmaVector::VmaVector(const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(VMA_NULL), + m_Count(0), + m_Capacity(0) {} + 
+template +VmaVector::VmaVector(size_t count, const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), + m_Count(count), + m_Capacity(count) {} + +template +VmaVector::VmaVector(const VmaVector& src) + : m_Allocator(src.m_Allocator), + m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), + m_Count(src.m_Count), + m_Capacity(src.m_Count) +{ + if (m_Count != 0) + { + memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); + } +} + +template +VmaVector& VmaVector::operator=(const VmaVector& rhs) +{ + if (&rhs != this) + { + resize(rhs.m_Count); + if (m_Count != 0) + { + memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); + } + } + return *this; +} + +template +void VmaVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + m_pArray[newIndex] = src; +} + +template +void VmaVector::reserve(size_t newCapacity, bool freeMemory) +{ + newCapacity = VMA_MAX(newCapacity, m_Count); + + if ((newCapacity < m_Capacity) && !freeMemory) + { + newCapacity = m_Capacity; + } + + if (newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? 
VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; + if (m_Count != 0) + { + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } +} + +template +void VmaVector::resize(size_t newCount) +{ + size_t newCapacity = m_Capacity; + if (newCount > m_Capacity) + { + newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); + } + + if (newCapacity != m_Capacity) + { + VMA_HEAVY_ASSERT(newCapacity > 0); + T* const newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity); + const size_t elementsToCopy = VMA_MIN(m_Count, newCount); + if (elementsToCopy != 0) + { + memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } + + m_Count = newCount; +} + +template +void VmaVector::shrink_to_fit() +{ + if (m_Capacity > m_Count) + { + T* newArray = VMA_NULL; + if (m_Count > 0) + { + newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = m_Count; + m_pArray = newArray; + } +} + +template +void VmaVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + if (index < oldCount) + { + memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); + } + m_pArray[index] = src; +} + +template +void VmaVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_VECTOR_FUNCTIONS + +namespace +{ + +template +void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) +{ + vec.insert(index, item); +} + +template +void 
VmaVectorRemove(VmaVector& vec, size_t index) +{ + vec.remove(index); +} + +} // namespace + +#endif // _VMA_VECTOR + +#ifndef _VMA_SMALL_VECTOR +/* +This is a vector (a variable-sized array), optimized for the case when the array is small. + +It contains some number of elements in-place, which allows it to avoid heap allocation +when the actual number of elements is below that threshold. This allows normal "small" +cases to be fast without losing generality for large inputs. +*/ +template +class VmaSmallVector +{ +public: + typedef T value_type; + typedef T* iterator; + + explicit VmaSmallVector(const AllocatorT& allocator); + VmaSmallVector(size_t count, const AllocatorT& allocator); + template + explicit VmaSmallVector(const VmaSmallVector&) = delete; + template + VmaSmallVector& operator=(const VmaSmallVector&) = delete; + ~VmaSmallVector() = default; + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + const T* data() const { return m_Count > N ? 
m_DynamicArray.data() : m_StaticArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + + iterator begin() { return data(); } + iterator end() { return data() + m_Count; } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void resize(size_t newCount, bool freeMemory = false); + void clear(bool freeMemory = false); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + +private: + size_t m_Count; + T m_StaticArray[N]; // Used when m_Size <= N + VmaVector m_DynamicArray; // Used when m_Size > N +}; + +#ifndef _VMA_SMALL_VECTOR_FUNCTIONS +template +VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) + : m_Count(0), + m_DynamicArray(allocator) {} + +template +VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) + : m_Count(count), + m_DynamicArray(count > N ? 
count : 0, allocator) {} + +template +void VmaSmallVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; +} + +template +void VmaSmallVector::resize(size_t newCount, bool freeMemory) +{ + if (newCount > N && m_Count > N) + { + // Any direction, staying in m_DynamicArray + m_DynamicArray.resize(newCount); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else if (newCount > N && m_Count <= N) + { + // Growing, moving from m_StaticArray to m_DynamicArray + m_DynamicArray.resize(newCount); + if (m_Count > 0) + { + memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); + } + } + else if (newCount <= N && m_Count > N) + { + // Shrinking, moving from m_DynamicArray to m_StaticArray + if (newCount > 0) + { + memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); + } + m_DynamicArray.resize(0); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else + { + // Any direction, staying in m_StaticArray - nothing to do here + } + m_Count = newCount; +} + +template +void VmaSmallVector::clear(bool freeMemory) +{ + m_DynamicArray.clear(); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + m_Count = 0; +} + +template +void VmaSmallVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + T* const dataPtr = data(); + if (index < oldCount) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. + memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); + } + dataPtr[index] = src; +} + +template +void VmaSmallVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
+ T* const dataPtr = data(); + memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_SMALL_VECTOR_FUNCTIONS +#endif // _VMA_SMALL_VECTOR + +#ifndef _VMA_POOL_ALLOCATOR +/* +Allocator for objects of type T using a list of arrays (pools) to speed up +allocation. Number of elements that can be allocated is not bounded because +allocator can create multiple blocks. +*/ +template +class VmaPoolAllocator +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator) +public: + VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); + ~VmaPoolAllocator(); + template T* Alloc(Types&&... args); + void Free(T* ptr); + +private: + union Item + { + uint32_t NextFreeIndex; + alignas(T) char Value[sizeof(T)]; + }; + struct ItemBlock + { + Item* pItems; + uint32_t Capacity; + uint32_t FirstFreeIndex; + }; + + const VkAllocationCallbacks* m_pAllocationCallbacks; + const uint32_t m_FirstBlockCapacity; + VmaVector> m_ItemBlocks; + + ItemBlock& CreateNewBlock(); +}; + +#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS +template +VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) +{ + VMA_ASSERT(m_FirstBlockCapacity > 1); +} + +template +VmaPoolAllocator::~VmaPoolAllocator() +{ + for (size_t i = m_ItemBlocks.size(); i--;) + vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); + m_ItemBlocks.clear(); +} + +template +template T* VmaPoolAllocator::Alloc(Types&&... args) +{ + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + // This block has some free items: Use first one. 
+ if (block.FirstFreeIndex != UINT32_MAX) + { + Item* const pItem = &block.pItems[block.FirstFreeIndex]; + block.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result)T(std::forward(args)...); // Explicit constructor call. + return result; + } + } + + // No block has free item: Create new one and use it. + ItemBlock& newBlock = CreateNewBlock(); + Item* const pItem = &newBlock.pItems[0]; + newBlock.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result) T(std::forward(args)...); // Explicit constructor call. + return result; +} + +template +void VmaPoolAllocator::Free(T* ptr) +{ + // Search all memory blocks to find ptr. + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + + // Casting to union. + Item* pItemPtr = VMA_NULL; + memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); + + // Check if pItemPtr is in address range of this block. + if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) + { + ptr->~T(); // Explicit destructor call. + const uint32_t index = static_cast(pItemPtr - block.pItems); + pItemPtr->NextFreeIndex = block.FirstFreeIndex; + block.FirstFreeIndex = index; + return; + } + } + VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); +} + +template +typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() +{ + const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? + m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; + + const ItemBlock newBlock = + { + vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), + newBlockCapacity, + 0 + }; + + m_ItemBlocks.push_back(newBlock); + + // Setup singly-linked list of all free items in this block. 
+ for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) + newBlock.pItems[i].NextFreeIndex = i + 1; + newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; + return m_ItemBlocks.back(); +} +#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS +#endif // _VMA_POOL_ALLOCATOR + +#ifndef _VMA_RAW_LIST +template +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; + T Value; +}; + +// Doubly linked list. +template +class VmaRawList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList) +public: + typedef VmaListItem ItemType; + + explicit VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); + // Intentionally not calling Clear, because that would be unnecessary + // computations to return all items to m_ItemAllocator as free. + ~VmaRawList() = default; + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + + ItemType* Front() { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Front() const { return m_pFront; } + const ItemType* Back() const { return m_pBack; } + + ItemType* PushFront(); + ItemType* PushBack(); + ItemType* PushFront(const T& value); + ItemType* PushBack(const T& value); + void PopFront(); + void PopBack(); + + // Item can be null - it means PushBack. + ItemType* InsertBefore(ItemType* pItem); + // Item can be null - it means PushFront. 
+ ItemType* InsertAfter(ItemType* pItem); + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); + + void Clear(); + void Remove(ItemType* pItem); + +private: + const VkAllocationCallbacks* const m_pAllocationCallbacks; + VmaPoolAllocator m_ItemAllocator; + ItemType* m_pFront; + ItemType* m_pBack; + size_t m_Count; +}; + +#ifndef _VMA_RAW_LIST_FUNCTIONS +template +VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) {} + +template +VmaListItem* VmaRawList::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pPrev = VMA_NULL; + if (IsEmpty()) + { + pNewItem->pNext = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pNext = m_pFront; + m_pFront->pPrev = pNewItem; + m_pFront = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pNext = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pPrev = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pPrev = m_pBack; + m_pBack->pNext = pNewItem; + m_pBack = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); + pNewItem->Value = value; + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); + pNewItem->Value = value; + return pNewItem; +} + +template +void VmaRawList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if (pNextItem != VMA_NULL) + { + pNextItem->pPrev = VMA_NULL; + } + m_pFront = pNextItem; + 
m_ItemAllocator.Free(pFrontItem); + --m_Count; +} + +template +void VmaRawList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { + pPrevItem->pNext = VMA_NULL; + } + m_pBack = pPrevItem; + m_ItemAllocator.Free(pBackItem); + --m_Count; +} + +template +void VmaRawList::Clear() +{ + if (!IsEmpty()) + { + ItemType* pItem = m_pBack; + while (pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; + m_ItemAllocator.Free(pItem); + pItem = pPrevItem; + } + m_pFront = VMA_NULL; + m_pBack = VMA_NULL; + m_Count = 0; + } +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == 
pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} +#endif // _VMA_RAW_LIST_FUNCTIONS +#endif // _VMA_RAW_LIST + +#ifndef _VMA_LIST +template +class VmaList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaList) +public: + class reverse_iterator; + class const_iterator; + class const_reverse_iterator; + + class iterator + { + friend class const_iterator; + friend class VmaList; + public: + iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + explicit iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const iterator operator++(int) { iterator result = *this; ++*this; return result; } + const iterator operator--(int) { iterator result = *this; --*this; return result; } + + iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class reverse_iterator + { + friend class const_reverse_iterator; + friend class VmaList; + public: + reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + 
explicit reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } + const reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } + + reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + reverse_iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_iterator + { + friend class VmaList; + public: + const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + explicit const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + explicit const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const const_iterator operator++(int) { const_iterator result = *this; ++* 
this; return result; } + const const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } + + const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + const_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_reverse_iterator + { + friend class VmaList; + public: + const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + explicit const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + explicit const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } + const const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } + + const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + const_reverse_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + + explicit VmaList(const AllocatorT& 
allocator) : m_RawList(allocator.m_pCallbacks) {} + + bool empty() const { return m_RawList.IsEmpty(); } + size_t size() const { return m_RawList.GetCount(); } + + iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } + iterator end() { return iterator(&m_RawList, VMA_NULL); } + + const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } + const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } + + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } + reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } + const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator rbegin() const { return crbegin(); } + const_reverse_iterator rend() const { return crend(); } + + void push_back(const T& value) { m_RawList.PushBack(value); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + + void clear() { m_RawList.Clear(); } + void erase(iterator it) { m_RawList.Remove(it.m_pItem); } + +private: + VmaRawList m_RawList; +}; + +#ifndef _VMA_LIST_FUNCTIONS +template +typename VmaList::iterator& VmaList::iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Front(); + } + return *this; +} + +template +typename VmaList::const_iterator& 
VmaList::const_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::const_reverse_iterator& VmaList::const_reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} +#endif // _VMA_LIST_FUNCTIONS +#endif // _VMA_LIST + +#ifndef _VMA_INTRUSIVE_LINKED_LIST +/* +Expected interface of ItemTypeTraits: +struct MyItemTypeTraits +{ + typedef MyItem ItemType; + static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; } + static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; } + static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; } + static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; } +}; +*/ +template +class VmaIntrusiveLinkedList +{ +public: + typedef typename ItemTypeTraits::ItemType ItemType; + static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); } + static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); } + + // Movable, not copyable. 
+ VmaIntrusiveLinkedList() = default; + VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src) noexcept; + VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete; + VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src) noexcept; + VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; + ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + ItemType* Front() { return m_Front; } + ItemType* Back() { return m_Back; } + const ItemType* Front() const { return m_Front; } + const ItemType* Back() const { return m_Back; } + + void PushBack(ItemType* item); + void PushFront(ItemType* item); + ItemType* PopBack(); + ItemType* PopFront(); + + // MyItem can be null - it means PushBack. + void InsertBefore(ItemType* existingItem, ItemType* newItem); + // MyItem can be null - it means PushFront. + void InsertAfter(ItemType* existingItem, ItemType* newItem); + void Remove(ItemType* item); + void RemoveAll(); + +private: + ItemType* m_Front = VMA_NULL; + ItemType* m_Back = VMA_NULL; + size_t m_Count = 0; +}; + +#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +template +VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) noexcept + : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) +{ + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; +} + +template +VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) noexcept +{ + if (&src != this) + { + VMA_HEAVY_ASSERT(IsEmpty()); + m_Front = src.m_Front; + m_Back = src.m_Back; + m_Count = src.m_Count; + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; + } + return *this; +} + +template +void VmaIntrusiveLinkedList::PushBack(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } 
+ else + { + ItemTypeTraits::AccessPrev(item) = m_Back; + ItemTypeTraits::AccessNext(m_Back) = item; + m_Back = item; + ++m_Count; + } +} + +template +void VmaIntrusiveLinkedList::PushFront(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } + else + { + ItemTypeTraits::AccessNext(item) = m_Front; + ItemTypeTraits::AccessPrev(m_Front) = item; + m_Front = item; + ++m_Count; + } +} + +template +typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const backItem = m_Back; + ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); + if (prevItem != VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; + } + m_Back = prevItem; + --m_Count; + ItemTypeTraits::AccessPrev(backItem) = VMA_NULL; + ItemTypeTraits::AccessNext(backItem) = VMA_NULL; + return backItem; +} + +template +typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const frontItem = m_Front; + ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; + } + m_Front = nextItem; + --m_Count; + ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL; + ItemTypeTraits::AccessNext(frontItem) = VMA_NULL; + return frontItem; +} + +template +void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); + ItemTypeTraits::AccessPrev(newItem) = prevItem; + ItemTypeTraits::AccessNext(newItem) = existingItem; + ItemTypeTraits::AccessPrev(existingItem) = newItem; + if (prevItem 
!= VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Front == existingItem); + m_Front = newItem; + } + ++m_Count; + } + else + PushBack(newItem); +} + +template +void VmaIntrusiveLinkedList::InsertAfter(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); + ItemTypeTraits::AccessNext(newItem) = nextItem; + ItemTypeTraits::AccessPrev(newItem) = existingItem; + ItemTypeTraits::AccessNext(existingItem) = newItem; + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Back == existingItem); + m_Back = newItem; + } + ++m_Count; + } + else + return PushFront(newItem); +} + +template +void VmaIntrusiveLinkedList::Remove(ItemType* item) +{ + VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); + if (ItemTypeTraits::GetPrev(item) != VMA_NULL) + { + ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item); + } + else + { + VMA_HEAVY_ASSERT(m_Front == item); + m_Front = ItemTypeTraits::GetNext(item); + } + + if (ItemTypeTraits::GetNext(item) != VMA_NULL) + { + ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item); + } + else + { + VMA_HEAVY_ASSERT(m_Back == item); + m_Back = ItemTypeTraits::GetPrev(item); + } + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + --m_Count; +} + +template +void VmaIntrusiveLinkedList::RemoveAll() +{ + if (!IsEmpty()) + { + ItemType* item = m_Back; + while (item != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + item = prevItem; + } + m_Front = VMA_NULL; + m_Back = 
VMA_NULL; + m_Count = 0; + } +} +#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +#endif // _VMA_INTRUSIVE_LINKED_LIST + +#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED +class VmaStringBuilder +{ +public: + explicit VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator(allocationCallbacks)) {} + ~VmaStringBuilder() = default; + + size_t GetLength() const { return m_Data.size(); } + // Returned string is not null-terminated! + const char* GetData() const { return m_Data.data(); } + void AddNewLine() { Add('\n'); } + void Add(char ch) { m_Data.push_back(ch); } + + void Add(const char* pStr); + void AddNumber(uint32_t num); + void AddNumber(uint64_t num); + void AddPointer(const void* ptr); + +private: + VmaVector> m_Data; +}; + +#ifndef _VMA_STRING_BUILDER_FUNCTIONS +void VmaStringBuilder::Add(const char* pStr) +{ + const size_t strLen = strlen(pStr); + if (strLen > 0) + { + const size_t oldCount = m_Data.size(); + m_Data.resize(oldCount + strLen); + memcpy(m_Data.data() + oldCount, pStr, strLen); + } +} + +void VmaStringBuilder::AddNumber(uint32_t num) +{ + char buf[11]; + buf[10] = '\0'; + char* p = &buf[10]; + do + { + *--p = '0' + (char)(num % 10); + num /= 10; + } while (num); + Add(p); +} + +void VmaStringBuilder::AddNumber(uint64_t num) +{ + char buf[21]; + buf[20] = '\0'; + char* p = &buf[20]; + do + { + *--p = '0' + (char)(num % 10); + num /= 10; + } while (num); + Add(p); +} + +void VmaStringBuilder::AddPointer(const void* ptr) +{ + char buf[21]; + VmaPtrToStr(buf, sizeof(buf), ptr); + Add(buf); +} +#endif //_VMA_STRING_BUILDER_FUNCTIONS +#endif // _VMA_STRING_BUILDER + +#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED +/* +Allows to conveniently build a correct JSON document to be written to the +VmaStringBuilder passed to the constructor. +*/ +class VmaJsonWriter +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter) +public: + // sb - string builder to write the document to. 
Must remain alive for the whole lifetime of this object. + VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); + ~VmaJsonWriter(); + + // Begins object by writing "{". + // Inside an object, you must call pairs of WriteString and a value, e.g.: + // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject(); + // Will write: { "A": 1, "B": 2 } + void BeginObject(bool singleLine = false); + // Ends object by writing "}". + void EndObject(); + + // Begins array by writing "[". + // Inside an array, you can write a sequence of any values. + void BeginArray(bool singleLine = false); + // Ends array by writing "[". + void EndArray(); + + // Writes a string value inside "". + // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. + void WriteString(const char* pStr); + + // Begins writing a string value. + // Call BeginString, ContinueString, ContinueString, ..., EndString instead of + // WriteString to conveniently build the string content incrementally, made of + // parts including numbers. + void BeginString(const char* pStr = VMA_NULL); + // Posts next part of an open string. + void ContinueString(const char* pStr); + // Posts next part of an open string. The number is converted to decimal characters. + void ContinueString(uint32_t n); + void ContinueString(uint64_t n); + // Posts next part of an open string. Pointer value is converted to characters + // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00 + void ContinueString_Pointer(const void* ptr); + // Ends writing a string value by writing '"'. + void EndString(const char* pStr = VMA_NULL); + + // Writes a number value. + void WriteNumber(uint32_t n); + void WriteNumber(uint64_t n); + // Writes a boolean value - false or true. + void WriteBool(bool b); + // Writes a null value. 
+ void WriteNull(); + +private: + enum COLLECTION_TYPE + { + COLLECTION_TYPE_OBJECT, + COLLECTION_TYPE_ARRAY, + }; + struct StackItem + { + COLLECTION_TYPE type; + uint32_t valueCount; + bool singleLineMode; + }; + + static const char* const INDENT; + + VmaStringBuilder& m_SB; + VmaVector< StackItem, VmaStlAllocator > m_Stack; + bool m_InsideString; + + void BeginValue(bool isString); + void WriteIndent(bool oneLess = false); +}; +const char* const VmaJsonWriter::INDENT = " "; + +#ifndef _VMA_JSON_WRITER_FUNCTIONS +VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) + : m_SB(sb), + m_Stack(VmaStlAllocator(pAllocationCallbacks)), + m_InsideString(false) {} + +VmaJsonWriter::~VmaJsonWriter() +{ + VMA_ASSERT(!m_InsideString); + VMA_ASSERT(m_Stack.empty()); +} + +void VmaJsonWriter::BeginObject(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('{'); + + StackItem item; + item.type = COLLECTION_TYPE_OBJECT; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndObject() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add('}'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); + m_Stack.pop_back(); +} + +void VmaJsonWriter::BeginArray(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('['); + + StackItem item; + item.type = COLLECTION_TYPE_ARRAY; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndArray() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add(']'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); + m_Stack.pop_back(); +} + +void VmaJsonWriter::WriteString(const char* pStr) +{ + BeginString(pStr); + EndString(); +} + +void VmaJsonWriter::BeginString(const char* pStr) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(true); + 
m_SB.Add('"'); + m_InsideString = true; + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } +} + +void VmaJsonWriter::ContinueString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + + const size_t strLen = strlen(pStr); + for (size_t i = 0; i < strLen; ++i) + { + char ch = pStr[i]; + if (ch == '\\') + { + m_SB.Add("\\\\"); + } + else if (ch == '"') + { + m_SB.Add("\\\""); + } + else if ((uint8_t)ch >= 32) + { + m_SB.Add(ch); + } + else switch (ch) + { + case '\b': + m_SB.Add("\\b"); + break; + case '\f': + m_SB.Add("\\f"); + break; + case '\n': + m_SB.Add("\\n"); + break; + case '\r': + m_SB.Add("\\r"); + break; + case '\t': + m_SB.Add("\\t"); + break; + default: + VMA_ASSERT(0 && "Character not currently supported."); + } + } +} + +void VmaJsonWriter::ContinueString(uint32_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString(uint64_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString_Pointer(const void* ptr) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddPointer(ptr); +} + +void VmaJsonWriter::EndString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } + m_SB.Add('"'); + m_InsideString = false; +} + +void VmaJsonWriter::WriteNumber(uint32_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteNumber(uint64_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteBool(bool b) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add(b ? 
"true" : "false"); +} + +void VmaJsonWriter::WriteNull() +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add("null"); +} + +void VmaJsonWriter::BeginValue(bool isString) +{ + if (!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { + VMA_ASSERT(isString); + } + + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { + m_SB.Add(": "); + } + else if (currItem.valueCount > 0) + { + m_SB.Add(", "); + WriteIndent(); + } + else + { + WriteIndent(); + } + ++currItem.valueCount; + } +} + +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if (!m_Stack.empty() && !m_Stack.back().singleLineMode) + { + m_SB.AddNewLine(); + + size_t count = m_Stack.size(); + if (count > 0 && oneLess) + { + --count; + } + for (size_t i = 0; i < count; ++i) + { + m_SB.Add(INDENT); + } + } +} +#endif // _VMA_JSON_WRITER_FUNCTIONS + +namespace +{ + +void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) +{ + json.BeginObject(); + + json.WriteString("BlockCount"); + json.WriteNumber(stat.statistics.blockCount); + json.WriteString("BlockBytes"); + json.WriteNumber(stat.statistics.blockBytes); + json.WriteString("AllocationCount"); + json.WriteNumber(stat.statistics.allocationCount); + json.WriteString("AllocationBytes"); + json.WriteNumber(stat.statistics.allocationBytes); + json.WriteString("UnusedRangeCount"); + json.WriteNumber(stat.unusedRangeCount); + + if (stat.statistics.allocationCount > 1) + { + json.WriteString("AllocationSizeMin"); + json.WriteNumber(stat.allocationSizeMin); + json.WriteString("AllocationSizeMax"); + json.WriteNumber(stat.allocationSizeMax); + } + if (stat.unusedRangeCount > 1) + { + json.WriteString("UnusedRangeSizeMin"); + json.WriteNumber(stat.unusedRangeSizeMin); + json.WriteString("UnusedRangeSizeMax"); + json.WriteNumber(stat.unusedRangeSizeMax); + } + json.EndObject(); +} + +} // namespace + 
+#endif // _VMA_JSON_WRITER + +#ifndef _VMA_MAPPING_HYSTERESIS + +class VmaMappingHysteresis +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis) +public: + VmaMappingHysteresis() = default; + + uint32_t GetExtraMapping() const { return m_ExtraMapping; } + + // Call when Map was called. + // Returns true if switched to extra +1 mapping reference count. + bool PostMap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) + { + m_ExtraMapping = 1; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + + // Call when Unmap was called. + void PostUnmap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + ++m_MajorCounter; + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was made from the memory block. + void PostAlloc() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + ++m_MajorCounter; + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was freed from the memory block. + // Returns true if switched to extra -1 mapping reference count. + bool PostFree() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && + m_MajorCounter > m_MinorCounter + 1) + { + m_ExtraMapping = 0; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + +private: + static constexpr int32_t COUNTER_MIN_EXTRA_MAPPING = 7; + + uint32_t m_MinorCounter = 0; + uint32_t m_MajorCounter = 0; + uint32_t m_ExtraMapping = 0; // 0 or 1. 
+ + void PostMinorCounter() + { + if(m_MinorCounter < m_MajorCounter) + { + ++m_MinorCounter; + } + else if(m_MajorCounter > 0) + { + --m_MajorCounter; + --m_MinorCounter; + } + } +}; + +#endif // _VMA_MAPPING_HYSTERESIS + +#if VMA_EXTERNAL_MEMORY_WIN32 +class VmaWin32Handle +{ +public: + VmaWin32Handle() noexcept : m_hHandle(VMA_NULL) { } + explicit VmaWin32Handle(HANDLE hHandle) noexcept + : m_hHandle(hHandle) + , m_IsNTHandle(IsNTHandle(hHandle)) + { + } + ~VmaWin32Handle() noexcept { if (m_hHandle != VMA_NULL && m_IsNTHandle) { ::CloseHandle(m_hHandle); } } + VMA_CLASS_NO_COPY_NO_MOVE(VmaWin32Handle) + +public: + // Strengthened + VkResult GetHandle(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE hTargetProcess, bool useMutex, HANDLE* pHandle) noexcept + { + *pHandle = VMA_NULL; + // Try to get handle first. + VkResult res = VK_SUCCESS; + if (m_hHandle == VMA_NULL) + { + VmaMutexLockWrite lock(m_Mutex, useMutex); + if (m_hHandle == VMA_NULL) + { + res = Create(device, memory, pvkGetMemoryWin32HandleKHR, handleType, &m_hHandle); + if (res != VK_SUCCESS) { + m_hHandle = VMA_NULL; + return res; + } + m_IsNTHandle = IsNTHandle(m_hHandle); + } + } + if (res == VK_SUCCESS) { + // KMT handle is returned as is. + *pHandle = m_IsNTHandle ? 
Duplicate(hTargetProcess) : m_hHandle; + } + return res; + } + + operator bool() const noexcept { return m_hHandle != VMA_NULL; } +private: + // Not atomic + static VkResult Create(VkDevice device, VkDeviceMemory memory, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE* pHandle) noexcept + { + VkResult res = VK_ERROR_FEATURE_NOT_PRESENT; + if (pvkGetMemoryWin32HandleKHR != VMA_NULL) + { + VkMemoryGetWin32HandleInfoKHR handleInfo{ }; + handleInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR; + handleInfo.memory = memory; + handleInfo.handleType = handleType; + res = pvkGetMemoryWin32HandleKHR(device, &handleInfo, pHandle); + } + return res; + } + HANDLE Duplicate(HANDLE hTargetProcess = VMA_NULL) const noexcept + { + if (!m_hHandle) + return m_hHandle; + + HANDLE hCurrentProcess = ::GetCurrentProcess(); + HANDLE hDupHandle = VMA_NULL; + if (!::DuplicateHandle(hCurrentProcess, m_hHandle, hTargetProcess ? hTargetProcess : hCurrentProcess, &hDupHandle, 0, FALSE, DUPLICATE_SAME_ACCESS)) + { + VMA_ASSERT(0 && "Failed to duplicate handle."); + } + return hDupHandle; + } + static bool IsNTHandle(HANDLE hHandle) noexcept + { + DWORD flags = 0; + return (hHandle != VMA_NULL) ? (::GetHandleInformation(hHandle, &flags) != 0) : false; + } +private: + HANDLE m_hHandle; + VMA_RW_MUTEX m_Mutex; // Protects access m_Handle + bool m_IsNTHandle = false; // True if m_Handle is NT handle, false if it's a KMT handle. +}; +#else +class VmaWin32Handle +{ + // ABI compatibility + void* placeholder = VMA_NULL; + VMA_RW_MUTEX placeholder2; + bool placeholder3 = false; +}; +#endif // VMA_EXTERNAL_MEMORY_WIN32 + + +#ifndef _VMA_DEVICE_MEMORY_BLOCK +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: +- Access to m_pMetadata must be externally synchronized. 
+- Map, Unmap, Bind* are synchronized internally. +*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + explicit VmaDeviceMemoryBlock(VmaAllocator hAllocator); + ~VmaDeviceMemoryBlock(); + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + uint32_t GetMapRefCount() const { return m_MapCount; } + + // Call when allocation/free was made from m_pMetadata. + // Used for m_MappingHysteresis. + void PostAlloc(VmaAllocator hAllocator); + void PostFree(VmaAllocator hAllocator); + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. 
+ VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + VmaAllocator hAllocator, + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocator hAllocator, + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); +#if VMA_EXTERNAL_MEMORY_WIN32 + VkResult CreateWin32Handle( + const VmaAllocator hAllocator, + PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, + VkExternalMemoryHandleTypeFlagBits handleType, + HANDLE hTargetProcess, + HANDLE* pHandle)noexcept; +#endif // VMA_EXTERNAL_MEMORY_WIN32 +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. + */ + VMA_MUTEX m_MapAndBindMutex; + VmaMappingHysteresis m_MappingHysteresis; + uint32_t m_MapCount; + void* m_pMappedData; + + VmaWin32Handle m_Handle; +}; +#endif // _VMA_DEVICE_MEMORY_BLOCK + +#ifndef _VMA_ALLOCATION_T +struct VmaAllocationExtraData +{ + void* m_pMappedData = VMA_NULL; // Not null means memory is mapped. 
+ VmaWin32Handle m_Handle; +}; + +struct VmaAllocation_T +{ + friend struct VmaDedicatedAllocationListItemTraits; + + enum FLAGS + { + FLAG_PERSISTENT_MAP = 0x01, + FLAG_MAPPING_ALLOWED = 0x02, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + // This struct is allocated using VmaPoolAllocator. + explicit VmaAllocation_T(bool mappingAllowed); + ~VmaAllocation_T(); + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped); + // pMappedData not null means allocation is created with MAPPED flag. + void InitDedicatedAllocation( + VmaAllocator allocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size); + void Destroy(VmaAllocator allocator); + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + void* GetUserData() const { return m_pUserData; } + const char* GetName() const { return m_pName; } + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } + bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } + + void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } + void SetName(VmaAllocator hAllocator, const char* pName); + void FreeName(VmaAllocator hAllocator); + uint8_t SwapBlockAllocation(VmaAllocator hAllocator, 
VmaAllocation allocation); + VmaAllocHandle GetAllocHandle() const; + VkDeviceSize GetOffset() const; + VmaPool GetParentPool() const; + VkDeviceMemory GetMemory() const; + void* GetMappedData() const; + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + VmaBufferImageUsage GetBufferImageUsage() const { return m_BufferImageUsage; } + void InitBufferUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5) + { + VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); + m_BufferImageUsage = VmaBufferImageUsage(createInfo, useKhrMaintenance5); + } + void InitImageUsage(const VkImageCreateInfo &createInfo) + { + VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); + m_BufferImageUsage = VmaBufferImageUsage(createInfo); + } + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +#if VMA_EXTERNAL_MEMORY_WIN32 + VkResult GetWin32Handle(VmaAllocator hAllocator, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE hTargetProcess, HANDLE* hHandle) noexcept; +#endif // VMA_EXTERNAL_MEMORY_WIN32 + +private: + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VmaAllocHandle m_AllocHandle; + }; + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + VkDeviceMemory m_hMemory; + VmaAllocationExtraData* m_ExtraData; + VmaAllocation_T* m_Prev; + VmaAllocation_T* m_Next; + }; + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. 
+ DedicatedAllocation m_DedicatedAllocation; + }; + + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + char* m_pName; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS +#if VMA_STATS_STRING_ENABLED + VmaBufferImageUsage m_BufferImageUsage; // 0 if unknown. +#endif + + void EnsureExtraData(VmaAllocator hAllocator); +}; +#endif // _VMA_ALLOCATION_T + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS +struct VmaDedicatedAllocationListItemTraits +{ + typedef VmaAllocation_T ItemType; + + static ItemType* GetPrev(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType* GetNext(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } + static ItemType*& AccessPrev(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType*& AccessNext(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } +}; +#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST +/* +Stores linked list of VmaAllocation_T objects. +Thread-safe, synchronized internally. 
+*/ +class VmaDedicatedAllocationList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList) +public: + VmaDedicatedAllocationList() = default; + ~VmaDedicatedAllocationList(); + + void Init(bool useMutex) { m_UseMutex = useMutex; } + bool Validate(); + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + void AddStatistics(VmaStatistics& inoutStats); +#if VMA_STATS_STRING_ENABLED + // Writes JSON array with the list of allocations. + void BuildStatsString(VmaJsonWriter& json); +#endif + + bool IsEmpty(); + void Register(VmaAllocation alloc); + void Unregister(VmaAllocation alloc); + +private: + typedef VmaIntrusiveLinkedList DedicatedAllocationLinkedList; + + bool m_UseMutex = true; + VMA_RW_MUTEX m_Mutex; + DedicatedAllocationLinkedList m_AllocationList; +}; + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS + +VmaDedicatedAllocationList::~VmaDedicatedAllocationList() +{ + VMA_HEAVY_ASSERT(Validate()); + + if (!m_AllocationList.IsEmpty()) + { + VMA_ASSERT_LEAK(false && "Unfreed dedicated allocations found!"); + } +} + +bool VmaDedicatedAllocationList::Validate() +{ + const size_t declaredCount = m_AllocationList.GetCount(); + size_t actualCount = 0; + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + ++actualCount; + } + VMA_VALIDATE(actualCount == declaredCount); + + return true; +} + +void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); + } +} + +void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + + const 
uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); + inoutStats.blockCount += allocCount; + inoutStats.allocationCount += allocCount; + + for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size; + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + json.BeginArray(); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + json.BeginObject(true); + alloc->PrintParameters(json); + json.EndObject(); + } + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaDedicatedAllocationList::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + return m_AllocationList.IsEmpty(); +} + +void VmaDedicatedAllocationList::Register(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.PushBack(alloc); +} + +void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.Remove(alloc); +} +#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS +#endif // _VMA_DEDICATED_ALLOCATION_LIST + +#ifndef _VMA_SUBALLOCATION +/* +Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as +allocated memory block or free. +*/ +struct VmaSuballocation +{ + VkDeviceSize offset; + VkDeviceSize size; + void* userData; + VmaSuballocationType type; +}; + +// Comparator for offsets. 
+struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; + +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +struct VmaSuballocationItemSizeLess +{ + bool operator()(const VmaSuballocationList::iterator lhs, + const VmaSuballocationList::iterator rhs) const + { + return lhs->size < rhs->size; + } + + bool operator()(const VmaSuballocationList::iterator lhs, + VkDeviceSize rhsSize) const + { + return lhs->size < rhsSize; + } +}; +#endif // _VMA_SUBALLOCATION + +#ifndef _VMA_ALLOCATION_REQUEST +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. +item points to a FREE suballocation. +*/ +struct VmaAllocationRequest +{ + VmaAllocHandle allocHandle; + VkDeviceSize size; + VmaSuballocationList::iterator item; + void* customData; + uint64_t algorithmData; + VmaAllocationRequestType type; +}; +#endif // _VMA_ALLOCATION_REQUEST + +#ifndef _VMA_BLOCK_METADATA +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata) +public: + // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. + VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata() = default; + + virtual void Init(VkDeviceSize size) { m_Size = size; } + bool IsVirtual() const { return m_IsVirtual; } + VkDeviceSize GetSize() const { return m_Size; } + + // Validates all data structures inside this object. If not valid, returns false. 
+ virtual bool Validate() const = 0; + virtual size_t GetAllocationCount() const = 0; + virtual size_t GetFreeRegionsCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + // Returns true if this block is empty - contains only single free suballocation. + virtual bool IsEmpty() const = 0; + virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; + virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; + virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; + + virtual VmaAllocHandle GetAllocationListBegin() const = 0; + virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; + virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; + + // Shouldn't modify blockCount. + virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; + virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. + virtual bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. Request must already be checked and valid. + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) = 0; + + // Frees suballocation assigned to given memory region. + virtual void Free(VmaAllocHandle allocHandle) = 0; + + // Frees all allocations. 
+ // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! + virtual void Clear() = 0; + + virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; + virtual void DebugLogAllAllocations() const = 0; + +protected: + const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); } + + void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; +#if VMA_STATS_STRING_ENABLED + // mapRefCount == UINT32_MAX means unspecified. + void PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size, void* userData) const; + static void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size); + static void PrintDetailedMap_End(class VmaJsonWriter& json); +#endif + +private: + VkDeviceSize m_Size; + const VkAllocationCallbacks* m_pAllocationCallbacks; + const VkDeviceSize m_BufferImageGranularity; + const bool m_IsVirtual; +}; + +#ifndef _VMA_BLOCK_METADATA_FUNCTIONS +VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : m_Size(0), + m_pAllocationCallbacks(pAllocationCallbacks), + m_BufferImageGranularity(bufferImageGranularity), + m_IsVirtual(isVirtual) {} + +void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const +{ + if (IsVirtual()) + { + VMA_LEAK_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p", offset, size, userData); + } + else + { + VMA_ASSERT(userData != 
VMA_NULL); + VmaAllocation allocation = reinterpret_cast(userData); + + userData = allocation->GetUserData(); + const char* name = allocation->GetName(); + +#if VMA_STATS_STRING_ENABLED + VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %s; Usage: %" PRIu64, + offset, size, userData, name ? name : "vma_empty", + VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], + (uint64_t)allocation->GetBufferImageUsage().Value); +#else + VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %u", + offset, size, userData, name ? name : "vma_empty", + (unsigned)allocation->GetSuballocationType()); +#endif // VMA_STATS_STRING_ENABLED + } + +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const +{ + json.WriteString("TotalBytes"); + json.WriteNumber(GetSize()); + + json.WriteString("UnusedBytes"); + json.WriteNumber(unusedBytes); + + json.WriteString("Allocations"); + json.WriteNumber((uint64_t)allocationCount); + + json.WriteString("UnusedRanges"); + json.WriteNumber((uint64_t)unusedRangeCount); + + json.WriteString("Suballocations"); + json.BeginArray(); +} + +void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size, void* userData) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + if (IsVirtual()) + { + json.WriteString("Size"); + json.WriteNumber(size); + if (userData) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(userData); + json.EndString(); + } + } + else + { + ((VmaAllocation)userData)->PrintParameters(json); + } + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize 
size) +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); + + json.WriteString("Size"); + json.WriteNumber(size); + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) +{ + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_BLOCK_METADATA_FUNCTIONS +#endif // _VMA_BLOCK_METADATA + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY +// Before deleting object of this class remember to call 'Destroy()' +class VmaBlockBufferImageGranularity final +{ +public: + struct ValidationContext + { + const VkAllocationCallbacks* allocCallbacks; + uint16_t* pageAllocs; + }; + + explicit VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); + ~VmaBlockBufferImageGranularity(); + + bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } + + void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); + // Before destroying object you must call free it's memory + void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); + + void RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const; + + bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const; + + void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); + void FreePages(VkDeviceSize offset, VkDeviceSize size); + void Clear(); + + ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, + bool isVirutal) const; + bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; + bool FinishValidation(ValidationContext& ctx) const; + +private: + static constexpr uint16_t 
MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; + + struct RegionInfo + { + uint8_t allocType; + uint16_t allocCount; + }; + + VkDeviceSize m_BufferImageGranularity; + uint32_t m_RegionCount; + RegionInfo* m_RegionInfo; + + uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); } + uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } + + uint32_t OffsetToPageIndex(VkDeviceSize offset) const; + static void AllocPage(RegionInfo& page, uint8_t allocType); +}; + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) + : m_BufferImageGranularity(bufferImageGranularity), + m_RegionCount(0), + m_RegionInfo(VMA_NULL) {} + +VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() +{ + VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); +} + +void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) +{ + if (IsEnabled()) + { + m_RegionCount = static_cast(VmaDivideRoundingUp(size, m_BufferImageGranularity)); + m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount); + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); + } +} + +void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) +{ + if (m_RegionInfo) + { + vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount); + m_RegionInfo = VMA_NULL; + } +} + +void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const +{ + if (m_BufferImageGranularity > 1 && + m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) + { + if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == 
VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { + inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity); + inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity); + } + } +} + +bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(inOutAllocOffset); + if (m_RegionInfo[startPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[startPage].allocType), allocType)) + { + inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity); + if (blockSize < allocSize + inOutAllocOffset - blockOffset) + return true; + ++startPage; + } + uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize); + if (endPage != startPage && + m_RegionInfo[endPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[endPage].allocType), allocType)) + { + return true; + } + } + return false; +} + +void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + AllocPage(m_RegionInfo[startPage], allocType); + + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + AllocPage(m_RegionInfo[endPage], allocType); + } +} + +void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + --m_RegionInfo[startPage].allocCount; + if (m_RegionInfo[startPage].allocCount == 0) + m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + { + --m_RegionInfo[endPage].allocCount; + if (m_RegionInfo[endPage].allocCount == 0) + 
m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + } + } +} + +void VmaBlockBufferImageGranularity::Clear() +{ + if (m_RegionInfo) + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); +} + +VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( + const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const +{ + ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; + if (!isVirutal && IsEnabled()) + { + ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); + memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); + } + return ctx; +} + +bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, + VkDeviceSize offset, VkDeviceSize size) const +{ + if (IsEnabled()) + { + uint32_t start = GetStartPage(offset); + ++ctx.pageAllocs[start]; + VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); + + uint32_t end = GetEndPage(offset, size); + if (start != end) + { + ++ctx.pageAllocs[end]; + VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); + } + } + return true; +} + +bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const +{ + // Check proper page structure + if (IsEnabled()) + { + VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); + + for (uint32_t page = 0; page < m_RegionCount; ++page) + { + VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); + } + vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); + ctx.pageAllocs = VMA_NULL; + } + return true; +} + +uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const +{ + return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); +} + +void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) +{ + // When current alloc type is free then it can be overridden by new type + if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == 
VMA_SUBALLOCATION_TYPE_FREE)) + page.allocType = allocType; + + ++page.allocCount; +} +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY + +#ifndef _VMA_BLOCK_METADATA_LINEAR +/* +Allocations and their references in internal data structure look like this: + +if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): + + 0 +-------+ + | Alloc | 2nd[0] + +-------+ + | Alloc | 2nd[1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | ... 
| + +-------+ + | Alloc | 2nd[1] + +-------+ + | Alloc | 2nd[0] +GetSize() +-------+ + +*/ +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear) +public: + VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + ~VmaBlockMetadata_Linear() override = default; + + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return GetAllocationCount() == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + size_t GetAllocationCount() const override; + size_t GetFreeRegionsCount() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, 
void* userData) override; + void DebugLogAllAllocations() const override; + +private: + /* + There are two suballocation vectors, used in ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector> SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { + SECOND_VECTOR_EMPTY, + /* + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ + SECOND_VECTOR_RING_BUFFER, + /* + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. + */ + SECOND_VECTOR_DOUBLE_STACK, + }; + + VkDeviceSize m_SumFreeSize; + SuballocationVectorType m_Suballocations0, m_Suballocations1; + uint32_t m_1stVectorIndex; + SECOND_VECTOR_MODE m_2ndVectorMode; + // Number of items in 1st vector with hAllocation = null at the beginning. + size_t m_1stNullItemsBeginCount; + // Number of other items in 1st vector with hAllocation = null somewhere in the middle. + size_t m_1stNullItemsMiddleCount; + // Number of items in 2nd vector with hAllocation = null. + size_t m_2ndNullItemsCount; + + SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? 
m_Suballocations0 : m_Suballocations1; } + + VmaSuballocation& FindSuballocation(VkDeviceSize offset) const; + bool ShouldCompact1st() const; + void CleanupAfterFree(); + + bool CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + bool CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS +VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_SumFreeSize(0), + m_Suballocations0(VmaStlAllocator(pAllocationCallbacks)), + m_Suballocations1(VmaStlAllocator(pAllocationCallbacks)), + m_1stVectorIndex(0), + m_2ndVectorMode(SECOND_VECTOR_EMPTY), + m_1stNullItemsBeginCount(0), + m_1stNullItemsMiddleCount(0), + m_2ndNullItemsCount(0) {} + +void VmaBlockMetadata_Linear::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + m_SumFreeSize = size; +} + +bool VmaBlockMetadata_Linear::Validate() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); + VMA_VALIDATE(!suballocations1st.empty() || + suballocations2nd.empty() || + m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); + + if (!suballocations1st.empty()) + { + // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. + VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE); + // Null item at the end should be just pop_back(). 
+ VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + if (!suballocations2nd.empty()) + { + // Null item at the end should be just pop_back(). + VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + const VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize offset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.userData == VMA_NULL); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + 
VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const +{ + // Function only used for 
defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return SIZE_MAX; +} + +void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. 
+ if (lastOffset < freeSpace2ndTo1stEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + if (lastOffset < freeSpace1stTo2ndEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. 
+ lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to size. + if (lastOffset < size) + { + const VkDeviceSize unusedRangeSize = size - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. 
+ lastOffset = size; + } + } + } +} + +void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VkDeviceSize size = GetSize(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.blockCount++; + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size - m_SumFreeSize; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. 
+ while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. 
+ lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. 
+ lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. 
+ if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. 
+ else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. 
+ while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + + if(allocSize > GetSize()) + return false; + + pAllocationRequest->size = allocSize; + return upperAddress ? 
+ CreateAllocationRequest_UpperAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest); +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + VMA_ASSERT(!IsVirtual()); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; + const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; + + switch (request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = 
SECOND_VECTOR_DOUBLE_STACK; + } + break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(offset + request.size <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. + VMA_ASSERT(!suballocations1st.empty() && + offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch (m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; + + if (!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. 
+ VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if (firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.userData = VMA_NULL; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the middle of 1st vector. + { + const SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if (it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if (it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + outInfo.offset = (VkDeviceSize)allocHandle - 1; + VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); + outInfo.size = suballoc.size; + outInfo.pUserData = suballoc.userData; +} + +void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return 0; +} + +void VmaBlockMetadata_Linear::Clear() +{ + m_SumFreeSize = GetSize(); + m_Suballocations0.clear(); + m_Suballocations1.clear(); + // Leaving m_1stVectorIndex unchanged - it doesn't matter. 
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; +} + +void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); + suballoc.userData = userData; +} + +void VmaBlockMetadata_Linear::DebugLogAllAllocations() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); + + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); +} + +VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the 1st vector. + { + SuballocationVectorType::const_iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if (it != suballocations1st.end()) + { + return const_cast(*it); + } + } + + if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Rest of members stays uninitialized intentionally for better performance. + SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if (it != suballocations2nd.end()) + { + return const_cast(*it); + } + } + + VMA_ASSERT(0 && "Allocation not found in linear allocator!"); + return const_cast(suballocations1st.back()); // Should never occur. +} + +bool VmaBlockMetadata_Linear::ShouldCompact1st() const +{ + const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + const size_t suballocCount = AccessSuballocations1st().size(); + return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3; +} + +void VmaBlockMetadata_Linear::CleanupAfterFree() +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (IsEmpty()) + { + suballocations1st.clear(); + suballocations2nd.clear(); + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + } + else + { + const size_t suballoc1stCount = suballocations1st.size(); + const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + VMA_ASSERT(nullItem1stCount <= suballoc1stCount); + + // Find more null items at the beginning of 1st vector. + while (m_1stNullItemsBeginCount < suballoc1stCount && + suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + + // Find more null items at the end of 1st vector. + while (m_1stNullItemsMiddleCount > 0 && + suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE) + { + --m_1stNullItemsMiddleCount; + suballocations1st.pop_back(); + } + + // Find more null items at the end of 2nd vector. 
+ while (m_2ndNullItemsCount > 0 && + suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE) + { + --m_2ndNullItemsCount; + suballocations2nd.pop_back(); + } + + // Find more null items at the beginning of 2nd vector. + while (m_2ndNullItemsCount > 0 && + suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE) + { + --m_2ndNullItemsCount; + VmaVectorRemove(suballocations2nd, 0); + } + + if (ShouldCompact1st()) + { + const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount; + size_t srcIndex = m_1stNullItemsBeginCount; + for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex) + { + while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++srcIndex; + } + if (dstIndex != srcIndex) + { + suballocations1st[dstIndex] = suballocations1st[srcIndex]; + } + ++srcIndex; + } + suballocations1st.resize(nonNullItemCount); + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + } + + // 2nd vector became empty. + if (suballocations2nd.empty()) + { + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + } + + // 1st vector became empty. + if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) + { + suballocations1st.clear(); + m_1stNullItemsBeginCount = 0; + + if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + // Swap 1st with 2nd. Now 2nd is empty. 
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsMiddleCount = m_2ndNullItemsCount; + while (m_1stNullItemsBeginCount < suballocations2nd.size() && + suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + m_2ndNullItemsCount = 0; + m_1stVectorIndex ^= 1; + } + } + } + + VMA_HEAVY_ASSERT(Validate()); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. 
+ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : blockSize; + + // There is enough free space at the end after alignment. + if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item, customData unused. + pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + return true; + } + } + + // Wrap-around to end of 2nd vector. 
Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + size_t index1st = m_1stNullItemsBeginCount; + + // There is enough free space at the end after alignment. + if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || + (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. 
+ if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) + { + for (size_t nextSuballocIndex = index1st; + nextSuballocIndex < suballocations1st.size(); + nextSuballocIndex++) + { + const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; + // pAllocationRequest->item, customData unused. + return true; + } + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if (allocSize > blockSize) + { + return false; + } + VkDeviceSize resultBaseOffset = blockSize - allocSize; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if (allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. 
+ VkDeviceSize resultOffset = resultBaseOffset; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + // Apply debugMargin at the end. + if (debugMargin > 0) + { + if (resultOffset < debugMargin) + { + return false; + } + resultOffset -= debugMargin; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if (endOf1st + debugMargin <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (bufferImageGranularity > 1) + { + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. 
+ break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item unused. + pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; + return true; + } + + return false; +} +#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_LINEAR + +#ifndef _VMA_BLOCK_METADATA_TLSF +// To not search current larger region if first allocation won't succeed and skip to smaller range +// use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest(). +// When fragmentation and reusal of previous blocks doesn't matter then use with +// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible. +class VmaBlockMetadata_TLSF : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF) +public: + VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + ~VmaBlockMetadata_TLSF() override; + + size_t GetAllocationCount() const override { return m_AllocCount; } + size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; } + VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } + bool IsEmpty() const override { return m_NullBlock->offset == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + 
VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void DebugLogAllAllocations() const override; + +private: + // According to original paper it should be preferable 4 or 5: + // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems" + // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf + static constexpr uint8_t SECOND_LEVEL_INDEX = 5; + static constexpr uint16_t SMALL_BUFFER_SIZE = 256; + static constexpr uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16; + static constexpr uint8_t MEMORY_CLASS_SHIFT = 7; + static constexpr uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT; + + class Block + { + public: + VkDeviceSize offset; + VkDeviceSize size; + Block* prevPhysical; + Block* nextPhysical; + + void MarkFree() { prevFree = VMA_NULL; } + void MarkTaken() { prevFree = this; } + bool IsFree() const { return prevFree != this; } + void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; } + Block*& PrevFree() { return prevFree; } + Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; } + + private: + Block* prevFree; // Address of the same block here indicates that block is taken + union + { + Block* nextFree; + void* userData; + }; + }; + + size_t m_AllocCount; + // Total number of free blocks 
besides null block + size_t m_BlocksFreeCount; + // Total size of free blocks excluding null block + VkDeviceSize m_BlocksFreeSize; + uint32_t m_IsFreeBitmap; + uint8_t m_MemoryClasses; + uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES]; + uint32_t m_ListsCount; + /* + * 0: 0-3 lists for small buffers + * 1+: 0-(2^SLI-1) lists for normal buffers + */ + Block** m_FreeList; + VmaPoolAllocator m_BlockAllocator; + Block* m_NullBlock; + VmaBlockBufferImageGranularity m_GranularityHandler; + + static uint8_t SizeToMemoryClass(VkDeviceSize size); + uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const; + uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const; + uint32_t GetListIndex(VkDeviceSize size) const; + + void RemoveFreeBlock(Block* block); + void InsertFreeBlock(Block* block); + void MergeBlock(Block* block, Block* prev); + + Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const; + bool CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_AllocCount(0), + m_BlocksFreeCount(0), + m_BlocksFreeSize(0), + m_IsFreeBitmap(0), + m_MemoryClasses(0), + m_ListsCount(0), + m_FreeList(VMA_NULL), + m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), + m_NullBlock(VMA_NULL), + m_GranularityHandler(bufferImageGranularity) {} + +VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() +{ + if (m_FreeList) + vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount); + m_GranularityHandler.Destroy(GetAllocationCallbacks()); +} + +void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) +{ + 
VmaBlockMetadata::Init(size); + + if (!IsVirtual()) + m_GranularityHandler.Init(GetAllocationCallbacks(), size); + + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = size; + m_NullBlock->offset = 0; + m_NullBlock->prevPhysical = VMA_NULL; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + uint8_t memoryClass = SizeToMemoryClass(size); + uint16_t sli = SizeToSecondIndex(size, memoryClass); + m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1; + if (IsVirtual()) + m_ListsCount += 1UL << SECOND_LEVEL_INDEX; + else + m_ListsCount += 4; + + m_MemoryClasses = memoryClass + uint8_t(2); + memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); + + m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); +} + +bool VmaBlockMetadata_TLSF::Validate() const +{ + VMA_VALIDATE(GetSumFreeSize() <= GetSize()); + + VkDeviceSize calculatedSize = m_NullBlock->size; + VkDeviceSize calculatedFreeSize = m_NullBlock->size; + size_t allocCount = 0; + size_t freeCount = 0; + + // Check integrity of free lists + for (uint32_t list = 0; list < m_ListsCount; ++list) + { + Block* block = m_FreeList[list]; + if (block != VMA_NULL) + { + VMA_VALIDATE(block->IsFree()); + VMA_VALIDATE(block->PrevFree() == VMA_NULL); + while (block->NextFree()) + { + VMA_VALIDATE(block->NextFree()->IsFree()); + VMA_VALIDATE(block->NextFree()->PrevFree() == block); + block = block->NextFree(); + } + } + } + + VkDeviceSize nextOffset = m_NullBlock->offset; + auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); + + VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); + if (m_NullBlock->prevPhysical) + { + VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); + } + // Check all blocks + for (Block* prev = 
m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) + { + VMA_VALIDATE(prev->offset + prev->size == nextOffset); + nextOffset = prev->offset; + calculatedSize += prev->size; + + uint32_t listIndex = GetListIndex(prev->size); + if (prev->IsFree()) + { + ++freeCount; + // Check if free block belongs to free list + Block* freeBlock = m_FreeList[listIndex]; + VMA_VALIDATE(freeBlock != VMA_NULL); + + bool found = false; + do + { + if (freeBlock == prev) + found = true; + + freeBlock = freeBlock->NextFree(); + } while (!found && freeBlock != VMA_NULL); + + VMA_VALIDATE(found); + calculatedFreeSize += prev->size; + } + else + { + ++allocCount; + // Check if taken block is not on a free list + Block* freeBlock = m_FreeList[listIndex]; + while (freeBlock) + { + VMA_VALIDATE(freeBlock != prev); + freeBlock = freeBlock->NextFree(); + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); + } + } + + if (prev->prevPhysical) + { + VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); + } + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); + } + + VMA_VALIDATE(nextOffset == 0); + VMA_VALIDATE(calculatedSize == GetSize()); + VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); + VMA_VALIDATE(allocCount == m_AllocCount); + VMA_VALIDATE(freeCount == m_BlocksFreeCount); + + return true; +} + +void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + if (m_NullBlock->size > 0) + VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); + + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree()) + VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); + else + VmaAddDetailedStatisticsAllocation(inoutStats, block->size); + } +} + +void 
VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
+{
+    inoutStats.blockCount++;
+    inoutStats.allocationCount += (uint32_t)m_AllocCount;
+    inoutStats.blockBytes += GetSize();
+    inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
+}
+
+#if VMA_STATS_STRING_ENABLED
+// Dumps every physical block (walked back-to-front via prevPhysical, emitted
+// front-to-back) plus the trailing null block as JSON.
+// NOTE(review): the VmaStlAllocator/VmaVector template arguments were stripped
+// by the text extraction; restored here.
+void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
+{
+    size_t blockCount = m_AllocCount + m_BlocksFreeCount;
+    VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+    VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
+
+    size_t i = blockCount;
+    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+    {
+        blockList[--i] = block;
+    }
+    VMA_ASSERT(i == 0);
+
+    VmaDetailedStatistics stats;
+    VmaClearDetailedStatistics(stats);
+    AddDetailedStatistics(stats);
+
+    PrintDetailedMap_Begin(json,
+        stats.statistics.blockBytes - stats.statistics.allocationBytes,
+        stats.statistics.allocationCount,
+        stats.unusedRangeCount);
+
+    for (; i < blockCount; ++i)
+    {
+        Block* block = blockList[i];
+        if (block->IsFree())
+            PrintDetailedMap_UnusedRange(json, block->offset, block->size);
+        else
+            PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
+    }
+    if (m_NullBlock->size > 0)
+        PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
+
+    PrintDetailedMap_End(json);
+}
+#endif
+
+bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
+    VkDeviceSize allocSize,
+    VkDeviceSize allocAlignment,
+    bool upperAddress,
+    VmaSuballocationType allocType,
+    uint32_t strategy,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
+    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+
+    // For small granularity round up
+    if (!IsVirtual())
+        m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
+
+    allocSize += GetDebugMargin();
+    // Quick check for too small pool
+ if (allocSize > GetSumFreeSize()) + return false; + + // If no free blocks in pool then check only null block + if (m_BlocksFreeCount == 0) + return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest); + + // Round up to the next block + VkDeviceSize sizeForNextList = allocSize; + VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1U << SECOND_LEVEL_INDEX : 4U)); + if (allocSize > SMALL_BUFFER_SIZE) + { + sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX)); + } + else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep) + sizeForNextList = SMALL_BUFFER_SIZE + 1; + else + sizeForNextList += smallSizeStep; + + uint32_t nextListIndex = m_ListsCount; + uint32_t prevListIndex = m_ListsCount; + Block* nextListBlock = VMA_NULL; + Block* prevListBlock = VMA_NULL; + + // Check blocks according to strategies + if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) + { + // Quick check for larger block first + nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); + if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // If not fitted then null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Null block failed, search larger bucket + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + + // Failed again, check best fit bucket + prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + prevListBlock = prevListBlock->NextFree(); + } + } + else if (strategy & 
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) + { + // Check best fit bucket + prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + prevListBlock = prevListBlock->NextFree(); + } + + // If failed check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Check larger bucket + nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + } + else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ) + { + // Perform search from the start + VmaStlAllocator allocator(GetAllocationCallbacks()); + VmaVector> blockList(m_BlocksFreeCount, allocator); + + size_t i = m_BlocksFreeCount; + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree() && block->size >= allocSize) + blockList[--i] = block; + } + + for (; i < m_BlocksFreeCount; ++i) + { + Block& block = *blockList[i]; + if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + } + + // If failed check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Whole range searched, no more memory + return false; + } + else + { + // Check larger bucket + nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + + // If failed check null block + if 
(CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Check best fit bucket + prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + prevListBlock = prevListBlock->NextFree(); + } + } + + // Worst case, full search has to be done + while (++nextListIndex < m_ListsCount) + { + nextListBlock = m_FreeList[nextListIndex]; + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + } + + // No more memory sadly + return false; +} + +VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData) +{ + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (!block->IsFree()) + { + if (!VmaValidateMagicValue(pBlockData, block->offset + block->size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_TLSF::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF); + + // Get block and pop it from the free list + Block* currentBlock = (Block*)request.allocHandle; + VkDeviceSize offset = request.algorithmData; + VMA_ASSERT(currentBlock != VMA_NULL); + VMA_ASSERT(currentBlock->offset <= offset); + + if (currentBlock != m_NullBlock) + RemoveFreeBlock(currentBlock); + + VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize missingAlignment = offset - currentBlock->offset; + + // Append missing alignment to prev block or create new one + if (missingAlignment) + { + Block* prevBlock = currentBlock->prevPhysical; + VMA_ASSERT(prevBlock != 
VMA_NULL && "There should be no missing alignment at offset 0!"); + + if (prevBlock->IsFree() && prevBlock->size != debugMargin) + { + uint32_t oldList = GetListIndex(prevBlock->size); + prevBlock->size += missingAlignment; + // Check if new size crosses list bucket + if (oldList != GetListIndex(prevBlock->size)) + { + prevBlock->size -= missingAlignment; + RemoveFreeBlock(prevBlock); + prevBlock->size += missingAlignment; + InsertFreeBlock(prevBlock); + } + else + m_BlocksFreeSize += missingAlignment; + } + else + { + Block* newBlock = m_BlockAllocator.Alloc(); + currentBlock->prevPhysical = newBlock; + prevBlock->nextPhysical = newBlock; + newBlock->prevPhysical = prevBlock; + newBlock->nextPhysical = currentBlock; + newBlock->size = missingAlignment; + newBlock->offset = currentBlock->offset; + newBlock->MarkTaken(); + + InsertFreeBlock(newBlock); + } + + currentBlock->size -= missingAlignment; + currentBlock->offset += missingAlignment; + } + + VkDeviceSize size = request.size + debugMargin; + if (currentBlock->size == size) + { + if (currentBlock == m_NullBlock) + { + // Setup new null block + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = 0; + m_NullBlock->offset = currentBlock->offset + size; + m_NullBlock->prevPhysical = currentBlock; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->PrevFree() = VMA_NULL; + m_NullBlock->NextFree() = VMA_NULL; + currentBlock->nextPhysical = m_NullBlock; + currentBlock->MarkTaken(); + } + } + else + { + VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!"); + + // Create new free block + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = currentBlock->size - size; + newBlock->offset = currentBlock->offset + size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + currentBlock->nextPhysical = newBlock; + currentBlock->size = size; + + if (currentBlock == m_NullBlock) + { + 
m_NullBlock = newBlock; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + currentBlock->MarkTaken(); + } + else + { + newBlock->nextPhysical->prevPhysical = newBlock; + newBlock->MarkTaken(); + InsertFreeBlock(newBlock); + } + } + currentBlock->UserData() = userData; + + if (debugMargin > 0) + { + currentBlock->size -= debugMargin; + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = debugMargin; + newBlock->offset = currentBlock->offset + currentBlock->size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + newBlock->MarkTaken(); + currentBlock->nextPhysical->prevPhysical = newBlock; + currentBlock->nextPhysical = newBlock; + InsertFreeBlock(newBlock); + } + + if (!IsVirtual()) + m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, + currentBlock->offset, currentBlock->size); + ++m_AllocCount; +} + +void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) +{ + Block* block = (Block*)allocHandle; + Block* next = block->nextPhysical; + VMA_ASSERT(!block->IsFree() && "Block is already free!"); + + if (!IsVirtual()) + m_GranularityHandler.FreePages(block->offset, block->size); + --m_AllocCount; + + VkDeviceSize debugMargin = GetDebugMargin(); + if (debugMargin > 0) + { + RemoveFreeBlock(next); + MergeBlock(next, block); + block = next; + next = next->nextPhysical; + } + + // Try merging + Block* prev = block->prevPhysical; + if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) + { + RemoveFreeBlock(prev); + MergeBlock(block, prev); + } + + if (!next->IsFree()) + InsertFreeBlock(block); + else if (next == m_NullBlock) + MergeBlock(m_NullBlock, block); + else + { + RemoveFreeBlock(next); + MergeBlock(next, block); + InsertFreeBlock(next); + } +} + +void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + Block* block = (Block*)allocHandle; + 
VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); + outInfo.offset = block->offset; + outInfo.size = block->size; + outInfo.pUserData = block->UserData(); +} + +void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); + return block->UserData(); +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const +{ + if (m_AllocCount == 0) + return VK_NULL_HANDLE; + + for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + Block* startBlock = (Block*)prevAlloc; + VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); + + for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + return VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + Block* block = (Block*)alloc; + VMA_ASSERT(!block->IsFree() && "Incorrect block!"); + + if (block->prevPhysical) + return block->prevPhysical->IsFree() ? 
block->prevPhysical->size : 0; + return 0; +} + +void VmaBlockMetadata_TLSF::Clear() +{ + m_AllocCount = 0; + m_BlocksFreeCount = 0; + m_BlocksFreeSize = 0; + m_IsFreeBitmap = 0; + m_NullBlock->offset = 0; + m_NullBlock->size = GetSize(); + Block* block = m_NullBlock->prevPhysical; + m_NullBlock->prevPhysical = VMA_NULL; + while (block) + { + Block* prev = block->prevPhysical; + m_BlockAllocator.Free(block); + block = prev; + } + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); + memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t)); + m_GranularityHandler.Clear(); +} + +void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!"); + block->UserData() = userData; +} + +void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const +{ + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + if (!block->IsFree()) + DebugLogAllocation(block->offset, block->size, block->UserData()); +} + +uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) +{ + if (size > SMALL_BUFFER_SIZE) + return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT); + return 0; +} + +uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const +{ + if (memoryClass == 0) + { + if (IsVirtual()) + return static_cast((size - 1) / 8); + return static_cast((size - 1) / 64); + } + return static_cast((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX)); +} + +uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const +{ + if (memoryClass == 0) + return secondIndex; + + const uint32_t index = static_cast(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex; + if (IsVirtual()) + return index + (1 << SECOND_LEVEL_INDEX); + return index + 4; +} + +uint32_t 
VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass)); +} + +void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(block->IsFree()); + + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block->PrevFree(); + if (block->PrevFree() != VMA_NULL) + block->PrevFree()->NextFree() = block->NextFree(); + else + { + uint8_t memClass = SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + VMA_ASSERT(m_FreeList[index] == block); + m_FreeList[index] = block->NextFree(); + if (block->NextFree() == VMA_NULL) + { + m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex); + if (m_InnerIsFreeBitmap[memClass] == 0) + m_IsFreeBitmap &= ~(1UL << memClass); + } + } + block->MarkTaken(); + block->UserData() = VMA_NULL; + --m_BlocksFreeCount; + m_BlocksFreeSize -= block->size; +} + +void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!"); + + uint8_t memClass = SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + VMA_ASSERT(index < m_ListsCount); + block->PrevFree() = VMA_NULL; + block->NextFree() = m_FreeList[index]; + m_FreeList[index] = block; + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block; + else + { + m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex; + m_IsFreeBitmap |= 1UL << memClass; + } + ++m_BlocksFreeCount; + m_BlocksFreeSize += block->size; +} + +void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev) +{ + VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!"); + VMA_ASSERT(!prev->IsFree() && 
"Cannot merge block that belongs to free list!"); + + block->offset = prev->offset; + block->size += prev->size; + block->prevPhysical = prev->prevPhysical; + if (block->prevPhysical) + block->prevPhysical->nextPhysical = block; + m_BlockAllocator.Free(prev); +} + +VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass)); + if (!innerFreeMap) + { + // Check higher levels for available blocks + uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1)); + if (!freeMap) + return VMA_NULL; // No more memory available + + // Find lowest free region + memoryClass = VMA_BITSCAN_LSB(freeMap); + innerFreeMap = m_InnerIsFreeBitmap[memoryClass]; + VMA_ASSERT(innerFreeMap != 0); + } + // Find lowest free subregion + listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap)); + VMA_ASSERT(m_FreeList[listIndex]); + return m_FreeList[listIndex]; +} + +bool VmaBlockMetadata_TLSF::CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(block.IsFree() && "Block is already taken!"); + + VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment); + if (block.size < allocSize + alignedOffset - block.offset) + return false; + + // Check for granularity conflicts + if (!IsVirtual() && + m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType)) + return false; + + // Alloc successful + pAllocationRequest->type = VmaAllocationRequestType::TLSF; + pAllocationRequest->allocHandle = (VmaAllocHandle)█ + pAllocationRequest->size = allocSize - GetDebugMargin(); + pAllocationRequest->customData = (void*)allocType; + pAllocationRequest->algorithmData = alignedOffset; 
+ + // Place block at the start of list if it's normal block + if (listIndex != m_ListsCount && block.PrevFree()) + { + block.PrevFree()->NextFree() = block.NextFree(); + if (block.NextFree()) + block.NextFree()->PrevFree() = block.PrevFree(); + block.PrevFree() = VMA_NULL; + block.NextFree() = m_FreeList[listIndex]; + m_FreeList[listIndex] = █ + if (block.NextFree()) + block.NextFree()->PrevFree() = █ + } + + return true; +} +#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_TLSF + +#ifndef _VMA_BLOCK_VECTOR +/* +Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific +Vulkan memory type. + +Synchronized internally with a mutex. +*/ +class VmaBlockVector +{ + friend struct VmaDefragmentationContext_T; + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector) +public: + VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + bool explicitBlockSize, + uint32_t algorithm, + float priority, + VkDeviceSize minAllocationAlignment, + void* pMemoryAllocateNext); + ~VmaBlockVector(); + + VmaAllocator GetAllocator() const { return m_hAllocator; } + VmaPool GetParentPool() const { return m_hParentPool; } + bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + uint32_t GetAlgorithm() const { return m_Algorithm; } + bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; } + float GetPriority() const { return m_Priority; } + const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } + // To be used only while the m_Mutex is locked. Used during defragmentation. 
+ size_t GetBlockCount() const { return m_Blocks.size(); } + // To be used only while the m_Mutex is locked. Used during defragmentation. + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } + VMA_RW_MUTEX &GetMutex() { return m_Mutex; } + + VkResult CreateMinBlocks(); + void AddStatistics(VmaStatistics& inoutStats); + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + bool IsEmpty(); + bool IsCorruptionDetectionEnabled() const; + + VkResult Allocate( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + void Free(VmaAllocation hAllocation); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + VkResult CheckCorruption(); + +private: + const VmaAllocator m_hAllocator; + const VmaPool m_hParentPool; + const uint32_t m_MemoryTypeIndex; + const VkDeviceSize m_PreferredBlockSize; + const size_t m_MinBlockCount; + const size_t m_MaxBlockCount; + const VkDeviceSize m_BufferImageGranularity; + const bool m_ExplicitBlockSize; + const uint32_t m_Algorithm; + const float m_Priority; + const VkDeviceSize m_MinAllocationAlignment; + + void* const m_pMemoryAllocateNext; + VMA_RW_MUTEX m_Mutex; + // Incrementally sorted by sumFreeSize, ascending. + VmaVector> m_Blocks; + uint32_t m_NextBlockId; + bool m_IncrementalSort = true; + + void SetIncrementalSort(bool val) { m_IncrementalSort = val; } + + VkDeviceSize CalcMaxBlockSize() const; + // Finds and removes given block from vector. + void Remove(VmaDeviceMemoryBlock* pBlock); + // Performs single step in sorting m_Blocks. They may not be fully sorted + // after this call. 
+ void IncrementallySortBlocks(); + void SortByFreeSize(); + + VkResult AllocatePage( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + VkResult AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation); + + VkResult CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); + bool HasEmptyBlock(); +}; +#endif // _VMA_BLOCK_VECTOR + +#ifndef _VMA_DEFRAGMENTATION_CONTEXT +struct VmaDefragmentationContext_T +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T) +public: + VmaDefragmentationContext_T( + VmaAllocator hAllocator, + const VmaDefragmentationInfo& info); + ~VmaDefragmentationContext_T(); + + void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; } + + VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); + VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); + +private: + // Max number of allocations to ignore due to size constraints before ending single pass + static constexpr uint8_t MAX_ALLOCS_TO_IGNORE = 16; + enum class CounterStatus { Pass, Ignore, End }; + + struct FragmentedBlock + { + uint32_t data; + VmaDeviceMemoryBlock* block; + }; + struct StateBalanced + { + VkDeviceSize avgFreeSize = 0; + VkDeviceSize avgAllocSize = UINT64_MAX; + }; + struct StateExtensive + { + enum class Operation : uint8_t + { + FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, + MoveBuffers, MoveTextures, MoveAll, + Cleanup, Done + }; + + Operation 
operation = Operation::FindFreeBlockTexture; + size_t firstFreeBlock = SIZE_MAX; + }; + struct MoveAllocationData + { + VkDeviceSize size; + VkDeviceSize alignment; + VmaSuballocationType type; + VmaAllocationCreateFlags flags; + VmaDefragmentationMove move = {}; + }; + + const VkDeviceSize m_MaxPassBytes; + const uint32_t m_MaxPassAllocations; + const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback; + void* m_BreakCallbackUserData; + + VmaStlAllocator m_MoveAllocator; + VmaVector> m_Moves; + + uint8_t m_IgnoredAllocs = 0; + uint32_t m_Algorithm; + uint32_t m_BlockVectorCount; + VmaBlockVector* m_PoolBlockVector; + VmaBlockVector** m_pBlockVectors; + size_t m_ImmovableBlockCount = 0; + VmaDefragmentationStats m_GlobalStats = { 0 }; + VmaDefragmentationStats m_PassStats = { 0 }; + void* m_AlgorithmState = VMA_NULL; + + static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); + CounterStatus CheckCounters(VkDeviceSize bytes); + bool IncrementCounters(VkDeviceSize bytes); + bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); + bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); + + bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); + bool ComputeDefragmentation_Fast(VmaBlockVector& vector); + bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); + bool ComputeDefragmentation_Full(VmaBlockVector& vector); + bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); + + static void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); + bool MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent); +}; +#endif // _VMA_DEFRAGMENTATION_CONTEXT + +#ifndef _VMA_POOL_T +struct VmaPool_T +{ + friend struct VmaPoolListItemTraits; + VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T) 
+public: + VmaBlockVector m_BlockVector; + VmaDedicatedAllocationList m_DedicatedAllocations; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; + VmaPool_T* m_PrevPool = VMA_NULL; + VmaPool_T* m_NextPool = VMA_NULL; +}; + +struct VmaPoolListItemTraits +{ + typedef VmaPool_T ItemType; + + static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } + static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } + static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } + static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } +}; +#endif // _VMA_POOL_T + +#ifndef _VMA_CURRENT_BUDGET_DATA +struct VmaCurrentBudgetData +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData) +public: + + VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; + +#if VMA_MEMORY_BUDGET + VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; + VMA_RW_MUTEX m_BudgetMutex; + uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; + uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; + uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; +#endif // VMA_MEMORY_BUDGET + + VmaCurrentBudgetData(); + + void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); + void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); +}; + +#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +VmaCurrentBudgetData::VmaCurrentBudgetData() +{ + for (uint32_t heapIndex = 0; 
heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) + { + m_BlockCount[heapIndex] = 0; + m_AllocationCount[heapIndex] = 0; + m_BlockBytes[heapIndex] = 0; + m_AllocationBytes[heapIndex] = 0; +#if VMA_MEMORY_BUDGET + m_VulkanUsage[heapIndex] = 0; + m_VulkanBudget[heapIndex] = 0; + m_BlockBytesAtBudgetFetch[heapIndex] = 0; +#endif + } + +#if VMA_MEMORY_BUDGET + m_OperationsSinceBudgetFetch = 0; +#endif +} + +void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + m_AllocationBytes[heapIndex] += allocationSize; + ++m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} + +void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); + m_AllocationBytes[heapIndex] -= allocationSize; + VMA_ASSERT(m_AllocationCount[heapIndex] > 0); + --m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} +#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +#endif // _VMA_CURRENT_BUDGET_DATA + +#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR +/* +Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. +*/ +class VmaAllocationObjectAllocator +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator) +public: + explicit VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) + : m_Allocator(pAllocationCallbacks, 1024) {} + + template VmaAllocation Allocate(Types&&... args); + void Free(VmaAllocation hAlloc); + +private: + VMA_MUTEX m_Mutex; + VmaPoolAllocator m_Allocator; +}; + +template +VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... 
args) +{ + VmaMutexLock mutexLock(m_Mutex); + return m_Allocator.Alloc(std::forward(args)...); +} + +void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) +{ + VmaMutexLock mutexLock(m_Mutex); + m_Allocator.Free(hAlloc); +} +#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR + +#ifndef _VMA_VIRTUAL_BLOCK_T +struct VmaVirtualBlock_T +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T) +public: + const bool m_AllocationCallbacksSpecified; + const VkAllocationCallbacks m_AllocationCallbacks; + + explicit VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo); + ~VmaVirtualBlock_T(); + + bool IsEmpty() const { return m_Metadata->IsEmpty(); } + void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); } + void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); } + void Clear() { m_Metadata->Clear(); } + + const VkAllocationCallbacks* GetAllocationCallbacks() const; + void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo); + VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset); + void GetStatistics(VmaStatistics& outStats) const; + void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const; +#if VMA_STATS_STRING_ENABLED + void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const; +#endif + +private: + VmaBlockMetadata* m_Metadata; +}; + +#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS +VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo) + : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? 
*createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks) +{ + const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK; + switch (algorithm) + { + case 0: + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); + break; + case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT: + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true); + break; + default: + VMA_ASSERT(0); + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); + } + + m_Metadata->Init(createInfo.size); +} + +VmaVirtualBlock_T::~VmaVirtualBlock_T() +{ + // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT + // to receive the list of the unfreed allocations. + if (!m_Metadata->IsEmpty()) + m_Metadata->DebugLogAllAllocations(); + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased virtual allocations. + VMA_ASSERT_LEAK(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!"); + + vma_delete(GetAllocationCallbacks(), m_Metadata); +} + +const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const +{ + return m_AllocationCallbacksSpecified ? 
&m_AllocationCallbacks : VMA_NULL; +} + +void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) +{ + m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); +} + +VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset) +{ + VmaAllocationRequest request = {}; + if (m_Metadata->CreateAllocationRequest( + createInfo.size, // allocSize + VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment + (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress + VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant + createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy + &request)) + { + m_Metadata->Alloc(request, + VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant + createInfo.pUserData); + outAllocation = (VmaVirtualAllocation)request.allocHandle; + if(outOffset) + *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); + return VK_SUCCESS; + } + outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; + if (outOffset) + *outOffset = UINT64_MAX; + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const +{ + VmaClearStatistics(outStats); + m_Metadata->AddStatistics(outStats); +} + +void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const +{ + VmaClearDetailedStatistics(outStats); + m_Metadata->AddDetailedStatistics(outStats); +} + +#if VMA_STATS_STRING_ENABLED +void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const +{ + VmaJsonWriter json(GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaDetailedStatistics stats; + CalculateDetailedStatistics(stats); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats); + + if (detailedMap) + { + json.WriteString("Details"); + json.BeginObject(); + 
m_Metadata->PrintDetailedMap(json); + json.EndObject(); + } + + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS +#endif // _VMA_VIRTUAL_BLOCK_T + + +// Main allocator object. +struct VmaAllocator_T +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T) +public: + const bool m_UseMutex; + const uint32_t m_VulkanApiVersion; + bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseExtMemoryBudget; + bool m_UseAmdDeviceCoherentMemory; + bool m_UseKhrBufferDeviceAddress; + bool m_UseExtMemoryPriority; + bool m_UseKhrMaintenance4; + bool m_UseKhrMaintenance5; + bool m_UseKhrExternalMemoryWin32; + const VkDevice m_hDevice; + const VkInstance m_hInstance; + const bool m_AllocationCallbacksSpecified; + const VkAllocationCallbacks m_AllocationCallbacks; + VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; + VmaAllocationObjectAllocator m_AllocationObjectAllocator; + + // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. + uint32_t m_HeapSizeLimitMask; + + VkPhysicalDeviceProperties m_PhysicalDeviceProperties; + VkPhysicalDeviceMemoryProperties m_MemProps; + + // Default pools. + VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; + VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; + + VmaCurrentBudgetData m_Budget; + VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. + + explicit VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); + VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); + ~VmaAllocator_T(); + + const VkAllocationCallbacks* GetAllocationCallbacks() const + { + return m_AllocationCallbacksSpecified ? 
&m_AllocationCallbacks : VMA_NULL; + } + const VmaVulkanFunctions& GetVulkanFunctions() const + { + return m_VulkanFunctions; + } + + VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; } + + VkDeviceSize GetBufferImageGranularity() const + { + return VMA_MAX( + static_cast(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), + m_PhysicalDeviceProperties.limits.bufferImageGranularity); + } + + uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } + uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } + + uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const + { + VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); + return m_MemProps.memoryTypes[memTypeIndex].heapIndex; + } + // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT. + bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const + { + return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + // Minimum alignment for all allocations in specific memory type. + VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const + { + return IsMemoryTypeNonCoherent(memTypeIndex) ? 
+ VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : + (VkDeviceSize)VMA_MIN_ALIGNMENT; + } + + bool IsIntegratedGpu() const + { + return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; + } + + uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } + + void GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + void GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + VkResult FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VmaBufferImageUsage bufImgUsage, + uint32_t* pMemoryTypeIndex) const; + + // Common code for public functions vmaCreateBuffer, vmaCreateBufferWithAlignment, etc. + VkResult CreateBuffer( + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + void* pMemoryAllocateNext, // pNext chain for VkMemoryAllocateInfo. + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo); + // Common code for public functions vmaCreateImage, vmaCreateDedicatedImage. + VkResult CreateImage( + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + void* pMemoryAllocateNext, // pNext chain for VkMemoryAllocateInfo. + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo); + + // Main allocation function. + VkResult AllocateMemory( + VkMemoryRequirements vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + void* pMemoryAllocateNext, // Optional pNext chain for VkMemoryAllocateInfo. 
+ const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Main deallocation function. + void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); + + void CalculateStatistics(VmaTotalStatistics* pStats); + + void GetHeapBudgets( + VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + static void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + static void GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo); + + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + void DestroyPool(VmaPool pool); + static void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); + static void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); + + void SetCurrentFrameIndex(uint32_t frameIndex); + uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } + + static VkResult CheckPoolCorruption(VmaPool hPool); + VkResult CheckCorruption(uint32_t memoryTypeBits); + + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. + void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) const; + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. 
+ VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) const; + + VkResult Map(VmaAllocation hAllocation, void** ppData); + void Unmap(VmaAllocation hAllocation); + + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + + VkResult FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); + VkResult FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op); + + VkResult CopyMemoryToAllocation( + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size); + VkResult CopyAllocationToMemory( + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size); + + void FillAllocation(VmaAllocation hAllocation, uint8_t pattern); + + /* + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ + uint32_t GetGpuDefragmentationMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const + { + return m_TypeExternalMemoryHandleTypes[memTypeIndex]; + } +#endif // #if VMA_EXTERNAL_MEMORY + +private: + VkDeviceSize m_PreferredLargeHeapBlockSize; + + VkPhysicalDevice m_PhysicalDevice; + VMA_ATOMIC_UINT32 m_CurrentFrameIndex; + VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. 
+#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES]; +#endif // #if VMA_EXTERNAL_MEMORY + + VMA_RW_MUTEX m_PoolsMutex; + typedef VmaIntrusiveLinkedList PoolList; + // Protected by m_PoolsMutex. + PoolList m_Pools; + uint32_t m_NextPoolId; + + VmaVulkanFunctions m_VulkanFunctions; + + // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. + uint32_t m_GlobalMemoryTypeBits; + + void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Static(); +#endif + + void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Dynamic(); +#endif + + void ValidateVulkanFunctions() const; + + VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); + + VkResult AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + void* pMemoryAllocateNext, // Optional pNext chain for VkMemoryAllocateInfo. + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Helper function only to be used inside AllocateDedicatedMemory. + VkResult AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation); + + // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. 
+ VkResult AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain); + + void FreeDedicatedMemory(VmaAllocation allocation); + + VkResult CalcMemTypeParams( + VmaAllocationCreateInfo& outCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount); + static VkResult CalcAllocationParams( + VmaAllocationCreateInfo& outCreateInfo, + bool dedicatedRequired); + + /* + Calculates and returns bit mask of memory types that can support defragmentation + on GPU as they support creation of required buffer for copy operations. + */ + uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; + uint32_t CalculateGlobalMemoryTypeBits() const; + + bool GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const; + +#if VMA_MEMORY_BUDGET + void UpdateVulkanBudget(); +#endif // #if VMA_MEMORY_BUDGET +}; + + +#ifndef _VMA_MEMORY_FUNCTIONS +namespace +{ +inline void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) +{ + return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); +} + +inline void VmaFree(VmaAllocator hAllocator, void* ptr) +{ + VmaFree(&hAllocator->m_AllocationCallbacks, ptr); +} + +template +T* VmaAllocate(VmaAllocator hAllocator) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +template +void vma_delete(VmaAllocator hAllocator, T* ptr) +{ + if(ptr 
!= VMA_NULL) + { + ptr->~T(); + VmaFree(hAllocator, ptr); + } +} + +template +void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) + ptr[i].~T(); + VmaFree(hAllocator, ptr); + } +} +} // namespace +#endif // _VMA_MEMORY_FUNCTIONS + +#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) + : m_pMetadata(VMA_NULL), + m_hParentPool(nullptr), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL){} + +VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock() +{ + VMA_ASSERT_LEAK(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); + VMA_ASSERT_LEAK(m_hMemory == VK_NULL_HANDLE); +} + +void VmaDeviceMemoryBlock::Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity) +{ + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + + m_hParentPool = hParentPool; + m_MemoryTypeIndex = newMemoryTypeIndex; + m_Id = id; + m_hMemory = newMemory; + + switch (algorithm) + { + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + break; + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + break; + default: + VMA_ASSERT(0); + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + } + m_pMetadata->Init(newSize); +} + +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ + // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT + // to receive the list of the unfreed 
allocations. + if (!m_pMetadata->IsEmpty()) + m_pMetadata->DebugLogAllAllocations(); + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased VmaAllocation objects. + VMA_ASSERT_LEAK(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT_LEAK(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + m_MappingHysteresis.PostAlloc(); +} + +void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if(m_MappingHysteresis.PostFree()) + { + VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); + if (m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = VMA_NULL; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if (count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (oldTotalMapCount != 0) + { + VMA_ASSERT(m_pMappedData != VMA_NULL); + 
m_MappingHysteresis.PostMap(); + m_MapCount += count; + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if (result == VK_SUCCESS) + { + VMA_ASSERT(m_pMappedData != VMA_NULL); + m_MappingHysteresis.PostMap(); + m_MapCount = count; + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + } + return result; +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if (count == 0) + { + return; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if (m_MapCount >= count) + { + m_MapCount -= count; + const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (totalMapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + m_MappingHysteresis.PostUnmap(); + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData = VMA_NULL; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData = VMA_NULL; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + if 
(!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + VmaAllocator hAllocator, + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + VmaAllocator hAllocator, + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. 
+ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} + +#if VMA_EXTERNAL_MEMORY_WIN32 +VkResult VmaDeviceMemoryBlock::CreateWin32Handle(const VmaAllocator hAllocator, PFN_vkGetMemoryWin32HandleKHR pvkGetMemoryWin32HandleKHR, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE hTargetProcess, HANDLE* pHandle) noexcept +{ + VMA_ASSERT(pHandle); + return m_Handle.GetHandle(hAllocator->m_hDevice, m_hMemory, pvkGetMemoryWin32HandleKHR, handleType, hTargetProcess, hAllocator->m_UseMutex, pHandle); +} +#endif // VMA_EXTERNAL_MEMORY_WIN32 +#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS + +#ifndef _VMA_ALLOCATION_T_FUNCTIONS +VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) + : m_Alignment{ 1 }, + m_Size{ 0 }, + m_pUserData{ VMA_NULL }, + m_pName{ VMA_NULL }, + m_MemoryTypeIndex{ 0 }, + m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, + m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, + m_MapCount{ 0 }, + m_Flags{ 0 } +{ + if(mappingAllowed) + m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; +} + +VmaAllocation_T::~VmaAllocation_T() +{ + VMA_ASSERT_LEAK(m_MapCount == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. + VMA_ASSERT(m_pName == VMA_NULL); +} + +void VmaAllocation_T::InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + if(mapped) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_AllocHandle = allocHandle; +} + +void VmaAllocation_T::InitDedicatedAllocation( + VmaAllocator allocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + m_DedicatedAllocation.m_ExtraData = VMA_NULL; + m_DedicatedAllocation.m_hParentPool = hParentPool; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_Prev = VMA_NULL; + m_DedicatedAllocation.m_Next = VMA_NULL; + + if (pMappedData != VMA_NULL) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + EnsureExtraData(allocator); + m_DedicatedAllocation.m_ExtraData->m_pMappedData = pMappedData; + } +} + +void VmaAllocation_T::Destroy(VmaAllocator allocator) +{ + FreeName(allocator); + + if (GetType() == ALLOCATION_TYPE_DEDICATED) + { + vma_delete(allocator, m_DedicatedAllocation.m_ExtraData); + } +} + +void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) +{ + VMA_ASSERT(pName == VMA_NULL || pName != m_pName); + + FreeName(hAllocator); + + if (pName != VMA_NULL) + m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); +} + +uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) +{ + VMA_ASSERT(allocation != VMA_NULL); + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount != 0) + m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); + + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); + std::swap(m_BlockAllocation, allocation->m_BlockAllocation); + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); + +#if VMA_STATS_STRING_ENABLED + std::swap(m_BufferImageUsage, allocation->m_BufferImageUsage); +#endif + return m_MapCount; +} + +VmaAllocHandle VmaAllocation_T::GetAllocHandle() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_AllocHandle; + case ALLOCATION_TYPE_DEDICATED: + return VK_NULL_HANDLE; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); 
+ return 0; + } +} + +VmaPool VmaAllocation_T::GetParentPool() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetParentPool(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hParentPool; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +void* VmaAllocation_T::GetMappedData() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if (m_MapCount != 0 || IsPersistentMap()) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + GetOffset(); + } + else + { + return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_ExtraData != VMA_NULL && m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL) == + (m_MapCount != 0 || IsPersistentMap())); + return m_DedicatedAllocation.m_ExtraData != VMA_NULL ? m_DedicatedAllocation.m_ExtraData->m_pMappedData : VMA_NULL; + default: + VMA_ASSERT(0); + return VMA_NULL; + } +} + +void VmaAllocation_T::BlockAllocMap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount < 0xFF) + { + ++m_MapCount; + } + else + { + VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); + } +} + +void VmaAllocation_T::BlockAllocUnmap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount > 0) + { + --m_MapCount; + } + else + { + VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); + } +} + +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + EnsureExtraData(hAllocator); + + if (m_MapCount != 0 || IsPersistentMap()) + { + if (m_MapCount < 0xFF) + { + VMA_ASSERT(m_DedicatedAllocation.m_ExtraData->m_pMappedData != VMA_NULL); + *ppData = m_DedicatedAllocation.m_ExtraData->m_pMappedData; + ++m_MapCount; + return VK_SUCCESS; + } + + VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); + return VK_ERROR_MEMORY_MAP_FAILED; + } + + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if (result == VK_SUCCESS) + { + m_DedicatedAllocation.m_ExtraData->m_pMappedData = *ppData; + m_MapCount = 1; + } + return result; +} + +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if (m_MapCount > 0) + { + --m_MapCount; + if (m_MapCount == 0 && !IsPersistentMap()) + { + VMA_ASSERT(m_DedicatedAllocation.m_ExtraData != VMA_NULL); + m_DedicatedAllocation.m_ExtraData->m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); + } + } + else + { + VMA_ASSERT(0 
&& "Unmapping dedicated allocation not previously mapped."); + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); + + json.WriteString("Size"); + json.WriteNumber(m_Size); + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage.Value); // It may be uint32_t or uint64_t. + + if (m_pUserData != VMA_NULL) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(m_pUserData); + json.EndString(); + } + if (m_pName != VMA_NULL) + { + json.WriteString("Name"); + json.WriteString(m_pName); + } +} +#if VMA_EXTERNAL_MEMORY_WIN32 +VkResult VmaAllocation_T::GetWin32Handle(VmaAllocator hAllocator, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE hTargetProcess, HANDLE* pHandle) noexcept +{ + auto pvkGetMemoryWin32HandleKHR = hAllocator->GetVulkanFunctions().vkGetMemoryWin32HandleKHR; + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->CreateWin32Handle(hAllocator, pvkGetMemoryWin32HandleKHR, handleType, hTargetProcess, pHandle); + case ALLOCATION_TYPE_DEDICATED: + EnsureExtraData(hAllocator); + return m_DedicatedAllocation.m_ExtraData->m_Handle.GetHandle(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory, pvkGetMemoryWin32HandleKHR, handleType, hTargetProcess, hAllocator->m_UseMutex, pHandle); + default: + VMA_ASSERT(0); + return VK_ERROR_FEATURE_NOT_PRESENT; + } +} +#endif // VMA_EXTERNAL_MEMORY_WIN32 +#endif // VMA_STATS_STRING_ENABLED + +void VmaAllocation_T::EnsureExtraData(VmaAllocator hAllocator) +{ + if (m_DedicatedAllocation.m_ExtraData == VMA_NULL) + { + m_DedicatedAllocation.m_ExtraData = vma_new(hAllocator, VmaAllocationExtraData)(); + } +} + +void VmaAllocation_T::FreeName(VmaAllocator hAllocator) +{ + if(m_pName) + { + VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); + m_pName = VMA_NULL; + } +} +#endif // 
_VMA_ALLOCATION_T_FUNCTIONS + +#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS +VmaBlockVector::VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + bool explicitBlockSize, + uint32_t algorithm, + float priority, + VkDeviceSize minAllocationAlignment, + void* pMemoryAllocateNext) + : m_hAllocator(hAllocator), + m_hParentPool(hParentPool), + m_MemoryTypeIndex(memoryTypeIndex), + m_PreferredBlockSize(preferredBlockSize), + m_MinBlockCount(minBlockCount), + m_MaxBlockCount(maxBlockCount), + m_BufferImageGranularity(bufferImageGranularity), + m_ExplicitBlockSize(explicitBlockSize), + m_Algorithm(algorithm), + m_Priority(priority), + m_MinAllocationAlignment(minAllocationAlignment), + m_pMemoryAllocateNext(pMemoryAllocateNext), + m_Blocks(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_NextBlockId(0) {} + +VmaBlockVector::~VmaBlockVector() +{ + for (size_t i = m_Blocks.size(); i--; ) + { + m_Blocks[i]->Destroy(m_hAllocator); + vma_delete(m_hAllocator, m_Blocks[i]); + } +} + +VkResult VmaBlockVector::CreateMinBlocks() +{ + for (size_t i = 0; i < m_MinBlockCount; ++i) + { + VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddStatistics(inoutStats); + } +} + +void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = 
m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddDetailedStatistics(inoutStats); + } +} + +bool VmaBlockVector::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + return m_Blocks.empty(); +} + +bool VmaBlockVector::IsCorruptionDetectionEnabled() const +{ +#if (VMA_DEBUG_DETECT_CORRUPTION == 0) || (VMA_DEBUG_MARGIN == 0) + return false; +#else + constexpr uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + return (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; +#endif +} + +VkResult VmaBlockVector::Allocate( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + size_t allocIndex = 0; + VkResult res = VK_SUCCESS; + + alignment = VMA_MAX(alignment, m_MinAllocationAlignment); + + if (IsCorruptionDetectionEnabled()) + { + size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + } + + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + for (; allocIndex < allocationCount; ++allocIndex) + { + res = AllocatePage( + size, + alignment, + createInfo, + suballocType, + pAllocations + allocIndex); + if (res != VK_SUCCESS) + { + break; + } + } + } + + if (res != VK_SUCCESS) + { + // Free all already created allocations. 
+ while (allocIndex--) + Free(pAllocations[allocIndex]); + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaBlockVector::AllocatePage( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VkDeviceSize freeMemory = 0; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0; + } + + const bool canFallbackToDedicated = !HasExplicitBlockSize() && + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; + const bool canCreateNewBlock = + ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && + (m_Blocks.size() < m_MaxBlockCount) && + (freeMemory >= size || !canFallbackToDedicated); + uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; + + // Upper address can only be used with linear allocator and within single memory block. + if (isUpperAddress && + (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + // Early reject: requested allocation size is larger that maximum block size for this block vector. + if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + + // 1. Search existing allocations. Try to allocate. + if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Use only last block. 
+ if (!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from last block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + else + { + if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default + { + const bool isHostVisible = + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + if(isHostVisible) + { + const bool isMappingAllowed = (createInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + /* + For non-mappable allocations, check blocks that are not mapped first. + For mappable allocations, check blocks that are already mapped first. + This way, having many blocks, we will separate mappable and non-mappable allocations, + hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. + */ + for(size_t mappingI = 0; mappingI < 2; ++mappingI) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. 
+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; + if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) + { + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + } + else + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if (canCreateNewBlock) + { + // Calculate optimal size for new block. 
+ VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if (!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. + const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if (!m_ExplicitBlockSize) + { + while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + break; + } + } + } + + if (res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); + + res = AllocateFromBlock( + pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Created new block #%" PRIu32 " Size=%" PRIu64, pBlock->GetId(), newBlockSize); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + + // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. 
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaBlockVector::Free(VmaAllocation hAllocation) +{ + VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; + + bool budgetExceeded = false; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + budgetExceeded = heapBudget.usage >= heapBudget.budget; + } + + // Scope for lock. + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); + } + + if (hAllocation->IsPersistentMap()) + { + pBlock->Unmap(m_hAllocator, 1); + } + + const bool hadEmptyBlockBeforeFree = HasEmptyBlock(); + pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); + pBlock->PostFree(m_hAllocator); + VMA_HEAVY_ASSERT(pBlock->Validate()); + + VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%" PRIu32, m_MemoryTypeIndex); + + const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; + // pBlock became empty after this deallocation. + if (pBlock->m_pMetadata->IsEmpty()) + { + // Already had empty block. We don't want to have two, so delete this one. + if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock) + { + pBlockToDelete = pBlock; + Remove(pBlock); + } + // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth. + } + // pBlock didn't become empty, but we have another empty block - find and free that one. + // (This is optional, heuristics.) 
+ else if (hadEmptyBlockBeforeFree && canDeleteBlock) + { + VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); + if (pLastBlock->m_pMetadata->IsEmpty()) + { + pBlockToDelete = pLastBlock; + m_Blocks.pop_back(); + } + } + + IncrementallySortBlocks(); + + m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); + hAllocation->Destroy(m_hAllocator); + m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); + } + + // Destruction of a free block. Deferred until this point, outside of mutex + // lock, for performance reason. + if (pBlockToDelete != VMA_NULL) + { + VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%" PRIu32, pBlockToDelete->GetId()); + pBlockToDelete->Destroy(m_hAllocator); + vma_delete(m_hAllocator, pBlockToDelete); + } +} + +VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const +{ + VkDeviceSize result = 0; + for (size_t i = m_Blocks.size(); i--; ) + { + result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); + if (result >= m_PreferredBlockSize) + { + break; + } + } + return result; +} + +void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) +{ + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + if (m_Blocks[blockIndex] == pBlock) + { + VmaVectorRemove(m_Blocks, blockIndex); + return; + } + } + VMA_ASSERT(0); +} + +void VmaBlockVector::IncrementallySortBlocks() +{ + if (!m_IncrementalSort) + return; + if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Bubble sort only until first swap. 
+ for (size_t i = 1; i < m_Blocks.size(); ++i) + { + if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) + { + std::swap(m_Blocks[i - 1], m_Blocks[i]); + return; + } + } + } +} + +void VmaBlockVector::SortByFreeSize() +{ + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), + [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool + { + return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); + }); +} + +VkResult VmaBlockVector::AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VmaAllocationRequest currRequest = {}; + if (pBlock->m_pMetadata->CreateAllocationRequest( + size, + alignment, + isUpperAddress, + suballocType, + strategy, + &currRequest)) + { + return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); + } + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +VkResult VmaBlockVector::CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + const bool isMappingAllowed = (allocFlags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + + pBlock->PostAlloc(m_hAllocator); + // Allocate from pCurrBlock. 
+ if (mapped) + { + VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); + pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); + (*pAllocation)->InitBlockAllocation( + pBlock, + allocRequest.allocHandle, + alignment, + allocRequest.size, // Not size, as actual allocation size may be larger than requested! + m_MemoryTypeIndex, + suballocType, + mapped); + VMA_HEAVY_ASSERT(pBlock->Validate()); + if (isUserDataString) + (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); + else + (*pAllocation)->SetUserData(m_hAllocator, pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); + +#if VMA_DEBUG_INITIALIZE_ALLOCATIONS + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); +#endif + + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; +} + +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +{ + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.pNext = m_pMemoryAllocateNext; + allocInfo.memoryTypeIndex = m_MemoryTypeIndex; + allocInfo.allocationSize = blockSize; + +#if VMA_BUFFER_DEVICE_ADDRESS + // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. 
+ VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if (m_hAllocator->m_UseKhrBufferDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } +#endif // VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if (m_hAllocator->m_UseExtMemoryPriority) + { + VMA_ASSERT(m_Priority >= 0.F && m_Priority <= 1.F); + priorityInfo.priority = m_Priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. + VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); + if (exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // VMA_EXTERNAL_MEMORY + + VkDeviceMemory mem = VK_NULL_HANDLE; + VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); + if (res < 0) + { + return res; + } + + // New VkDeviceMemory successfully created. + + // Create new Allocation for it. 
+ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); + pBlock->Init( + m_hAllocator, + m_hParentPool, + m_MemoryTypeIndex, + mem, + allocInfo.allocationSize, + m_NextBlockId++, + m_Algorithm, + m_BufferImageGranularity); + + m_Blocks.push_back(pBlock); + if (pNewBlockIndex != VMA_NULL) + { + *pNewBlockIndex = m_Blocks.size() - 1; + } + + return VK_SUCCESS; +} + +bool VmaBlockVector::HasEmptyBlock() +{ + for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + if (pBlock->m_pMetadata->IsEmpty()) + { + return true; + } + } + return false; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + + json.BeginObject(); + for (size_t i = 0; i < m_Blocks.size(); ++i) + { + json.BeginString(); + json.ContinueString(m_Blocks[i]->GetId()); + json.EndString(); + + json.BeginObject(); + json.WriteString("MapRefCount"); + json.WriteNumber(m_Blocks[i]->GetMapRefCount()); + + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + json.EndObject(); + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED + +VkResult VmaBlockVector::CheckCorruption() +{ + if (!IsCorruptionDetectionEnabled()) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VkResult res = pBlock->CheckCorruption(m_hAllocator); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +#endif // _VMA_BLOCK_VECTOR_FUNCTIONS + +#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS +VmaDefragmentationContext_T::VmaDefragmentationContext_T( + VmaAllocator hAllocator, + const VmaDefragmentationInfo& info) + : m_MaxPassBytes(info.maxBytesPerPass == 0 ? 
VK_WHOLE_SIZE : info.maxBytesPerPass), + m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass), + m_BreakCallback(info.pfnBreakCallback), + m_BreakCallbackUserData(info.pBreakCallbackUserData), + m_MoveAllocator(hAllocator->GetAllocationCallbacks()), + m_Moves(m_MoveAllocator), + m_Algorithm(info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK) +{ + if (info.pool != VMA_NULL) + { + m_BlockVectorCount = 1; + m_PoolBlockVector = &info.pool->m_BlockVector; + m_pBlockVectors = &m_PoolBlockVector; + m_PoolBlockVector->SetIncrementalSort(false); + m_PoolBlockVector->SortByFreeSize(); + } + else + { + m_BlockVectorCount = hAllocator->GetMemoryTypeCount(); + m_PoolBlockVector = VMA_NULL; + m_pBlockVectors = hAllocator->m_pBlockVectors; + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) + { + VmaBlockVector* vector = m_pBlockVectors[i]; + if (vector != VMA_NULL) + { + vector->SetIncrementalSort(false); + vector->SortByFreeSize(); + } + } + } + + switch (m_Algorithm) + { + case 0: // Default algorithm + m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT; + m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); + break; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); + break; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + if (hAllocator->GetBufferImageGranularity() > 1) + { + m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount); + } + break; + default: + ; // Do nothing. 
+ } +} + +VmaDefragmentationContext_T::~VmaDefragmentationContext_T() +{ + if (m_PoolBlockVector != VMA_NULL) + { + m_PoolBlockVector->SetIncrementalSort(true); + } + else + { + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) + { + VmaBlockVector* vector = m_pBlockVectors[i]; + if (vector != VMA_NULL) + vector->SetIncrementalSort(true); + } + } + + if (m_AlgorithmState) + { + switch (m_Algorithm) + { + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); + break; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); + break; + default: + VMA_ASSERT(0); + } + } +} + +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo) +{ + if (m_PoolBlockVector != VMA_NULL) + { + VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex); + + if (m_PoolBlockVector->GetBlockCount() > 1) + ComputeDefragmentation(*m_PoolBlockVector, 0); + else if (m_PoolBlockVector->GetBlockCount() == 1) + ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0)); + } + else + { + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) + { + if (m_pBlockVectors[i] != VMA_NULL) + { + VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex); + + if (m_pBlockVectors[i]->GetBlockCount() > 1) + { + if (ComputeDefragmentation(*m_pBlockVectors[i], i)) + break; + } + else if (m_pBlockVectors[i]->GetBlockCount() == 1) + { + if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0))) + break; + } + } + } + } + + moveInfo.moveCount = static_cast(m_Moves.size()); + if (moveInfo.moveCount > 0) + { + moveInfo.pMoves = m_Moves.data(); + return VK_INCOMPLETE; + } + + moveInfo.pMoves = VMA_NULL; + return VK_SUCCESS; +} + +VkResult 
VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo) +{ + VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true); + + VkResult result = VK_SUCCESS; + VmaStlAllocator blockAllocator(m_MoveAllocator.m_pCallbacks); + VmaVector> immovableBlocks(blockAllocator); + VmaVector> mappedBlocks(blockAllocator); + + VmaAllocator allocator = VMA_NULL; + for (uint32_t i = 0; i < moveInfo.moveCount; ++i) + { + VmaDefragmentationMove& move = moveInfo.pMoves[i]; + size_t prevCount = 0; + size_t currentCount = 0; + VkDeviceSize freedBlockSize = 0; + + uint32_t vectorIndex = 0; + VmaBlockVector* vector = VMA_NULL; + if (m_PoolBlockVector != VMA_NULL) + { + vector = m_PoolBlockVector; + } + else + { + vectorIndex = move.srcAllocation->GetMemoryTypeIndex(); + vector = m_pBlockVectors[vectorIndex]; + VMA_ASSERT(vector != VMA_NULL); + } + + switch (move.operation) + { + case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY: + { + uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation); + if (mapCount > 0) + { + allocator = vector->m_hAllocator; + VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock(); + bool notPresent = true; + for (FragmentedBlock& block : mappedBlocks) + { + if (block.block == newMapBlock) + { + notPresent = false; + block.data += mapCount; + break; + } + } + if (notPresent) + mappedBlocks.push_back({ mapCount, newMapBlock }); + } + + // Scope for locks, Free have it's own lock + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + prevCount = vector->GetBlockCount(); + freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.dstTmpAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + currentCount = vector->GetBlockCount(); + } + + result = VK_INCOMPLETE; + break; + } + case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE: + { + m_PassStats.bytesMoved -= 
move.srcAllocation->GetSize(); + --m_PassStats.allocationsMoved; + vector->Free(move.dstTmpAllocation); + + VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock(); + bool notPresent = true; + for (const FragmentedBlock& block : immovableBlocks) + { + if (block.block == newBlock) + { + notPresent = false; + break; + } + } + if (notPresent) + immovableBlocks.push_back({ vectorIndex, newBlock }); + break; + } + case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY: + { + m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); + --m_PassStats.allocationsMoved; + // Scope for locks, Free have it's own lock + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + prevCount = vector->GetBlockCount(); + freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.srcAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + currentCount = vector->GetBlockCount(); + } + freedBlockSize *= prevCount - currentCount; + + VkDeviceSize dstBlockSize = SIZE_MAX; + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.dstTmpAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount()); + currentCount = vector->GetBlockCount(); + } + + result = VK_INCOMPLETE; + break; + } + default: + VMA_ASSERT(0); + } + + if (prevCount > currentCount) + { + size_t freedBlocks = prevCount - currentCount; + m_PassStats.deviceMemoryBlocksFreed += static_cast(freedBlocks); + m_PassStats.bytesFreed += freedBlockSize; + } + + if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT && + m_AlgorithmState != VMA_NULL) + { + // Avoid unnecessary tries to allocate when new free block is available + StateExtensive& state = 
reinterpret_cast(m_AlgorithmState)[vectorIndex]; + if (state.firstFreeBlock != SIZE_MAX) + { + const size_t diff = prevCount - currentCount; + if (state.firstFreeBlock >= diff) + { + state.firstFreeBlock -= diff; + if (state.firstFreeBlock != 0) + state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty(); + } + else + state.firstFreeBlock = 0; + } + } + } + moveInfo.moveCount = 0; + moveInfo.pMoves = VMA_NULL; + m_Moves.clear(); + + // Update stats + m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved; + m_GlobalStats.bytesFreed += m_PassStats.bytesFreed; + m_GlobalStats.bytesMoved += m_PassStats.bytesMoved; + m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed; + m_PassStats = { 0 }; + + // Move blocks with immovable allocations according to algorithm + if (!immovableBlocks.empty()) + { + do + { + if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT) + { + if (m_AlgorithmState != VMA_NULL) + { + bool swapped = false; + // Move to the start of free blocks range + for (const FragmentedBlock& block : immovableBlocks) + { + StateExtensive& state = reinterpret_cast(m_AlgorithmState)[block.data]; + if (state.operation != StateExtensive::Operation::Cleanup) + { + VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + + for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i) + { + if (vector->GetBlock(i) == block.block) + { + std::swap(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]); + if (state.firstFreeBlock != SIZE_MAX) + { + if (i + 1 < state.firstFreeBlock) + { + if (state.firstFreeBlock > 1) + std::swap(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]); + else + --state.firstFreeBlock; + } + } + swapped = true; + break; + } + } + } + } + if (swapped) + result = VK_INCOMPLETE; + break; + } + } + + // Move to the 
beginning + for (const FragmentedBlock& block : immovableBlocks) + { + VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + + for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i) + { + if (vector->GetBlock(i) == block.block) + { + std::swap(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]); + break; + } + } + } + } while (false); + } + + // Bulk-map destination blocks + for (const FragmentedBlock& block : mappedBlocks) + { + VkResult res = block.block->Map(allocator, block.data, VMA_NULL); + VMA_ASSERT(res == VK_SUCCESS); + } + return result; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index) +{ + switch (m_Algorithm) + { + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT: + return ComputeDefragmentation_Fast(vector); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + return ComputeDefragmentation_Balanced(vector, index, true); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT: + return ComputeDefragmentation_Full(vector); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + return ComputeDefragmentation_Extensive(vector, index); + default: + VMA_ASSERT(0); + return ComputeDefragmentation_Balanced(vector, index, true); + } +} + +VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( + VmaAllocHandle handle, VmaBlockMetadata* metadata) +{ + MoveAllocationData moveData; + moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); + moveData.size = moveData.move.srcAllocation->GetSize(); + moveData.alignment = moveData.move.srcAllocation->GetAlignment(); + moveData.type = moveData.move.srcAllocation->GetSuballocationType(); + moveData.flags = 0; + + if (moveData.move.srcAllocation->IsPersistentMap()) + moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; + if (moveData.move.srcAllocation->IsMappingAllowed()) + moveData.flags |= 
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + + return moveData; +} + +VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) +{ + // Check custom criteria if exists + if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData)) + return CounterStatus::End; + + // Ignore allocation if will exceed max size for copy + if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) + { + if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) + return CounterStatus::Ignore; + return CounterStatus::End; + } + + m_IgnoredAllocs = 0; + return CounterStatus::Pass; +} + +bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) +{ + m_PassStats.bytesMoved += bytes; + // Early return when max found + if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) + { + VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations || + m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!"); + return true; + } + return false; +} + +bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) +{ + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + 
VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) +{ + for (; start < end; ++start) + { + VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); + if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) + { + if (vector.AllocateFromBlock(dstBlock, + data.size, + data.alignment, + data.flags, + this, + data.type, + 0, + &data.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(data.move); + if (IncrementCounters(data.size)) + return true; + break; + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) +{ + // Move only between blocks + + // Go through allocations in last blocks and try to fit them inside first ones + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: 
+ return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0), + // but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block) + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index]; + if (update && vectorState.avgAllocSize == UINT64_MAX) + UpdateVectorStatistics(vector, vectorState); + + const size_t startMoveCount = m_Moves.size(); + VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + VkDeviceSize prevFreeRegionSize = 0; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + VkDeviceSize nextFreeRegionSize = 
metadata->GetNextFreeRegionSize(handle); + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + // Check if realloc will make sense + if (prevFreeRegionSize >= minimalFreeRegion || + nextFreeRegionSize >= minimalFreeRegion || + moveData.size <= vectorState.avgFreeSize || + moveData.size <= vectorState.avgAllocSize) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + prevFreeRegionSize = nextFreeRegionSize; + } + } + + // No moves performed, update statistics to current vector state + if (startMoveCount == m_Moves.size() && !update) + { + vectorState.avgAllocSize = UINT64_MAX; + return ComputeDefragmentation_Balanced(vector, index, false); + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0) + + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + 
MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) +{ + // First free single block, then populate it to the brim, then free another block, and so on + + // Fallback to previous algorithm since without granularity conflicts it can achieve max packing + if (vector.m_BufferImageGranularity == 1) + return ComputeDefragmentation_Full(vector); + + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index]; + + bool texturePresent = false; + bool bufferPresent = false; + bool 
otherPresent = false; + switch (vectorState.operation) + { + case StateExtensive::Operation::Done: // Vector defragmented + return false; + case StateExtensive::Operation::FindFreeBlockBuffer: + case StateExtensive::Operation::FindFreeBlockTexture: + case StateExtensive::Operation::FindFreeBlockAll: + { + // No more blocks to free, just perform fast realloc and move to cleanup + if (vectorState.firstFreeBlock == 0) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + return ComputeDefragmentation_Fast(vector); + } + + // No free blocks, have to clear last one + size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; + VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; + + const size_t prevMoveCount = m_Moves.size(); + for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = freeMetadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, freeMetadata); + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, last, moveData, vector)) + { + // Full clear performed already + if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE) + vectorState.firstFreeBlock = last; + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // Cannot perform full clear, have to move data in other blocks around + if (last != 0) + { + for (size_t i = last - 1; i; --i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // No possible reallocs within blocks, try to move them around fast + return ComputeDefragmentation_Fast(vector); + } + } + else + { + 
switch (vectorState.operation) + { + case StateExtensive::Operation::FindFreeBlockBuffer: + vectorState.operation = StateExtensive::Operation::MoveBuffers; + break; + case StateExtensive::Operation::FindFreeBlockTexture: + vectorState.operation = StateExtensive::Operation::MoveTextures; + break; + case StateExtensive::Operation::FindFreeBlockAll: + vectorState.operation = StateExtensive::Operation::MoveAll; + break; + default: + VMA_ASSERT(0); + vectorState.operation = StateExtensive::Operation::MoveTextures; + } + vectorState.firstFreeBlock = last; + // Nothing done, block found without reallocations, can perform another reallocs in same pass + return ComputeDefragmentation_Extensive(vector, index); + } + break; + } + case StateExtensive::Operation::MoveTextures: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (texturePresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture; + return ComputeDefragmentation_Extensive(vector, index); + } + + if (!bufferPresent && !otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more textures to move, check buffers + vectorState.operation = StateExtensive::Operation::MoveBuffers; + bufferPresent = false; + otherPresent = false; + } + else + break; + VMA_FALLTHROUGH; // Fallthrough + } + case StateExtensive::Operation::MoveBuffers: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (bufferPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } + + if (!otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more buffers to move, check all others + vectorState.operation = 
StateExtensive::Operation::MoveAll; + otherPresent = false; + } + else + break; + VMA_FALLTHROUGH; // Fallthrough + } + case StateExtensive::Operation::MoveAll: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (otherPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } + // Everything moved + vectorState.operation = StateExtensive::Operation::Cleanup; + } + break; + } + case StateExtensive::Operation::Cleanup: + // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). + break; + } + + if (vectorState.operation == StateExtensive::Operation::Cleanup) + { + // All other work done, pack data in blocks even tighter if possible + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + + if (prevMoveCount == m_Moves.size()) + vectorState.operation = StateExtensive::Operation::Done; + } + return false; +} + +void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) +{ + size_t allocCount = 0; + size_t freeCount = 0; + state.avgFreeSize = 0; + state.avgAllocSize = 0; + + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + allocCount += metadata->GetAllocationCount(); + freeCount += metadata->GetFreeRegionsCount(); + state.avgFreeSize += metadata->GetSumFreeSize(); + state.avgAllocSize += metadata->GetSize(); + } + + state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; + state.avgFreeSize /= freeCount; +} + +bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t 
firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent) +{ + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = firstFreeBlock ; i;) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(--i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Move only single type of resources at once + if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) + { + // Try to fit allocation into free blocks + if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) + return false; + } + + if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) + texturePresent = true; + else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) + bufferPresent = true; + else + otherPresent = true; + } + } + return prevMoveCount == m_Moves.size(); +} +#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS + +#ifndef _VMA_POOL_T_FUNCTIONS +VmaPool_T::VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) + : m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 
1 : hAllocator->GetBufferImageGranularity(), + createInfo.blockSize != 0, // explicitBlockSize + createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm + createInfo.priority, + VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), + createInfo.pMemoryAllocateNext), + m_Id(0), + m_Name(VMA_NULL) {} + +VmaPool_T::~VmaPool_T() +{ + VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); + + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); +} + +void VmaPool_T::SetName(const char* pName) +{ + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); + + if (pName != VMA_NULL) + { + m_Name = VmaCreateStringCopy(allocs, pName); + } + else + { + m_Name = VMA_NULL; + } +} +#endif // _VMA_POOL_T_FUNCTIONS + +#ifndef _VMA_ALLOCATOR_T_FUNCTIONS +VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : + m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), + m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? 
pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), + m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), + m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), + m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), + m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), + m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), + m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), + m_UseKhrMaintenance4((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT) != 0), + m_UseKhrMaintenance5((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT) != 0), + m_UseKhrExternalMemoryWin32((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT) != 0), + m_hDevice(pCreateInfo->device), + m_hInstance(pCreateInfo->instance), + m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? + *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), + m_AllocationObjectAllocator(&m_AllocationCallbacks), + m_HeapSizeLimitMask(0), + m_DeviceMemoryCount(0), + m_PreferredLargeHeapBlockSize(0), + m_PhysicalDevice(pCreateInfo->physicalDevice), + m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), + m_NextPoolId(0), + m_GlobalMemoryTypeBits(UINT32_MAX) +{ + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_UseKhrDedicatedAllocation = false; + m_UseKhrBindMemory2 = false; + } + + if(VMA_DEBUG_DETECT_CORRUPTION) + { + // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. 
+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); + } + + VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); + + if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { +#if !(VMA_DEDICATED_ALLOCATION) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + } +#endif +#if !(VMA_BIND_MEMORY2) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); + } +#endif + } +#if !(VMA_MEMORY_BUDGET) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); + } +#endif +#if !(VMA_BUFFER_DEVICE_ADDRESS) + if(m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if VMA_VULKAN_VERSION < 1004000 + VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 4, 0) && "vulkanApiVersion >= VK_API_VERSION_1_4 but required Vulkan version is disabled by preprocessor macros."); +#endif +#if VMA_VULKAN_VERSION < 1003000 + VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 3, 0) && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros."); +#endif +#if VMA_VULKAN_VERSION < 1002000 + VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 2, 0) && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); +#endif +#if VMA_VULKAN_VERSION < 1001000 + VMA_ASSERT(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 
0) && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); +#endif +#if !(VMA_MEMORY_PRIORITY) + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE4) + if(m_UseKhrMaintenance4) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE5) + if(m_UseKhrMaintenance5) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE5) + if(m_UseKhrMaintenance5) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif + +#if !(VMA_EXTERNAL_MEMORY_WIN32) + if(m_UseKhrExternalMemoryWin32) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif + + memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); + memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); + memset(&m_MemProps, 0, sizeof(m_MemProps)); + + memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); + memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); + +#if VMA_EXTERNAL_MEMORY + memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); +#endif // #if VMA_EXTERNAL_MEMORY + + 
if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + { + m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; + m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; + m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; + } + + ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); + + (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); + (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); + + VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); + VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); + + m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? + pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) + { + memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, + sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); + } +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + { + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; + if(limit != VK_WHOLE_SIZE) + { + m_HeapSizeLimitMask |= 1U << heapIndex; + if(limit < m_MemProps.memoryHeaps[heapIndex].size) + { + m_MemProps.memoryHeaps[heapIndex].size = limit; + } + } + } + } + + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + // Create only supported types + if((m_GlobalMemoryTypeBits & (1U << memTypeIndex)) != 0) + { + const VkDeviceSize 
preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); + m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( + this, + VK_NULL_HANDLE, // hParentPool + memTypeIndex, + preferredBlockSize, + 0, + SIZE_MAX, + GetBufferImageGranularity(), + false, // explicitBlockSize + 0, // algorithm + 0.5F, // priority (0.5 is the default per Vulkan spec) + GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment + VMA_NULL); // // pMemoryAllocateNext + // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, + // because minBlockCount is 0. + } + } +} + +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +{ + VkResult res = VK_SUCCESS; + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET + + return res; +} + +VmaAllocator_T::~VmaAllocator_T() +{ + VMA_ASSERT(m_Pools.IsEmpty()); + + for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) + { + vma_delete(this, m_pBlockVectors[memTypeIndex]); + } +} + +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +{ +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Static(); +#endif + + if(pVulkanFunctions != VMA_NULL) + { + ImportVulkanFunctions_Custom(pVulkanFunctions); + } + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Dynamic(); +#endif + + ValidateVulkanFunctions(); +} + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Static() +{ + // Vulkan 1.0 + m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr; + m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr; + m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; + 
m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; + m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; + m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; + m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; + m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; + m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; + + // Vulkan 1.1 +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; + m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; + m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = 
(PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; + m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; + } +#endif +} + +#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +{ + VMA_ASSERT(pVulkanFunctions != VMA_NULL); + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + 
VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + VMA_COPY_IF_NOT_NULL(vkGetMemoryWin32HandleKHR); +#endif +#undef VMA_COPY_IF_NOT_NULL +} + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Dynamic() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && + "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " + "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. " + "Other members can be null."); + +#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); +#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); + + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); + VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); + VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); + VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, 
PFN_vkUnmapMemory, "vkUnmapMemory"); + VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); + VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); + VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); + VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); + VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); + // Try to fetch the pointer from the other name, based on suspected driver 
bug - see issue #410. + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } + else if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + // Try to fetch the pointer from the other name, based on suspected driver bug - see issue #410. + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); + } +#endif + +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); + } +#endif + +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); + } + else if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, 
"vkGetDeviceBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); + } +#endif +#if VMA_KHR_MAINTENANCE4 + if(m_UseKhrMaintenance4) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirementsKHR, "vkGetDeviceBufferMemoryRequirementsKHR"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirementsKHR, "vkGetDeviceImageMemoryRequirementsKHR"); + } +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + if (m_UseKhrExternalMemoryWin32) + { + VMA_FETCH_DEVICE_FUNC(vkGetMemoryWin32HandleKHR, PFN_vkGetMemoryWin32HandleKHR, "vkGetMemoryWin32HandleKHR"); + } +#endif +#undef VMA_FETCH_DEVICE_FUNC +#undef VMA_FETCH_INSTANCE_FUNC +} + +#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ValidateVulkanFunctions() const +{ + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + 
 VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + if (m_UseKhrExternalMemoryWin32) + { + VMA_ASSERT(m_VulkanFunctions.vkGetMemoryWin32HandleKHR != VMA_NULL); + } +#endif + + // Not validating these due to suspected driver bugs with these function + // pointers being null despite the correct extension or Vulkan version being enabled. + // See issue #397. Their usage in VMA is optional anyway. + // + // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); + // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); +} + +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; + return VmaAlignUp(isSmallHeap ?
(heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); +} + +VkResult VmaAllocator_T::AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + void* pMemoryAllocateNext, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations != VMA_NULL); + VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%" PRIu32 ", AllocationCount=%zu, Size=%" PRIu64, memTypeIndex, allocationCount, size); + + VmaAllocationCreateInfo finalCreateInfo = createInfo; + VkResult res = CalcMemTypeParams( + finalCreateInfo, + memTypeIndex, + size, + allocationCount); + if(res != VK_SUCCESS) + return res; + + const void* allocateNextPtr = blockVector.GetAllocationNextPtr(); + if(pMemoryAllocateNext != VMA_NULL) + { + VMA_ASSERT(allocateNextPtr == VMA_NULL && + "You shouldn't create a dedicated allocation with a custom pMemoryAllocateNext if the pNext chain is already provided for this pool."); + allocateNextPtr = pMemoryAllocateNext; + } + + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + return AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + 
allocationCount, + pAllocations, + allocateNextPtr); + } + + const bool canAllocateDedicated = + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && + (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize()); + + if(canAllocateDedicated) + { + // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size. + if(size > blockVector.GetPreferredBlockSize() / 2) + { + dedicatedPreferred = true; + } + // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget, + // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above + // 3/4 of the maximum allocation count. + if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 && + m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4) + { + dedicatedPreferred = false; + } + + if(dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + allocateNextPtr); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
+ VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + } + + res = blockVector.Allocate( + size, + alignment, + finalCreateInfo, + suballocType, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Try dedicated memory. + if(canAllocateDedicated && !dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + allocateNextPtr); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. + VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + + // Everything failed: Return error code. 
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain) +{ + VMA_ASSERT(allocationCount > 0 && pAllocations); + + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = memTypeIndex; + allocInfo.allocationSize = size; + allocInfo.pNext = pNextChain; + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; + if(!canAliasMemory) + { + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { + VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); + dedicatedAllocInfo.buffer = dedicatedBuffer; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + dedicatedAllocInfo.image = dedicatedImage; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + } + } +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + +#if VMA_BUFFER_DEVICE_ADDRESS + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_UseKhrBufferDeviceAddress) + { + bool canContainBufferWithDeviceAddress = true; + if(dedicatedBuffer != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == VmaBufferImageUsage::UNKNOWN || + 
dedicatedBufferImageUsage.Contains(VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = false; + } + if(canContainBufferWithDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(priority >= 0.F && priority <= 1.F); + priorityInfo.priority = priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // #if VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. + VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); + if(exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // #if VMA_EXTERNAL_MEMORY + + size_t allocIndex = 0; + VkResult res = VK_SUCCESS; + for(; allocIndex < allocationCount; ++allocIndex) + { + res = AllocateDedicatedMemoryPage( + pool, + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + isMappingAllowed, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + + if(res == VK_SUCCESS) + { + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + dedicatedAllocations.Register(pAllocations[allocIndex]); + } + VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%" PRIu32, allocationCount, memTypeIndex); + } + else + { + // Free all already created allocations. 
+ while(allocIndex--) + { + VmaAllocation currAlloc = pAllocations[allocIndex]; + VkDeviceMemory hMemory = currAlloc->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); + m_AllocationObjectAllocator.Free(currAlloc); + } + + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation) +{ + VkDeviceMemory hMemory = VK_NULL_HANDLE; + VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); + if(res < 0) + { + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + + void* pMappedData = VMA_NULL; + if(map) + { + res = (*m_VulkanFunctions.vkMapMemory)( + m_hDevice, + hMemory, + 0, + VK_WHOLE_SIZE, + 0, + &pMappedData); + if(res < 0) + { + VMA_DEBUG_LOG(" vkMapMemory FAILED"); + FreeVulkanMemory(memTypeIndex, size, hMemory); + return res; + } + } + + *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); + (*pAllocation)->InitDedicatedAllocation(this, pool, memTypeIndex, hMemory, suballocType, pMappedData, size); + if (isUserDataString) + (*pAllocation)->SetName(this, (const char*)pUserData); + else + (*pAllocation)->SetUserData(this, pUserData); + m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); + +#if VMA_DEBUG_INITIALIZE_ALLOCATIONS + FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); +#endif + + return VK_SUCCESS; +} + +void 
VmaAllocator_T::GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.buffer = hBuffer; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + 
VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +VkResult VmaAllocator_T::FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VmaBufferImageUsage bufImgUsage, + uint32_t* pMemoryTypeIndex) const +{ + memoryTypeBits &= GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + VkMemoryPropertyFlags requiredFlags = 0; + VkMemoryPropertyFlags preferredFlags = 0; + VkMemoryPropertyFlags notPreferredFlags = 0; + if(!FindMemoryPreferences( + IsIntegratedGpu(), + *pAllocationCreateInfo, + bufImgUsage, + requiredFlags, preferredFlags, notPreferredFlags)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. 
+ uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + + VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. + if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VkResult VmaAllocator_T::CalcMemTypeParams( + VmaAllocationCreateInfo& inoutCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount) +{ + // If memory type is not HOST_VISIBLE, disable MAPPED. + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetHeapBudgets(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::CalcAllocationParams( + VmaAllocationCreateInfo& inoutCreateInfo, + bool dedicatedRequired) +{ + VMA_ASSERT((inoutCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && + "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); + VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || + (inoutCreateInfo.flags & 
(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && + "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) + { + VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && + "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + } + } + + // If memory is lazily allocated, it should be always dedicated. 
+ if(dedicatedRequired || + inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + if(inoutCreateInfo.pool != VK_NULL_HANDLE) + { + if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + +#if VMA_DEBUG_ALWAYS_DEDICATED_MEMORY + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } +#endif + + // Non-auto USAGE values imply HOST_ACCESS flags. + // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. + // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. + // Otherwise they just protect from assert on mapping. 
+ if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + } + } + + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::CreateBuffer( + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + void* pMemoryAllocateNext, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + if (pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + // 1. Create VkBuffer. + VkResult res = (*m_VulkanFunctions.vkCreateBuffer)(m_hDevice, pBufferCreateInfo, + GetAllocationCallbacks(), pBuffer); + if (res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 3. Allocate memory using allocator. 
+ res = AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage(*pBufferCreateInfo, m_UseKhrMaintenance5), // dedicatedBufferImageUsage + pMemoryAllocateNext, + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + if (res >= 0) + { + // 4. Bind buffer with memory. + if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if (res >= 0) + { + // All steps succeeded. +#if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, m_UseKhrMaintenance5); +#endif + if (pAllocationInfo != VMA_NULL) + { + GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + FreeMemory(1, pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*m_VulkanFunctions.vkDestroyBuffer)(m_hDevice, *pBuffer, GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*m_VulkanFunctions.vkDestroyBuffer)(m_hDevice, *pBuffer, GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VkResult VmaAllocator_T::CreateImage( + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + void* pMemoryAllocateNext, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + if (pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + // 1. Create VkImage.
+ VkResult res = (*m_VulkanFunctions.vkCreateImage)(m_hDevice, pImageCreateInfo, + GetAllocationCallbacks(), pImage); + if (res == VK_SUCCESS) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage + VmaBufferImageUsage(*pImageCreateInfo), // dedicatedBufferImageUsage + pMemoryAllocateNext, + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + if (res == VK_SUCCESS) + { + // 3. Bind image with memory. + if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); + } + if (res == VK_SUCCESS) + { + // All steps succeeded. 
+#if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitImageUsage(*pImageCreateInfo); +#endif + if (pAllocationInfo != VMA_NULL) + { + GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + FreeMemory(1, pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*m_VulkanFunctions.vkDestroyImage)(m_hDevice, *pImage, GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + (*m_VulkanFunctions.vkDestroyImage)(m_hDevice, *pImage, GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + return res; +} + +VkResult VmaAllocator_T::AllocateMemory( + VkMemoryRequirements vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + // pNext chain for VkMemoryAllocateInfo. When used, must specify requiresDedicatedAllocation = true. + void* pMemoryAllocateNext, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + + vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, createInfo.minAlignment); + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); + + // If using custom pNext chain for VkMemoryAllocateInfo, must require dedicated allocations. 
+ if(pMemoryAllocateNext != VMA_NULL) + { + requiresDedicatedAllocation = true; + } + + if(vkMemReq.size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VmaAllocationCreateInfo createInfoFinal = createInfo; + VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation); + if(res != VK_SUCCESS) + return res; + + if(createInfoFinal.pool != VK_NULL_HANDLE) + { + VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; + return AllocateMemoryOfType( + createInfoFinal.pool, + vkMemReq.size, + vkMemReq.alignment, + prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + pMemoryAllocateNext, + createInfoFinal, + blockVector.GetMemoryTypeIndex(), + suballocType, + createInfoFinal.pool->m_DedicatedAllocations, + blockVector, + allocationCount, + pAllocations); + } + + // Bit mask of Vulkan memory types acceptable for this allocation. + uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; + uint32_t memTypeIndex = UINT32_MAX; + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. + if(res != VK_SUCCESS) + return res; + + do + { + VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); + res = AllocateMemoryOfType( + VK_NULL_HANDLE, + vkMemReq.size, + vkMemReq.alignment, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + pMemoryAllocateNext, + createInfoFinal, + memTypeIndex, + suballocType, + m_DedicatedAllocations[memTypeIndex], + *blockVector, + allocationCount, + pAllocations); + // Allocation succeeded + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Remove old memTypeIndex from list of possibilities. + memoryTypeBits &= ~(1U << memTypeIndex); + // Find alternative memTypeIndex.
+ res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + } while(res == VK_SUCCESS); + + // No other matching memory type index could be found. + // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaAllocator_T::FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations); + + for(size_t allocIndex = allocationCount; allocIndex--; ) + { + VmaAllocation allocation = pAllocations[allocIndex]; + + if(allocation != VK_NULL_HANDLE) + { +#if VMA_DEBUG_INITIALIZE_ALLOCATIONS + FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); +#endif + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetParentPool(); + if(hPool != VK_NULL_HANDLE) + { + pBlockVector = &hPool->m_BlockVector; + } + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); + } + pBlockVector->Free(allocation); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + VMA_ASSERT(0); + } + } + } +} + +void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) +{ + // Initialize. + VmaClearDetailedStatistics(pStats->total); + for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + VmaClearDetailedStatistics(pStats->memoryType[i]); + for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + VmaClearDetailedStatistics(pStats->memoryHeap[i]); + + // Process default pools. 
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if (pBlockVector != VMA_NULL) + pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); + blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + } + + // Process dedicated allocations. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Sum from memory types to memory heaps. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; + VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); + } + + // Sum from memory heaps to total. 
+ for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) + VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); + + VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || + pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); + VMA_ASSERT(pStats->total.unusedRangeCount == 0 || + pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); +} + +void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudgets->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. 
+ outBudgets->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudgets->usage = outBudgets->statistics.blockBytes; + outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. + } + } +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = hAllocation->GetMappedData(); + pAllocationInfo->pUserData = hAllocation->GetUserData(); + pAllocationInfo->pName = hAllocation->GetName(); +} + +void VmaAllocator_T::GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo) +{ + GetAllocationInfo(hAllocation, &pAllocationInfo->allocationInfo); + + switch (hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + pAllocationInfo->blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize(); + pAllocationInfo->dedicatedMemory = VK_FALSE; + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + pAllocationInfo->blockSize = pAllocationInfo->allocationInfo.size; + pAllocationInfo->dedicatedMemory = VK_TRUE; + break; + default: + VMA_ASSERT(0); + } +} + +VkResult 
VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +{ + VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%" PRIu32 ", flags=%" PRIu32, pCreateInfo->memoryTypeIndex, pCreateInfo->flags); + + VmaPoolCreateInfo newCreateInfo = *pCreateInfo; + + // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. + if(pCreateInfo->pMemoryAllocateNext) + { + VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); + } + + if(newCreateInfo.maxBlockCount == 0) + { + newCreateInfo.maxBlockCount = SIZE_MAX; + } + if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + // Memory type index out of range or forbidden. + if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1U << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + if(newCreateInfo.minAllocationAlignment > 0) + { + VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); + } + + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); + + *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); + + VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); + if(res != VK_SUCCESS) + { + vma_delete(this, *pPool); + *pPool = VMA_NULL; + return res; + } + + // Add to m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + (*pPool)->SetId(m_NextPoolId++); + m_Pools.PushBack(*pPool); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::DestroyPool(VmaPool pool) +{ + // Remove from m_Pools. 
+ { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + m_Pools.Remove(pool); + } + + vma_delete(this, pool); +} + +void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) +{ + VmaClearStatistics(*pPoolStats); + pool->m_BlockVector.AddStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); +} + +void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) +{ + VmaClearDetailedStatistics(*pPoolStats); + pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); +} + +void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) +{ + m_CurrentFrameIndex.store(frameIndex); + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET +} + +VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) +{ + return hPool->m_BlockVector.CheckCorruption(); +} + +VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) +{ + VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if(pBlockVector != VMA_NULL) + { + VkResult localRes = pBlockVector->CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + + // Process custom pools. 
+ { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + if(((1U << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) + { + VkResult localRes = pool->m_BlockVector.CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + } + + return finalRes; +} + +VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); + +#if VMA_DEBUG_DONT_EXCEED_HEAP_SIZE_WITH_ALLOCATION_SIZE + if (pAllocateInfo->allocationSize > m_MemProps.memoryHeaps[heapIndex].size) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } +#endif + + AtomicTransactionalIncrement deviceMemoryCountIncrement; + const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount); +#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount) + { + return VK_ERROR_TOO_MANY_OBJECTS; + } +#endif + + // HeapSizeLimit is in effect for this heap. + if((m_HeapSizeLimitMask & (1U << heapIndex)) != 0) + { + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; + for(;;) + { + const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; + if(blockBytesAfterAllocation > heapSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) + { + break; + } + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; + } + ++m_Budget.m_BlockCount[heapIndex]; + + // VULKAN CALL vkAllocateMemory. 
+ VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); + + if(res == VK_SUCCESS) + { +#if VMA_MEMORY_BUDGET + ++m_Budget.m_OperationsSinceBudgetFetch; +#endif + + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); + } + + deviceMemoryCountIncrement.Commit(); + } + else + { + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; + } + + return res; +} + +void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) +{ + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); + } + + // VULKAN CALL vkFreeMemory. + (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); + + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= size; + + --m_DeviceMemoryCount; +} + +VkResult VmaAllocator_T::BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) const +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) + { + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.buffer = buffer; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } +#endif 
// #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + else + { + return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) const +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) + { + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.image = image; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } +#endif // #if VMA_BIND_MEMORY2 + + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + + return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); +} + +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + char *pBytes = VMA_NULL; + VkResult res = pBlock->Map(this, 1, (void**)&pBytes); + if(res == VK_SUCCESS) + { + *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); + hAllocation->BlockAllocMap(); + } + return res; + } + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return hAllocation->DedicatedAllocMap(this, ppData); + default: + VMA_ASSERT(0); + return VK_ERROR_MEMORY_MAP_FAILED; + } +} + +void VmaAllocator_T::Unmap(VmaAllocation hAllocation) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + hAllocation->BlockAllocUnmap(); + 
pBlock->Unmap(this, 1); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + hAllocation->DedicatedAllocUnmap(this); + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VkResult res = VK_ERROR_UNKNOWN_COPY; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block."); + res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +VkResult VmaAllocator_T::BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VkResult res = VK_ERROR_UNKNOWN_COPY; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block."); + res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +VkResult VmaAllocator_T::FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op) +{ + VkResult res = VK_SUCCESS; + + VkMappedMemoryRange memRange = {}; + if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange)) + { + switch(op) + { + case VMA_CACHE_FLUSH: + res = 
(*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        case VMA_CACHE_INVALIDATE:
+            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+    // else: Just ignore this call.
+    return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+    uint32_t allocationCount,
+    const VmaAllocation* allocations,
+    const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+    VMA_CACHE_OPERATION op)
+{
+    typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+    typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+    RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+
+    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        const VmaAllocation alloc = allocations[allocIndex];
+        const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+        const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+        VkMappedMemoryRange newRange;
+        if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+        {
+            ranges.push_back(newRange);
+        }
+    }
+
+    VkResult res = VK_SUCCESS;
+    if(!ranges.empty())
+    {
+        switch(op)
+        {
+        case VMA_CACHE_FLUSH:
+            res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+            break;
+        case VMA_CACHE_INVALIDATE:
+            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+    // else: Just ignore this call.
+ return res; +} + +VkResult VmaAllocator_T::CopyMemoryToAllocation( + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size) +{ + void* dstMappedData = VMA_NULL; + VkResult res = Map(dstAllocation, &dstMappedData); + if(res == VK_SUCCESS) + { + memcpy((char*)dstMappedData + dstAllocationLocalOffset, pSrcHostPointer, (size_t)size); + Unmap(dstAllocation); + res = FlushOrInvalidateAllocation(dstAllocation, dstAllocationLocalOffset, size, VMA_CACHE_FLUSH); + } + return res; +} + +VkResult VmaAllocator_T::CopyAllocationToMemory( + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size) +{ + void* srcMappedData = VMA_NULL; + VkResult res = Map(srcAllocation, &srcMappedData); + if(res == VK_SUCCESS) + { + res = FlushOrInvalidateAllocation(srcAllocation, srcAllocationLocalOffset, size, VMA_CACHE_INVALIDATE); + if(res == VK_SUCCESS) + { + memcpy(pDstHostPointer, (const char*)srcMappedData + srcAllocationLocalOffset, (size_t)size); + Unmap(srcAllocation); + } + } + return res; +} + +void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation) +{ + VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + VmaPool parentPool = allocation->GetParentPool(); + if(parentPool == VK_NULL_HANDLE) + { + // Default pool + m_DedicatedAllocations[memTypeIndex].Unregister(allocation); + } + else + { + // Custom pool + parentPool->m_DedicatedAllocations.Unregister(allocation); + } + + VkDeviceMemory hMemory = allocation->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. 
+ + if(allocation->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); + + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); + allocation->Destroy(this); + m_AllocationObjectAllocator.Free(allocation); + + VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%" PRIu32, memTypeIndex); +} + +uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const +{ + VkBufferCreateInfo dummyBufCreateInfo; + VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); + + uint32_t memoryTypeBits = 0; + + // Create buffer. + VkBuffer buf = VK_NULL_HANDLE; + VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( + m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); + if(res == VK_SUCCESS) + { + // Query for supported memory types. + VkMemoryRequirements memReq; + (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); + memoryTypeBits = memReq.memoryTypeBits; + + // Destroy buffer. + (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); + } + + return memoryTypeBits; +} + +uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const +{ + // Make sure memory information is already fetched. + VMA_ASSERT(GetMemoryTypeCount() > 0); + + uint32_t memoryTypeBits = UINT32_MAX; + + if(!m_UseAmdDeviceCoherentMemory) + { + // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. 
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + memoryTypeBits &= ~(1U << memTypeIndex); + } + } + } + + return memoryTypeBits; +} + +bool VmaAllocator_T::GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const +{ + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { + const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + const VkDeviceSize allocationSize = allocation->GetSize(); + VMA_ASSERT(offset <= allocationSize); + + outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + outRange.pNext = VMA_NULL; + outRange.memory = allocation->GetMemory(); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + outRange.size = allocationSize - outRange.offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + outRange.size = VMA_MIN( + VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), + allocationSize - outRange.offset); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. 
+ const VkDeviceSize allocationOffset = allocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); + outRange.offset += allocationOffset; + outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); + + break; + } + default: + VMA_ASSERT(0); + } + return true; + } + return false; +} + +#if VMA_MEMORY_BUDGET +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VmaPnextChainPushFront(&memProps, &budgetProps); + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + + // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. + if(m_Budget.m_VulkanBudget[heapIndex] == 0) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
+ } + else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; + } + if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + { + m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} +#endif // VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(VmaAllocation hAllocation, uint8_t pattern) +{ +#if VMA_DEBUG_INITIALIZE_ALLOCATIONS + if(hAllocation->IsMappingAllowed() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +#endif // #if VMA_DEBUG_INITIALIZE_ALLOCATIONS +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + json.WriteString("DefaultPools"); + json.BeginObject(); + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; + VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; + if (pBlockVector != VMA_NULL) + { + json.BeginString("Type "); + 
json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("PreferredBlockSize"); + json.WriteNumber(pBlockVector->GetPreferredBlockSize()); + + json.WriteString("Blocks"); + pBlockVector->PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + dedicatedAllocList.BuildStatsString(json); + } + json.EndObject(); + } + } + } + json.EndObject(); + + json.WriteString("CustomPools"); + json.BeginObject(); + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + if (!m_Pools.IsEmpty()) + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + bool displayType = true; + size_t index = 0; + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + if (blockVector.GetMemoryTypeIndex() == memTypeIndex) + { + if (displayType) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginArray(); + displayType = false; + } + + json.BeginObject(); + { + json.WriteString("Name"); + json.BeginString(); + json.ContinueString((uint64_t)index++); + if (pool->GetName()) + { + json.ContinueString(" - "); + json.ContinueString(pool->GetName()); + } + json.EndString(); + + json.WriteString("PreferredBlockSize"); + json.WriteNumber(blockVector.GetPreferredBlockSize()); + + json.WriteString("Blocks"); + blockVector.PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + pool->m_DedicatedAllocations.BuildStatsString(json); + } + json.EndObject(); + } + } + + if (!displayType) + json.EndArray(); + } + } + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_ALLOCATOR_T_FUNCTIONS + + +#ifndef _VMA_PUBLIC_INTERFACE + +#ifdef VOLK_HEADER_VERSION + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaImportVulkanFunctionsFromVolk( + const VmaAllocatorCreateInfo* VMA_NOT_NULL pAllocatorCreateInfo, + VmaVulkanFunctions* VMA_NOT_NULL pDstVulkanFunctions) 
+{ + VMA_ASSERT(pAllocatorCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocatorCreateInfo->instance != VK_NULL_HANDLE); + VMA_ASSERT(pAllocatorCreateInfo->device != VK_NULL_HANDLE); + + memset(pDstVulkanFunctions, 0, sizeof(*pDstVulkanFunctions)); + + VolkDeviceTable src = {}; + volkLoadDeviceTable(&src, pAllocatorCreateInfo->device); + +#define COPY_GLOBAL_TO_VMA_FUNC(volkName, vmaName) if(!pDstVulkanFunctions->vmaName) pDstVulkanFunctions->vmaName = volkName; +#define COPY_DEVICE_TO_VMA_FUNC(volkName, vmaName) if(!pDstVulkanFunctions->vmaName) pDstVulkanFunctions->vmaName = src.volkName; + + COPY_GLOBAL_TO_VMA_FUNC(vkGetInstanceProcAddr, vkGetInstanceProcAddr) + COPY_GLOBAL_TO_VMA_FUNC(vkGetDeviceProcAddr, vkGetDeviceProcAddr) + COPY_GLOBAL_TO_VMA_FUNC(vkGetPhysicalDeviceProperties, vkGetPhysicalDeviceProperties) + COPY_GLOBAL_TO_VMA_FUNC(vkGetPhysicalDeviceMemoryProperties, vkGetPhysicalDeviceMemoryProperties) + COPY_DEVICE_TO_VMA_FUNC(vkAllocateMemory, vkAllocateMemory) + COPY_DEVICE_TO_VMA_FUNC(vkFreeMemory, vkFreeMemory) + COPY_DEVICE_TO_VMA_FUNC(vkMapMemory, vkMapMemory) + COPY_DEVICE_TO_VMA_FUNC(vkUnmapMemory, vkUnmapMemory) + COPY_DEVICE_TO_VMA_FUNC(vkFlushMappedMemoryRanges, vkFlushMappedMemoryRanges) + COPY_DEVICE_TO_VMA_FUNC(vkInvalidateMappedMemoryRanges, vkInvalidateMappedMemoryRanges) + COPY_DEVICE_TO_VMA_FUNC(vkBindBufferMemory, vkBindBufferMemory) + COPY_DEVICE_TO_VMA_FUNC(vkBindImageMemory, vkBindImageMemory) + COPY_DEVICE_TO_VMA_FUNC(vkGetBufferMemoryRequirements, vkGetBufferMemoryRequirements) + COPY_DEVICE_TO_VMA_FUNC(vkGetImageMemoryRequirements, vkGetImageMemoryRequirements) + COPY_DEVICE_TO_VMA_FUNC(vkCreateBuffer, vkCreateBuffer) + COPY_DEVICE_TO_VMA_FUNC(vkDestroyBuffer, vkDestroyBuffer) + COPY_DEVICE_TO_VMA_FUNC(vkCreateImage, vkCreateImage) + COPY_DEVICE_TO_VMA_FUNC(vkDestroyImage, vkDestroyImage) + COPY_DEVICE_TO_VMA_FUNC(vkCmdCopyBuffer, vkCmdCopyBuffer) +#if VMA_VULKAN_VERSION >= 1001000 + if (pAllocatorCreateInfo->vulkanApiVersion >= 
VK_MAKE_VERSION(1, 1, 0)) + { + COPY_GLOBAL_TO_VMA_FUNC(vkGetPhysicalDeviceMemoryProperties2, vkGetPhysicalDeviceMemoryProperties2KHR) + COPY_DEVICE_TO_VMA_FUNC(vkGetBufferMemoryRequirements2, vkGetBufferMemoryRequirements2KHR) + COPY_DEVICE_TO_VMA_FUNC(vkGetImageMemoryRequirements2, vkGetImageMemoryRequirements2KHR) + COPY_DEVICE_TO_VMA_FUNC(vkBindBufferMemory2, vkBindBufferMemory2KHR) + COPY_DEVICE_TO_VMA_FUNC(vkBindImageMemory2, vkBindImageMemory2KHR) + } +#endif +#if VMA_VULKAN_VERSION >= 1003000 + if (pAllocatorCreateInfo->vulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + COPY_DEVICE_TO_VMA_FUNC(vkGetDeviceBufferMemoryRequirements, vkGetDeviceBufferMemoryRequirements) + COPY_DEVICE_TO_VMA_FUNC(vkGetDeviceImageMemoryRequirements, vkGetDeviceImageMemoryRequirements) + } +#endif +#if VMA_KHR_MAINTENANCE4 + if((pAllocatorCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT) != 0) + { + COPY_DEVICE_TO_VMA_FUNC(vkGetDeviceBufferMemoryRequirementsKHR, vkGetDeviceBufferMemoryRequirements) + COPY_DEVICE_TO_VMA_FUNC(vkGetDeviceImageMemoryRequirementsKHR, vkGetDeviceImageMemoryRequirements) + } +#endif +#if VMA_DEDICATED_ALLOCATION + if ((pAllocatorCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + COPY_DEVICE_TO_VMA_FUNC(vkGetBufferMemoryRequirements2KHR, vkGetBufferMemoryRequirements2KHR) + COPY_DEVICE_TO_VMA_FUNC(vkGetImageMemoryRequirements2KHR, vkGetImageMemoryRequirements2KHR) + } +#endif +#if VMA_BIND_MEMORY2 + if ((pAllocatorCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + COPY_DEVICE_TO_VMA_FUNC(vkBindBufferMemory2KHR, vkBindBufferMemory2KHR) + COPY_DEVICE_TO_VMA_FUNC(vkBindImageMemory2KHR, vkBindImageMemory2KHR) + } +#endif +#if VMA_MEMORY_BUDGET + if ((pAllocatorCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + COPY_GLOBAL_TO_VMA_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, vkGetPhysicalDeviceMemoryProperties2KHR) + } +#endif +#if VMA_EXTERNAL_MEMORY_WIN32 + if 
((pAllocatorCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT) != 0) + { + COPY_DEVICE_TO_VMA_FUNC(vkGetMemoryWin32HandleKHR, vkGetMemoryWin32HandleKHR) + } +#endif + +#undef COPY_DEVICE_TO_VMA_FUNC +#undef COPY_GLOBAL_TO_VMA_FUNC + + return VK_SUCCESS; +} + +#endif // #ifdef VOLK_HEADER_VERSION + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 4)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + VkResult result = (*pAllocator)->Init(pCreateInfo); + if(result < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); + *pAllocator = VK_NULL_HANDLE; + } + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. 
+ vma_delete(&allocationCallbacks, allocator); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) +{ + VMA_ASSERT(allocator && pAllocatorInfo); + pAllocatorInfo->instance = allocator->m_hInstance; + pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); + pAllocatorInfo->device = allocator->m_hDevice; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceProperties); + *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); + *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ + VMA_ASSERT(allocator && pFlags); + VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); + *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->SetCurrentFrameIndex(frameIndex); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator allocator, + VmaTotalStatistics* pStats) +{ + VMA_ASSERT(allocator && pStats); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->CalculateStatistics(pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator allocator, + VmaBudget* pBudgets) +{ + VMA_ASSERT(allocator && pBudgets); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount()); 
+} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ + VMA_ASSERT(allocator && ppStatsString); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VmaStringBuilder sb(allocator->GetAllocationCallbacks()); + { + VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; + allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); + + VmaTotalStatistics stats; + allocator->CalculateStatistics(&stats); + + VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); + json.BeginObject(); + { + json.WriteString("General"); + json.BeginObject(); + { + const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties; + const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps; + + json.WriteString("API"); + json.WriteString("Vulkan"); + + json.WriteString("apiVersion"); + json.BeginString(); + json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion)); + json.ContinueString("."); + json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion)); + json.ContinueString("."); + json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion)); + json.EndString(); + + json.WriteString("GPU"); + json.WriteString(deviceProperties.deviceName); + json.WriteString("deviceType"); + json.WriteNumber(static_cast(deviceProperties.deviceType)); + + json.WriteString("maxMemoryAllocationCount"); + json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount); + json.WriteString("bufferImageGranularity"); + json.WriteNumber(deviceProperties.limits.bufferImageGranularity); + json.WriteString("nonCoherentAtomSize"); + json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize); + + json.WriteString("memoryHeapCount"); + json.WriteNumber(memoryProperties.memoryHeapCount); + json.WriteString("memoryTypeCount"); + json.WriteNumber(memoryProperties.memoryTypeCount); + } + json.EndObject(); + } + { + json.WriteString("Total"); + 
VmaPrintDetailedStatistics(json, stats.total); + } + { + json.WriteString("MemoryInfo"); + json.BeginObject(); + { + for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { + json.BeginString("Heap "); + json.ContinueString(heapIndex); + json.EndString(); + json.BeginObject(); + { + const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; + json.WriteString("Flags"); + json.BeginArray(true); + { + if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + #if VMA_VULKAN_VERSION >= 1001000 + if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) + json.WriteString("MULTI_INSTANCE"); + #endif + + VkMemoryHeapFlags flags = heapInfo.flags & + ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT + #endif + ); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Size"); + json.WriteNumber(heapInfo.size); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BudgetBytes"); + json.WriteNumber(budgets[heapIndex].budget); + json.WriteString("UsageBytes"); + json.WriteNumber(budgets[heapIndex].usage); + } + json.EndObject(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); + + json.WriteString("MemoryPools"); + json.BeginObject(); + { + for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("Flags"); + json.BeginArray(true); + { + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) + json.WriteString("HOST_VISIBLE"); + if 
(flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) + json.WriteString("HOST_COHERENT"); + if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) + json.WriteString("HOST_CACHED"); + if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) + json.WriteString("LAZILY_ALLOCATED"); + #if VMA_VULKAN_VERSION >= 1001000 + if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) + json.WriteString("PROTECTED"); + #endif + #if VK_AMD_device_coherent_memory + if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) + json.WriteString("DEVICE_COHERENT_AMD"); + if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) + json.WriteString("DEVICE_UNCACHED_AMD"); + #endif + + flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT + #endif + #if VK_AMD_device_coherent_memory + | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY + | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY + #endif + | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT + | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT + | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); + } + json.EndObject(); + } + } + + } + json.EndObject(); + } + json.EndObject(); + } + } + json.EndObject(); + } + + if (detailedMap == VK_TRUE) + allocator->PrintDetailedMap(json); + + json.EndObject(); + } + + *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); + } +} + +#endif // VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage::UNKNOWN, pMemoryTypeIndex); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res = VK_SUCCESS; + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + if (funcs->vkGetDeviceBufferMemoryRequirements && + (allocator->m_UseKhrMaintenance4 || allocator->m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))) + { + // Can query straight from VkBufferCreateInfo :) + VkDeviceBufferMemoryRequirementsKHR devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR}; + devBufMemReq.pCreateInfo = pBufferCreateInfo; + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); + + return allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); + } +#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + + // Must create a dummy buffer to query :( + VkBuffer hBuffer = VK_NULL_HANDLE; + res = 
funcs->vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); + + funcs->vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res = VK_SUCCESS; + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceImageMemoryRequirements && + (allocator->m_UseKhrMaintenance4 || allocator->m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))) + { + // Can query straight from VkImageCreateInfo :) + VkDeviceImageMemoryRequirementsKHR devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR}; + devImgMemReq.pCreateInfo = pImageCreateInfo; + VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); + + return 
allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); + } +#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + + // Must create a dummy image to query :( + VkImage hImage = VK_NULL_HANDLE; + res = funcs->vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); + + funcs->vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CreatePool(pCreateInfo, pPool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaDetailedStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->CalculatePoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST 
vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool && ppName); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + VMA_NULL, // pMemoryAllocateNext + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateDedicatedMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + void* pMemoryAllocateNext, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) 
+{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateDedicatedMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + true, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + pMemoryAllocateNext, + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + VMA_NULL, // pMemoryAllocateNext + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, 
+ VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + VMA_NULL, // pMemoryAllocateNext + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + image, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + VMA_NULL, // pMemoryAllocateNext + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + 
pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo2* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo2(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName) +{ + allocation->SetName(allocator, pName); +} + +VMA_CALL_PRE void VMA_CALL_POST 
vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) +{ + VMA_ASSERT(allocator && allocation && pFlags); + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->Map(allocation, ppData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->Unmap(allocation); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaFlushAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return 
allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaInvalidateAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation( + VmaAllocator allocator, + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && pSrcHostPointer && dstAllocation); + + if(size == 0) + { + return VK_SUCCESS; + } + + VMA_DEBUG_LOG("vmaCopyMemoryToAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CopyMemoryToAllocation(pSrcHostPointer, dstAllocation, dstAllocationLocalOffset, size); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( + VmaAllocator allocator, + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && srcAllocation && pDstHostPointer); + + if(size == 0) + { + return VK_SUCCESS; + } + + VMA_DEBUG_LOG("vmaCopyAllocationToMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CopyAllocationToMemory(srcAllocation, srcAllocationLocalOffset, pDstHostPointer, size); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator allocator, + uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST 
vmaBeginDefragmentation( + VmaAllocator allocator, + const VmaDefragmentationInfo* pInfo, + VmaDefragmentationContext* pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + VMA_DEBUG_LOG("vmaBeginDefragmentation"); + + if (pInfo->pool != VMA_NULL) + { + // Check if run on supported algorithms + if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); + return VK_SUCCESS; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationStats* pStats) +{ + VMA_ASSERT(allocator && context); + + VMA_DEBUG_LOG("vmaEndDefragmentation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if (pStats) + context->GetStats(*pStats); + vma_delete(allocator, context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassBegin(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassEnd(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return 
allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + VMA_DEBUG_LOG("vmaCreateBuffer"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + return allocator->CreateBuffer(pBufferCreateInfo, pAllocationCreateInfo, + VMA_NULL, // pMemoryAllocateNext + pBuffer, pAllocation, pAllocationInfo); + +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* 
pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); + VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + VmaAllocationCreateInfo allocCreateInfoCopy = *pAllocationCreateInfo; + allocCreateInfoCopy.minAlignment = VMA_MAX(allocCreateInfoCopy.minAlignment, minAlignment); + + return allocator->CreateBuffer(pBufferCreateInfo, &allocCreateInfoCopy, + VMA_NULL, // pMemoryAllocateNext + pBuffer, pAllocation, pAllocationInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateDedicatedBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + void* pMemoryAllocateNext, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + VMA_DEBUG_LOG("vmaCreateDedicatedBuffer"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + VmaAllocationCreateInfo allocCreateInfoCopy = *pAllocationCreateInfo; + allocCreateInfoCopy.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + + return allocator->CreateBuffer(pBufferCreateInfo, &allocCreateInfoCopy, + pMemoryAllocateNext, // pMemoryAllocateNext + pBuffer, pAllocation, pAllocationInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize 
allocationLocalOffset, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); + VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize()); + + VMA_DEBUG_LOG("vmaCreateAliasingBuffer2"); + + *pBuffer = VK_NULL_HANDLE; + + if (pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if (res >= 0) + { + // 2. Bind buffer with memory. 
+ res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + VMA_ASSERT((pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "vmaCreateImage() doesn't support disjoint multi-planar images. 
Please allocate memory for the planes using vmaAllocateMemory() and bind them using vmaBindImageMemory2()."); + VMA_DEBUG_LOG("vmaCreateImage"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + return allocator->CreateImage(pImageCreateInfo, pAllocationCreateInfo, + VMA_NULL, // pMemoryAllocateNext + pImage, pAllocation, pAllocationInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateDedicatedImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + void* pMemoryAllocateNext, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + VMA_ASSERT((pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "vmaCreateDedicatedImage() doesn't support disjoint multi-planar images. Please allocate memory for the planes using vmaAllocateMemory() and bind them using vmaBindImageMemory2()."); + VMA_DEBUG_LOG("vmaCreateDedicatedImage"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + VmaAllocationCreateInfo allocCreateInfoCopy = *pAllocationCreateInfo; + allocCreateInfoCopy.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + + return allocator->CreateImage(pImageCreateInfo, &allocCreateInfoCopy, + pMemoryAllocateNext, // pMemoryAllocateNext + pImage, pAllocation, pAllocationInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage 
VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); + + *pImage = VK_NULL_HANDLE; + + VMA_DEBUG_LOG("vmaCreateAliasingImage2"); + + if (pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if (res >= 0) + { + // 2. Bind image with memory. + res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation) +{ + VMA_ASSERT(allocator); + + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(image != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); + } + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( + const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) +{ + VMA_ASSERT(pCreateInfo && pVirtualBlock); + VMA_ASSERT(pCreateInfo->size > 0); + VMA_DEBUG_LOG("vmaCreateVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + *pVirtualBlock = 
vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); + return VK_SUCCESS; + + /* + Code for the future if we ever need a separate Init() method that could fail: + + VkResult res = (*pVirtualBlock)->Init(); + if(res < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); + *pVirtualBlock = VK_NULL_HANDLE; + } + return res; + */ +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) +{ + if(virtualBlock != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. + vma_delete(&allocationCallbacks, virtualBlock); + } +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); + VMA_DEBUG_LOG("vmaVirtualAllocate"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) +{ + if(allocation != VK_NULL_HANDLE) + { + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaVirtualFree"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Free(allocation); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaClearVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Clear(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->SetAllocationUserData(allocation, 
pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetStatistics(*pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->CalculateDetailedStatistics(*pStats); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); + VmaStringBuilder sb(allocationCallbacks); + virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); + *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString); + } +} +#if VMA_EXTERNAL_MEMORY_WIN32 +VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle(VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle) +{ + VMA_ASSERT(allocator && allocation && pHandle); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return 
allocation->GetWin32Handle(allocator, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, hTargetProcess, pHandle); +} +VMA_CALL_PRE VkResult VMA_CALL_POST vmaGetMemoryWin32Handle2(VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE hTargetProcess, HANDLE* VMA_NOT_NULL pHandle) +{ + VMA_ASSERT(allocator && allocation && pHandle); + VMA_ASSERT(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR || + handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR || + handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR || + handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR || + handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR || + handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return allocation->GetWin32Handle(allocator, handleType, hTargetProcess, pHandle); +} +#endif // VMA_EXTERNAL_MEMORY_WIN32 +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_PUBLIC_INTERFACE +#endif // VMA_IMPLEMENTATION + +/** +\page faq Frequently asked questions + +What is VMA? + +Vulkan(R) Memory Allocator (VMA) is a software library for developers who use the Vulkan graphics API in their code. +It is written in C++. + +What is the license of VMA? + +VMA is licensed under MIT, which means it is open source and free software. + +What is the purpose of VMA? + +VMA helps with handling one aspect of Vulkan usage, which is device memory management - +allocation of `VkDeviceMemory` objects, and creation of `VkBuffer` and `VkImage` objects. + +Do I need to use VMA? + +You don't need to, but it may be beneficial in many cases. +Vulkan is a complex and low-level API, so libraries like this that abstract certain aspects of the API +and bring them to a higher level are useful. +When developing any non-trivial Vulkan application, you likely need to use a memory allocator. 
+Using VMA can save time compared to implementing your own. + +When should I not use VMA? + +While VMA is useful for most applications that use the Vulkan API, there are cases +when it may be a better choice not to use it. +For example, if the application is very simple, e.g. serving as a sample or a learning exercise +to help you understand or teach others the basics of Vulkan, +and it creates only a small number of buffers and images, then including VMA may be an overkill. +Developing your own memory allocator may also be a good learning exercise. + +What are the benefits of using VMA? + +-# VMA helps in choosing the optimal memory type for your resource (buffer or image). + In Vulkan, we have a two-level hierarchy of memory heaps and types with different flags, + and each device can expose a different set of those. + Implementing logic that would select the best memory type on each platform is a non-trivial task. + VMA does that, expecting only a high-level description of the intended usage of your resource. + For more information, see \subpage choosing_memory_type. +-# VMA allocates large blocks of `VkDeviceMemory` and sub-allocates parts of them for your resources. + Allocating a new block of device memory may be a time-consuming operation. + Some platforms also have a limit on the maximum number of those blocks (`VkPhysicalDeviceLimits::maxMemoryAllocationCount`) + as low as 4096, so allocating a separate one for each resource is not an option. + Sub-allocating parts of a memory block requires implementing an allocation algorithm, + which is a non-trivial task. + VMA does that, using an advanced and efficient algorithm that works well in various use cases. +-# VMA offers a simple API that allows creating buffers and textures within one function call. + In Vulkan, the creation of a resource is a multi-step process. 
+ You need to create a `VkBuffer` or `VkImage`, ask it for memory requirements, + allocate a `VkDeviceMemory` object, and finally bind the resource to the memory block. + VMA does that automatically under a simple API within one function call: vmaCreateBuffer(), vmaCreateImage(). + +The library is doing much more under the hood. +For example, it respects limits like `bufferImageGranularity`, `nonCoherentAtomSize`, +and `VkMemoryDedicatedRequirements` automatically, so you don't need to think about it. + +Which version should I pick? + +You can just pick [the latest version from the "master" branch](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator). +It is kept in a good shape most of the time, compiling and working correctly, +with no compatibility-breaking changes and no unfinished code. + +If you want an even more stable version, you can pick +[the latest official release](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/releases). +Current code from the master branch is occasionally tagged as a release, +with [CHANGELOG](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/blob/master/CHANGELOG.md) +carefully curated to enumerate all important changes since the previous version. + +The library uses [Semantic Versioning](https://semver.org/), +which means versions that only differ in the patch number are forward and backward compatible +(e.g., only fixing some bugs), while versions that differ in the minor number are backward compatible +(e.g., only adding new functions to the API, but not removing or changing existing ones). + +How to integrate it with my code? + +VMA is an STB-style single-header C++ library. + +You can pull the entire GitHub repository, e.g. using Git submodules. +The repository contains ancillary files like the Cmake script, Doxygen config file, +sample application, test suite, and others. +You can compile it as a library and link with your project. 
+ +However, a simpler way is taking the single file "include/vk_mem_alloc.h" and including it in your project. +This extensive file contains all you need: a copyright notice, +declarations of the public library interface (API), its internal implementation, +and even the documentation in form of Doxygen-style comments. + +The "STB style" means not everything is implemented as inline functions in the header file. +You need to extract the internal implementation using a special macro. +This means that in every .cpp file where you need to use the library you should +`#include "vk_mem_alloc.h"` to include its public interface, +but additionally in exactly one .cpp file you should `#define VMA_IMPLEMENTATION` +before this `#include` to enable its internal implementation. +For more information, see [Project setup](@ref quick_start_project_setup). + +Does the library work with C or C++? + +The internal implementation of VMA is written in C++. +It is distributed in the source format, so you need a compiler supporting at least C++14 to build it. + +However, the public interface of the library is written in C - using only enums, structs, and global functions, +in the same style as Vulkan, so you can use the library in the C code. + +I am not a fan of modern C++. Can I still use it? + +Very likely yes. +We acknowledge that many C++ developers, especially in the games industry, +do not appreciate all the latest features that the language has to offer. + +- VMA doesn't throw or catch any C++ exceptions. + It reports errors by returning a `VkResult` value instead, just like Vulkan. + If you don't use exceptions in your project, your code is not exception-safe, + or even if you disable exception handling in the compiler options, you can still use VMA. +- VMA doesn't use C++ run-time type information like `typeid` or `dynamic_cast`, + so if you disable RTTI in the compiler options, you can still use the library. +- VMA uses only a limited subset of standard C and C++ library. 
+ It doesn't use STL containers like `std::vector`, `map`, or `string`, + neither in the public interface nor in the internal implementation. + It implements its own containers instead. +- If you don't use the default heap memory allocator through `malloc/free` or `new/delete` + but implement your own allocator instead, you can pass it to VMA and + the library will use your functions for every dynamic heap allocation made internally, + as well as passing it further to Vulkan functions. For details, see [Custom host memory allocator](@ref custom_memory_allocator). + +Is it available for other programming languages? + +VMA is a C++ library with a C interface in a similar style to Vulkan. +An object-oriented C++ wrapper or bindings to other programming languages are out of scope of this project, +but they are welcome as external projects. +Some of them are listed in [README.md, "See also" section](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator?tab=readme-ov-file#see-also), +including bindings to C++, Python, Rust, and Haskell. +Before using any of them, please check if they are still maintained and updated to use a recent version of VMA. + +What platforms does it support? + +VMA relies only on Vulkan and some parts of the standard C and C++ library, +so it supports any platform where a C++ compiler and Vulkan are available. +It is developed mostly on Microsoft(R) Windows(R), +but it has been successfully used in Linux(R), MacOS, Android, and even FreeBSD and Raspberry Pi. + +Does it only work on AMD GPUs? + +No! While VMA is published by AMD, it works on any GPU that supports Vulkan, +whether a discrete PC graphics card, processor-integrated graphics, or a mobile SoC. +It doesn't give AMD GPUs any advantage over any other GPUs. + +What Vulkan versions and extensions are supported? + +VMA is updated to support the latest versions of Vulkan. +It currently supports Vulkan up to 1.4. 
+The library also supports older versions down to the first release of Vulkan 1.0. +Defining a higher minimum version support would help simplify the code, +but we acknowledge that developers on some platforms like Android still use older versions, +so the support is provided for all of them. + +Among many extensions available for Vulkan, only a few interact with memory management. +VMA can automatically take advantage of them. Some of them are: +VK_EXT_memory_budget, VK_EXT_memory_priority, VK_KHR_external_memory_win32, and VK_KHR_maintenance* +extensions that are later promoted to the new versions of the core Vulkan API. +To use them, it is your responsibility to validate if they are available on the current system and if so, +enable them while creating the Vulkan device object. +You also need to pass appropriate #VmaAllocatorCreateFlagBits to inform VMA that they are enabled. +Then, the library will automatically take advantage of them. +For more information and the full list of supported extensions, see [Enabling extensions](@ref quick_start_initialization_enabling_extensions). + +Does it support other graphics APIs, like Microsoft DirectX(R) 12? + +No, but we offer an equivalent library for DirectX 12: +[D3D12 Memory Allocator](https://github.com/GPUOpen-LibrariesAndSDKs/D3D12MemoryAllocator). +It uses the same core allocation algorithm. +It also shares many features with VMA, like the support for custom pools and virtual allocator. +However, it is not identical in terms of the features supported. +Its API also looks different, because while the interface of VMA is similar in style to Vulkan, +the interface of D3D12MA is similar to DirectX 12. + +Is the library lightweight? + +It depends on how you define it. +VMA is implemented with high-performance and real-time applications like video games in mind. +The CPU performance overhead of using this library is low. 
+It uses a high-quality allocation algorithm called Two-Level Segregated Fit (TLSF), +which in most cases can find a free place for a new allocation in a few steps. +The library also doesn't perform too many CPU heap allocations. +In many cases, the allocation happens with 0 new CPU heap allocations performed by the library. +Even the creation of a #VmaAllocation object doesn't typically feature a CPU allocation, +because these objects are returned out of a dedicated memory pool. + +On the other hand, however, VMA needs some extra memory and extra time +to maintain the metadata about the occupied and free regions of the memory blocks, +and the algorithms and data structures used must be generic enough to work well in most cases. +If you develop your program for a very resource-constrained platform, +a custom allocator simpler than VMA may be a better choice. + +Does it have documentation? + +Yes! VMA comes with full documentation of all elements of the API (functions, structures, enums), +as well as many generic chapters that provide an introduction, +describe core concepts of the library, good practices, etc. +The entire documentation is written in the form of code comments inside "vk_mem_alloc.h", in Doxygen format. +You can access it in multiple ways: + +- Browsable online: https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/ +- Local HTML pages available after you clone the repository and open file "docs/html/index.html". +- You can rebuild the documentation in HTML or some other format from the source code using Doxygen. + Configuration file "Doxyfile" is part of the repository. +- Finally, you can just read the comments preceding declarations of any public functions of the library. + +Is it a mature project? + +Yes! The library has been in development since June 2017, has over 1000 commits, over 500 issue tickets +and pull requests (most of them resolved), and over 80 contributors. +It is distributed together with the Vulkan SDK. 
+It is used by many software projects, including some large and popular ones like Qt or Blender, +as well as some AAA games. +According to the [LunarG 2024 Ecosystem Survey](https://www.lunarg.com/2024-ecosystem-survey-progress-report-released/), +it is used by over 50% of Vulkan developers. + +How can I contribute to the project? + +If you have an idea for improvement or a feature request, +you can go to [the library repository](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) +and create an Issue ticket, describing your idea. +You can also implement it yourself by forking the repository, making changes to the code, +and creating a Pull request. + +If you want to ask a question, you can also create a ticket the same way. +Before doing this, please make sure you read the relevant part of the Vulkan specification and VMA documentation, +where you may find the answers to your question. + +If you want to report a suspected bug, you can also create a ticket the same way. +Before doing this, please put some effort into the investigation of whether the bug is really +in the library and not in your code or in the Vulkan implementation (the GPU driver) on your platform: + +- Enable Vulkan validation layer and make sure it is free from any errors. +- Make sure `VMA_ASSERT` is defined to an implementation that can report a failure and not ignore it. +- Try making your allocation using pure Vulkan functions rather than VMA and see if the bug persists. + +I found some compilation warnings. How can we fix them? + +Seeing compiler warnings may be annoying to some developers, +but it is a design decision to not fix all of them. +Due to the nature of the C++ language, certain preprocessor macros can make some variables unused, +function parameters unreferenced, or conditional expressions constant in some configurations. +The code of this library should not be bigger or more complicated just to silence these warnings. 
+It is recommended to disable such warnings instead. +For more information, see [Features not supported](@ref general_considerations_features_not_supported). + +However, if you observe a warning that is really dangerous, e.g., +about an implicit conversion from a larger to a smaller integer type, please report it and it will be fixed ASAP. + + +\page quick_start Quick start + +\section quick_start_project_setup Project setup + +Vulkan Memory Allocator comes in the form of a "stb-style" single header file. +While you can pull the entire repository e.g. as a Git submodule and there is also a CMake script provided, +you don't need to build it as a separate library project. +You can add file "vk_mem_alloc.h" directly to your project and submit it to your code repository next to your other source files. + +"Single header" doesn't mean that everything is contained in C/C++ declarations, +like it tends to be in the case of inline functions or C++ templates. +It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro. +If you don't do it properly, it will result in linker errors. + +To do it properly: + +-# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library. + This includes declarations of all members of the library. +-# In exactly one CPP file define the following macro before this include. + It also enables internal definitions. + +\code +#define VMA_IMPLEMENTATION +#include "vk_mem_alloc.h" +\endcode + +It may be a good idea to create a dedicated CPP file just for this purpose, e.g. "VmaUsage.cpp". + +This library includes header ``, which in turn +includes `` on Windows. If you need some specific macros defined +before including these headers (like `WIN32_LEAN_AND_MEAN` or +`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define +them before every `#include` of this library. +It may be a good idea to create a dedicated header file for this purpose, e.g. 
"VmaUsage.h", +that will be included in other source files instead of the VMA header directly. + +This library is written in C++, but has a C-compatible interface. +Thus, you can include and use "vk_mem_alloc.h" in C or C++ code, but the full +implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C. +Some features of C++14 are used and required. Features of C++20 are used optionally when available. +Some headers of the standard C and C++ library are used, but STL containers, RTTI, or C++ exceptions are not used. + + +\section quick_start_initialization Initialization + +VMA offers a library interface in a style similar to Vulkan, with object handles like #VmaAllocation, +structures describing parameters of objects to be created like #VmaAllocationCreateInfo, +and error codes returned from functions using the `VkResult` type. + +The first and the main object that needs to be created is #VmaAllocator. +It represents the initialization of the entire library. +Only one such object should be created per `VkDevice`. +You should create it at program startup, after `VkDevice` was created, and before any device memory allocation needs to be made. +It must be destroyed before `VkDevice` is destroyed. + +At program startup: + +-# Initialize Vulkan to have `VkInstance`, `VkPhysicalDevice`, `VkDevice` objects. +-# Fill the VmaAllocatorCreateInfo structure and call vmaCreateAllocator() to create the #VmaAllocator object. + +Only members `physicalDevice`, `device`, `instance` are required. +However, you should inform the library which Vulkan version you use by setting +VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled +by setting VmaAllocatorCreateInfo::flags. +Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions. +See below for details. + +\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version + +VMA supports Vulkan versions down to 1.0, for backward compatibility. 
+If you want to use higher version, you need to inform the library about it. +This is a two-step process. + +Step 1: Compile time. By default, VMA compiles with code supporting the highest +Vulkan version found in the included `` that is also supported by the library. +If this is OK, you don't need to do anything. +However, if you want to compile VMA as if only some lower Vulkan version was available, +define macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`. +It should have decimal numeric value in form of ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version. +For example, to compile against Vulkan 1.2: + +\code +#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2 +#include "vk_mem_alloc.h" +\endcode + +Step 2: Runtime. Even when compiled with higher Vulkan version available, +VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object. +By default, only Vulkan 1.0 is used. +To initialize the allocator with support for higher Vulkan version, you need to set member +VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`. +See code sample below. + +\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions + +You may need to configure importing Vulkan functions. There are 4 ways to do this: + +-# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows): + - You don't need to do anything. + - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default. +-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`, + `vkGetDeviceProcAddr` (this is the option presented in the example below): + - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1. + - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr, + VmaVulkanFunctions::vkGetDeviceProcAddr. 
+ - The library will fetch pointers to all other functions it needs internally. +-# **If you fetch pointers to all Vulkan functions in a custom way**: + - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0. + - Pass these pointers via structure #VmaVulkanFunctions. +-# **If you use [volk library](https://github.com/zeux/volk)**: + - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0. + - Use function vmaImportVulkanFunctionsFromVolk() to fill in the structure #VmaVulkanFunctions. + For more information, see the description of this function. + +\subsection quick_start_initialization_enabling_extensions Enabling extensions + +VMA can automatically use following Vulkan extensions. +If you found them available on the selected physical device and you enabled them +while creating `VkInstance` / `VkDevice` object, inform VMA about their availability +by setting appropriate flags in VmaAllocatorCreateInfo::flags. + +Vulkan extension | VMA flag +------------------------------|----------------------------------------------------- +VK_KHR_dedicated_allocation | #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT +VK_KHR_bind_memory2 | #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT +VK_KHR_maintenance4 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT +VK_KHR_maintenance5 | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT +VK_EXT_memory_budget | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT +VK_KHR_buffer_device_address | #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT +VK_EXT_memory_priority | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT +VK_AMD_device_coherent_memory | #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT +VK_KHR_external_memory_win32 | #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT + +Example with fetching pointers to Vulkan functions dynamically: + +\code +#define VMA_STATIC_VULKAN_FUNCTIONS 0 +#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 +#include "vk_mem_alloc.h" + +... 
+ +VmaVulkanFunctions vulkanFunctions = {}; +vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr; +vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr; + +VmaAllocatorCreateInfo allocatorCreateInfo = {}; +allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT; +allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; +allocatorCreateInfo.physicalDevice = physicalDevice; +allocatorCreateInfo.device = device; +allocatorCreateInfo.instance = instance; +allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; + +VmaAllocator allocator; +vmaCreateAllocator(&allocatorCreateInfo, &allocator); + +// Entire program... + +// At the end, don't forget to: +vmaDestroyAllocator(allocator); +\endcode + + +\subsection quick_start_initialization_other_config Other configuration options + +There are additional configuration options available through preprocessor macros that you can define +before including VMA header and through parameters passed in #VmaAllocatorCreateInfo. +They include a possibility to use your own callbacks for host memory allocations (`VkAllocationCallbacks`), +callbacks for device memory allocations (instead of `vkAllocateMemory`, `vkFreeMemory`), +or your custom `VMA_ASSERT` macro, among others. +For more information, see: @ref configuration. + + +\section quick_start_resource_allocation Resource allocation + +When you want to create a buffer or image: + +-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure. +-# Fill VmaAllocationCreateInfo structure. +-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory + already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory. 
+ +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +Don't forget to destroy your buffer and allocation objects when no longer needed: + +\code +vmaDestroyBuffer(allocator, buffer, allocation); +\endcode + +If you need to map the buffer, you must set flag +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT +in VmaAllocationCreateInfo::flags. +There are many additional parameters that can control the choice of memory type to be used for the allocation +and other features. +For more information, see documentation chapters: @ref choosing_memory_type, @ref memory_mapping. + + +\page choosing_memory_type Choosing memory type + +Physical devices in Vulkan support various combinations of memory heaps and +types. Help with choosing correct and optimal memory type for your specific +resource is one of the key features of this library. You can use it by filling +appropriate members of VmaAllocationCreateInfo structure, as described below. +You can also combine multiple methods. + +-# If you just want to find memory type index that meets your requirements, you + can use function: vmaFindMemoryTypeIndexForBufferInfo(), + vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex(). +-# If you want to allocate a region of device memory without association with any + specific image or buffer, you can use function vmaAllocateMemory(). Usage of + this function is not recommended and usually not needed. + vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, + which may be useful for sparse binding. 
+-# If you already have a buffer or an image created, you want to allocate memory + for it and then you will bind it yourself, you can use function + vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). + For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() + or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). +-# If you want to create a buffer or an image, allocate memory for it, and bind + them together, all in one call, you can use function vmaCreateBuffer(), + vmaCreateImage(). + This is the easiest and recommended way to use this library! + +When using 3. or 4., the library internally queries Vulkan for memory types +supported for that buffer or image (function `vkGetBufferMemoryRequirements()`) +and uses only one of these types. + +If no memory type can be found that meets all the requirements, these functions +return `VK_ERROR_FEATURE_NOT_PRESENT`. + +You can leave VmaAllocationCreateInfo structure completely filled with zeros. +It means no requirements are specified for memory type. +It is valid, although not very useful. + +\section choosing_memory_type_usage Usage + +The easiest way to specify memory requirements is to fill member +VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage. +It defines high level, common usage types. +Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically. + +For example, if you want to create a uniform buffer that will be filled using +transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can +do it using following code. The buffer will most likely end up in a memory type with +`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device. 
+ +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory +on systems with discrete graphics card that have the memories separate, you can use +#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST. + +When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory, +you also need to specify one of the host access flags: +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` +so you can map it. + +For example, a staging buffer that will be filled via mapped pointer and then +used as a source of transfer to the buffer described previously can be created like this. +It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT` +but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM). 
+ +\code +VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +stagingBufferInfo.size = 65536; +stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo stagingAllocInfo = {}; +stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO; +stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + +VkBuffer stagingBuffer; +VmaAllocation stagingAllocation; +vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr); +\endcode + +For more examples of creating different kinds of resources, see chapter \ref usage_patterns. +See also: @ref memory_mapping. + +Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows +about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed, +so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc. +If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting +memory type, as described below. + +\note +Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`, +`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`) +are still available and work same way as in previous versions of the library +for backward compatibility, but they are deprecated. + +\section choosing_memory_type_required_preferred_flags Required and preferred flags + +You can specify more detailed requirements by filling members +VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags +with a combination of bits from enum `VkMemoryPropertyFlags`. 
For example, +if you want to create a buffer that will be persistently mapped on host (so it +must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`, +use following code: + +\code +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; +allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; +allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +A memory type is chosen that has all the required flags and as many preferred +flags set as possible. + +Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags, +plus some extra "magic" (heuristics). + +\section choosing_memory_type_explicit_memory_types Explicit memory types + +If you inspected memory types available on the physical device and you have +a preference for memory types that you want to use, you can fill member +VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set +means that a memory type with that index is allowed to be used for the +allocation. Special value 0, just like `UINT32_MAX`, means there are no +restrictions to memory type index. + +Please note that this member is NOT just a memory type index. +Still you can use it to choose just one, specific memory type. +For example, if you already determined that your buffer should be created in +memory type 2, use following code: + +\code +uint32_t memoryTypeIndex = 2; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.memoryTypeBits = 1U << memoryTypeIndex; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +You can also use this parameter to exclude some memory types. 
+If you inspect memory heaps and types available on the current physical device and +you determine that for some reason you don't want to use a specific memory type for the allocation, +you can enable automatic memory type selection but exclude certain memory type or types +by setting all bits of `memoryTypeBits` to 1 except the ones you choose. + +\code +// ... +uint32_t excludedMemoryTypeIndex = 2; +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocInfo.memoryTypeBits = ~(1U << excludedMemoryTypeIndex); +// ... +\endcode + + +\section choosing_memory_type_custom_memory_pools Custom memory pools + +If you allocate from custom memory pool, all the ways of specifying memory +requirements described above are not applicable and the aforementioned members +of VmaAllocationCreateInfo structure are ignored. Memory type is selected +explicitly when creating the pool and then used to make all the allocations from +that pool. For further details, see \ref custom_memory_pools. + +\section choosing_memory_type_dedicated_allocations Dedicated allocations + +Memory for allocations is reserved out of larger block of `VkDeviceMemory` +allocated from Vulkan internally. That is the main feature of this whole library. +You can still request a separate memory block to be created for an allocation, +just like you would do in a trivial solution without using any allocator. +In that case, a buffer or image is always bound to that memory at offset 0. +This is called a "dedicated allocation". +You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +The library can also internally decide to use dedicated allocation in some cases, e.g.: + +- When the size of the allocation is large. +- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled + and it reports that dedicated allocation is required or recommended for the resource. 
+- When allocation of next big memory block fails due to not enough device memory, + but allocation with the exact requested size succeeds. + + +\page memory_mapping Memory mapping + +To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`, +to be able to read from it or write to it in CPU code. +Mapping is possible only of memory allocated from a memory type that has +`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. +Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose. +You can use them directly with memory allocated by this library, +but it is not recommended because of following issue: +Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed. +This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan. +It is also not thread-safe. +Because of this, Vulkan Memory Allocator provides following facilities: + +\note If you want to be able to map an allocation, you need to specify one of the flags +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT +in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable +when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values. +For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable, +but these flags can still be used for consistency. + +\section memory_mapping_copy_functions Copy functions + +The easiest way to copy data from a host pointer to an allocation is to use convenience function vmaCopyMemoryToAllocation(). +It automatically maps the Vulkan memory temporarily (if not already mapped), performs `memcpy`, +and calls `vkFlushMappedMemoryRanges` (if required - if memory type is not `HOST_COHERENT`). + +It is also the safest one, because using `memcpy` avoids a risk of accidentally introducing memory reads +(e.g. 
by doing `pMappedVectors[i] += v`), which may be very slow on memory types that are not `HOST_CACHED`. + +\code +struct ConstantBuffer +{ + ... +}; +ConstantBuffer constantBufferData = ... + +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = sizeof(ConstantBuffer); +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + +VkBuffer buf; +VmaAllocation alloc; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); + +vmaCopyMemoryToAllocation(allocator, &constantBufferData, alloc, 0, sizeof(ConstantBuffer)); +\endcode + +Copy in the other direction - from an allocation to a host pointer can be performed the same way using function vmaCopyAllocationToMemory(). + +\section memory_mapping_mapping_functions Mapping functions + +The library provides following functions for mapping of a specific allocation: vmaMapMemory(), vmaUnmapMemory(). +They are safer and more convenient to use than standard Vulkan functions. +You can map an allocation multiple times simultaneously - mapping is reference-counted internally. +You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block. +The way it is implemented is that the library always maps entire memory block, not just region of the allocation. +For further details, see description of vmaMapMemory() function. +Example: + +\code +// Having these objects initialized: +struct ConstantBuffer +{ + ... +}; +ConstantBuffer constantBufferData = ... + +VmaAllocator allocator = ... +VkBuffer constantBuffer = ... +VmaAllocation constantBufferAllocation = ... 
+ +// You can map and fill your buffer using following code: + +void* mappedData; +vmaMapMemory(allocator, constantBufferAllocation, &mappedData); +memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); +vmaUnmapMemory(allocator, constantBufferAllocation); +\endcode + +When mapping, you may see a warning from Vulkan validation layer similar to this one: + +Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used. + +It happens because the library maps entire `VkDeviceMemory` block, where different +types of images and buffers may end up together, especially on GPUs with unified memory like Intel. +You can safely ignore it if you are sure you access only memory of the intended +object that you wanted to map. + + +\section memory_mapping_persistently_mapped_memory Persistently mapped memory + +Keeping your memory persistently mapped is generally OK in Vulkan. +You don't need to unmap it before using its data on the GPU. +The library provides a special feature designed for that: +Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in +VmaAllocationCreateInfo::flags stay mapped all the time, +so you can just access CPU pointer to it any time +without a need to call any "map" or "unmap" function. +Example: + +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = sizeof(ConstantBuffer); +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +// Buffer is already mapped. 
You can access its memory.
+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+\endcode
+
+\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
+in a mappable memory type.
+For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
+For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
+
+\section memory_mapping_cache_control Cache flush and invalidate
+
+Memory in Vulkan doesn't need to be unmapped before using it on GPU,
+but unless a memory type has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+you need to manually **invalidate** cache before reading of mapped pointer
+and **flush** cache after writing to mapped pointer.
+Map/unmap operations don't do that automatically.
+Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`,
+`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
+functions that refer to given allocation object: vmaFlushAllocation(),
+vmaInvalidateAllocation(),
+or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
+
+Regions of memory specified for flush/invalidate must be aligned to
+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+within blocks are aligned to this value, so their offsets are always a multiple of
+`nonCoherentAtomSize` and two different allocations never share same "line" of this size.
+
+Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
+currently provide `HOST_COHERENT` flag on all memory types that are
+`HOST_VISIBLE`, so on PC you may not need to bother.
+ + +\page staying_within_budget Staying within budget + +When developing a graphics-intensive game or program, it is important to avoid allocating +more GPU memory than it is physically available. When the memory is over-committed, +various bad things can happen, depending on the specific GPU, graphics driver, and +operating system: + +- It may just work without any problems. +- The application may slow down because some memory blocks are moved to system RAM + and the GPU has to access them through PCI Express bus. +- A new allocation may take very long time to complete, even few seconds, and possibly + freeze entire system. +- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST` + returned somewhere later. + +\section staying_within_budget_querying_for_budget Querying for budget + +To query for current memory usage and available budget, use function vmaGetHeapBudgets(). +Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap. + +Please note that this function returns different information and works faster than +vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every +allocation, while vmaCalculateStatistics() is intended to be used rarely, +only to obtain statistical information, e.g. for debugging purposes. + +It is recommended to use VK_EXT_memory_budget device extension to obtain information +about the budget from Vulkan device. VMA is able to use this extension automatically. +When not enabled, the allocator behaves same way, but then it estimates current usage +and available budget based on its internal information and Vulkan memory heap sizes, +which may be less precise. In order to use this extension: + +1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2 + required by it are available and enable them. 
Please note that the first is a device + extension and the second is instance extension! +2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object. +3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from + Vulkan inside of it to avoid overhead of querying it with every allocation. + +\section staying_within_budget_controlling_memory_usage Controlling memory usage + +There are many ways in which you can try to stay within the budget. + +First, when making new allocation requires allocating a new memory block, the library +tries not to exceed the budget automatically. If a block with default recommended size +(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even +dedicated memory for just this resource. + +If the size of the requested resource plus current memory usage is more than the +budget, by default the library still tries to create it, leaving it to the Vulkan +implementation whether the allocation succeeds or fails. You can change this behavior +by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is +not made if it would exceed the budget or if the budget is already exceeded. +VMA then tries to make the allocation from the next eligible Vulkan memory type. +If all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag +when creating resources that are not essential for the application (e.g. the texture +of a specific object) and not to pass it when creating critically important resources +(e.g. render targets). + +On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior +that allows to control the behavior of the Vulkan implementation in out-of-memory cases - +whether it should fail with an error code or still allow the allocation. 
+Usage of this extension involves only passing extra structure on Vulkan device creation,
+so it is out of scope of this library.
+
+Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+a new allocation is created only when it fits inside one of the existing memory blocks.
+If it would require to allocate a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+This also ensures that the function call is very fast because it never goes to Vulkan
+to obtain a new block.
+
+\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will currently try to allocate memory blocks without checking whether they
+fit within budget.
+
+
+\page resource_aliasing Resource aliasing (overlap)
+
+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+management, give an opportunity to alias (overlap) multiple resources in the
+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+It can be useful to save video memory, but it must be used with caution.
+
+For example, if you know the flow of your whole render frame in advance, you
+are going to use some intermediate textures or buffers only during a small range of render passes,
+and you know these ranges don't overlap in time, you can bind these resources to
+the same place in memory, even if they have completely different parameters (width, height, format etc.).
+
+![Resource aliasing (overlap)](../gfx/Aliasing.png)
+
+Such scenario is possible using VMA, but you need to create your images manually.
+Then you need to calculate parameters of an allocation to be made using formula:
+
+- allocation size = max(size of each image)
+- allocation alignment = max(alignment of each image)
+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+
+Following example shows two different images bound to the same place in memory,
+allocated to fit the largest of them.
+ +\code +// A 512x512 texture to be sampled. +VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +img1CreateInfo.imageType = VK_IMAGE_TYPE_2D; +img1CreateInfo.extent.width = 512; +img1CreateInfo.extent.height = 512; +img1CreateInfo.extent.depth = 1; +img1CreateInfo.mipLevels = 10; +img1CreateInfo.arrayLayers = 1; +img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB; +img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; +img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +// A full screen texture to be used as color attachment. +VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +img2CreateInfo.imageType = VK_IMAGE_TYPE_2D; +img2CreateInfo.extent.width = 1920; +img2CreateInfo.extent.height = 1080; +img2CreateInfo.extent.depth = 1; +img2CreateInfo.mipLevels = 1; +img2CreateInfo.arrayLayers = 1; +img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VkImage img1; +res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1); +VkImage img2; +res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2); + +VkMemoryRequirements img1MemReq; +vkGetImageMemoryRequirements(device, img1, &img1MemReq); +VkMemoryRequirements img2MemReq; +vkGetImageMemoryRequirements(device, img2, &img2MemReq); + +VkMemoryRequirements finalMemReq = {}; +finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size); +finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment); +finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits; +// Validate if(finalMemReq.memoryTypeBits != 0) + +VmaAllocationCreateInfo allocCreateInfo = 
{};
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+VmaAllocation alloc;
+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+
+res = vmaBindImageMemory(allocator, alloc, img1);
+res = vmaBindImageMemory(allocator, alloc, img2);
+
+// You can use img1, img2 here, but not at the same time!
+
+vmaFreeMemory(allocator, alloc);
+vkDestroyImage(device, img2, nullptr);
+vkDestroyImage(device, img1, nullptr);
+\endcode
+
+VMA also provides convenience functions that create a buffer or image and bind it to memory
+represented by an existing #VmaAllocation:
+vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
+vmaCreateAliasingImage(), vmaCreateAliasingImage2().
+Versions with "2" offer additional parameter `allocationLocalOffset`.
+
+Remember that using resources that alias in memory requires proper synchronization.
+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+don't overlap on GPU timeline.
+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+For example, if you use `img1` and then want to use `img2`, you need to issue
+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+
+Additional considerations:
+
+- Vulkan also allows to interpret contents of memory between aliasing resources consistently in some cases.
+See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+- You can create more complex layout where different images and buffers are bound
+at different offsets inside one large allocation. For example, one can imagine
+a big texture used in some render passes, aliasing with a set of many small buffers
+used in some further passes. To bind a resource at non-zero offset in an allocation,
+use vmaBindBufferMemory2() / vmaBindImageMemory2().
+- Before allocating memory for the resources you want to alias, check `memoryTypeBits` +returned in memory requirements of each resource to make sure the bits overlap. +Some GPUs may expose multiple memory types suitable e.g. only for buffers or +images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your +resources may be disjoint. Aliasing them is not possible in that case. + + +\page custom_memory_pools Custom memory pools + +A memory pool contains a number of `VkDeviceMemory` blocks. +The library automatically creates and manages default pool for each memory type available on the device. +Default memory pool automatically grows in size. +Size of allocated blocks is also variable and managed automatically. +You are using default pools whenever you leave VmaAllocationCreateInfo::pool = null. + +You can create custom pool and allocate memory out of it. +It can be useful if you want to: + +- Keep certain kind of allocations separate from others. +- Enforce particular, fixed size of Vulkan memory blocks. +- Limit maximum amount of Vulkan memory allocated for that pool. +- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool. +- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in + #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain. +- Perform defragmentation on a specific subset of your allocations. + +To use custom memory pools: + +-# Fill VmaPoolCreateInfo structure. +-# Call vmaCreatePool() to obtain #VmaPool handle. +-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle. + You don't need to specify any other parameters of this structure, like `usage`. + +Example: + +\code +// Find memoryTypeIndex for the pool. +VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +sampleBufCreateInfo.size = 0x10000; // Doesn't matter. 
+sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo sampleAllocCreateInfo = {}; +sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +uint32_t memTypeIndex; +VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, + &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex); +// Check res... + +// Create a pool that can have at most 2 blocks, 128 MiB each. +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = memTypeIndex; +poolCreateInfo.blockSize = 128ULL * 1024 * 1024; +poolCreateInfo.maxBlockCount = 2; + +VmaPool pool; +res = vmaCreatePool(allocator, &poolCreateInfo, &pool); +// Check res... + +// Allocate a buffer out of it. +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 1024; +bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.pool = pool; + +VkBuffer buf; +VmaAllocation alloc; +res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); +// Check res... +\endcode + +You have to free all allocations made from this pool before destroying it. + +\code +vmaDestroyBuffer(allocator, buf, alloc); +vmaDestroyPool(allocator, pool); +\endcode + +New versions of this library support creating dedicated allocations in custom pools. +It is supported only when VmaPoolCreateInfo::blockSize = 0. +To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and +VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + + +\section custom_memory_pools_MemTypeIndex Choosing memory type index + +When creating a pool, you must explicitly specify memory type index. +To find the one suitable for your buffers or images, you can use helper functions +vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). 
+You need to provide structures with example parameters of buffers or images +that you are going to create in that pool. + +\code +VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +exampleBufCreateInfo.size = 1024; // Doesn't matter +exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +uint32_t memTypeIndex; +vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex); + +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = memTypeIndex; +// ... +\endcode + +When creating buffers/images allocated in that pool, provide following parameters: + +- `VkBufferCreateInfo`: Prefer to pass same parameters as above. + Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior. + Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers + or the other way around. +- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member. + Other members are ignored anyway. + + +\section custom_memory_pools_when_not_use When not to use custom pools + +Custom pools are commonly overused by VMA users. +While it may feel natural to keep some logical groups of resources separate in memory, +in most cases it does more harm than good. +Using custom pool shouldn't be your first choice. +Instead, please make all allocations from default pools first and only use custom pools +if you can prove and measure that it is beneficial in some way, +e.g. it results in lower memory usage, better performance, etc. + +Using custom pools has disadvantages: + +- Each pool has its own collection of `VkDeviceMemory` blocks. + Some of them may be partially or even completely empty. 
+ Spreading allocations across multiple pools increases the amount of wasted (allocated but unbound) memory.
+- You must manually choose specific memory type to be used by a custom pool (set as VmaPoolCreateInfo::memoryTypeIndex).
+ When using default pools, best memory type for each of your allocations can be selected automatically
+ using a carefully designed algorithm that works across all kinds of GPUs.
+- If an allocation from a custom pool at specific memory type fails, entire allocation operation returns failure.
+ When using default pools, VMA tries another compatible memory type.
+- If you set VmaPoolCreateInfo::blockSize != 0, each memory block has the same size,
+ while default pools start from small blocks and only allocate next blocks larger and larger
+ up to the preferred block size.
+
+Many of the common concerns can be addressed in a different way than using custom pools:
+
+- If you want to keep your allocations of certain size (small versus large) or certain lifetime (transient versus long lived)
+ separate, you likely don't need to.
+ VMA uses a high quality allocation algorithm that manages memory well in various cases.
+ Please measure and check if using custom pools provides a benefit.
+- If you want to keep your images and buffers separate, you don't need to.
+ VMA respects `bufferImageGranularity` limit automatically.
+- If you want to keep your mapped and not mapped allocations separate, you don't need to.
+ VMA respects `nonCoherentAtomSize` limit automatically.
+ It also maps only those `VkDeviceMemory` blocks that need to map any allocation.
+ It even tries to keep mappable and non-mappable allocations in separate blocks to minimize the amount of mapped memory.
+- If you want to choose a custom size for the default memory block, you can set it globally instead
+ using VmaAllocatorCreateInfo::preferredLargeHeapBlockSize. 
+- If you want to select specific memory type for your allocation, + you can set VmaAllocationCreateInfo::memoryTypeBits to `(1U << myMemoryTypeIndex)` instead. +- If you need to create a buffer with certain minimum alignment, you can still do it + using default pools by specifying VmaAllocationCreateInfo::minAlignment. + + +\section linear_algorithm Linear allocation algorithm + +Each Vulkan memory block managed by this library has accompanying metadata that +keeps track of used and unused regions. By default, the metadata structure and +algorithm tries to find best place for new allocations among free regions to +optimize memory usage. This way you can allocate and free objects in any order. + +![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png) + +Sometimes there is a need to use simpler, linear allocation algorithm. You can +create custom pool that uses such algorithm by adding flag +#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating +#VmaPool object. Then an alternative metadata management is used. It always +creates new allocations after last one and doesn't reuse free regions after +allocations freed in the middle. It results in better allocation performance and +less memory consumed by metadata. + +![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png) + +With this one flag, you can create a custom pool that can be used in many ways: +free-at-once, stack, double stack, and ring buffer. See below for details. +You don't need to specify explicitly which of these options you are going to use - it is detected automatically. + +\subsection linear_algorithm_free_at_once Free-at-once + +In a pool that uses linear algorithm, you still need to free all the allocations +individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free +them in any order. New allocations are always made after last one - free space +in the middle is not reused. 
However, when you release all the allocations and
+the pool becomes empty, allocation starts from the beginning again. This way you
+can use linear algorithm to speed up creation of allocations that you are going
+to release all at once.
+
+![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
+
+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_stack Stack
+
+When you free an allocation that was created last, its space can be reused.
+Thanks to this, if you always release allocations in the order opposite to their
+creation (LIFO - Last In First Out), you can achieve behavior of a stack.
+
+![Stack](../gfx/Linear_allocator_4_stack.png)
+
+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_double_stack Double stack
+
+The space reserved by a custom pool with linear algorithm may be used by two
+stacks:
+
+- First, default one, growing up from offset 0.
+- Second, "upper" one, growing down from the end towards lower offsets.
+
+To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
+to VmaAllocationCreateInfo::flags.
+
+![Double stack](../gfx/Linear_allocator_7_double_stack.png)
+
+Double stack is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
+
+When the two stacks' ends meet so there is not enough space between them for a
+new allocation, such allocation fails with usual
+`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+\subsection linear_algorithm_ring_buffer Ring buffer
+
+When you free some allocations from the beginning and there is not enough free space
+for a new one at the end of a pool, allocator's "cursor" wraps around to the
+beginning and starts allocation there. 
Thanks to this, if you always release +allocations in the same order as you created them (FIFO - First In First Out), +you can achieve behavior of a ring buffer / queue. + +![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png) + +Ring buffer is available only in pools with one memory block - +VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. + +\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. + + +\page defragmentation Defragmentation + +Interleaved allocations and deallocations of many objects of varying size can +cause fragmentation over time, which can lead to a situation where the library is unable +to find a continuous range of free memory for a new allocation despite there is +enough free space, just scattered across many small free ranges between existing +allocations. + +To mitigate this problem, you can use defragmentation feature. +It doesn't happen automatically though and needs your cooperation, +because VMA is a low level library that only allocates memory. +It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures. +It cannot copy their contents as it doesn't record any commands to a command buffer. + +Example: + +\code +VmaDefragmentationInfo defragInfo = {}; +defragInfo.pool = myPool; +defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT; + +VmaDefragmentationContext defragCtx; +VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx); +// Check res... + +for(;;) +{ + VmaDefragmentationPassMoveInfo pass; + res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass); + if(res == VK_SUCCESS) + break; + else if(res != VK_INCOMPLETE) + // Handle error... + + for(uint32_t i = 0; i < pass.moveCount; ++i) + { + // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents. 
+ VmaAllocationInfo allocInfo;
+ vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
+ MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
+
+ // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
+ VkImageCreateInfo imgCreateInfo = ...
+ VkImage newImg;
+ res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
+ // Check res...
+ res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
+ // Check res...
+
+ // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
+ vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
+ }
+
+ // Make sure the copy commands finished executing.
+ vkWaitForFences(...);
+
+ // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
+ for(uint32_t i = 0; i < pass.moveCount; ++i)
+ {
+ // ...
+ vkDestroyImage(device, resData->img, nullptr);
+ }
+
+ // Update appropriate descriptors to point to the new places...
+
+ res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+ if(res == VK_SUCCESS)
+ break;
+ else if(res != VK_INCOMPLETE)
+ // Handle error...
+}
+
+vmaEndDefragmentation(allocator, defragCtx, nullptr);
+\endcode
+
+Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
+create/destroy an allocation and a buffer/image at once, these are just a shortcut for
+creating the resource, allocating memory, and binding them together.
+Defragmentation works on memory allocations only. You must handle the rest manually.
+Defragmentation is an iterative process that should repeat "passes" as long as related functions
+return `VK_INCOMPLETE` not `VK_SUCCESS`.
+In each pass:
+
+1. vmaBeginDefragmentationPass() function call:
+ - Calculates and returns the list of allocations to be moved in this pass.
+ Note this can be a time-consuming process. 
+ - Reserves destination memory for them by creating temporary destination allocations + that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo(). +2. Inside the pass, **you should**: + - Inspect the returned list of allocations to be moved. + - Create new buffers/images and bind them at the returned destination temporary allocations. + - Copy data from source to destination resources if necessary. + - Destroy the source buffers/images, but NOT their allocations. +3. vmaEndDefragmentationPass() function call: + - Frees the source memory reserved for the allocations that are moved. + - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory. + - Frees `VkDeviceMemory` blocks that became empty. + +Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter. +Defragmentation algorithm tries to move all suitable allocations. +You can, however, refuse to move some of them inside a defragmentation pass, by setting +`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. +This is not recommended and may result in suboptimal packing of the allocations after defragmentation. +If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool. + +Inside a pass, for each allocation that should be moved: + +- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`. + - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass(). +- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared, + filled, and used temporarily in each rendering frame, you can just recreate this image + without copying its data. 
+- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU + using `memcpy()`. +- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. + This will cancel the move. + - vmaEndDefragmentationPass() will then free the destination memory + not the source memory of the allocation, leaving it unchanged. +- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time), + you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object. + +You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool +(like in the example above) or all the default pools by setting this member to null. + +Defragmentation is always performed in each pool separately. +Allocations are never moved between different Vulkan memory types. +The size of the destination memory reserved for a moved allocation is the same as the original one. +Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation. +Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones. + +You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved +in each pass, e.g. to call it in sync with render frames and not to experience too big hitches. +See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass. 
+ +It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA +usage, possibly from multiple threads, with the exception that allocations +returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended. + +Mapping is preserved on allocations that are moved during defragmentation. +Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations +are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried +using VmaAllocationInfo::pMappedData. + +\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. + + +\page statistics Statistics + +This library contains several functions that return information about its internal state, +especially the amount of memory allocated from Vulkan. + +\section statistics_numeric_statistics Numeric statistics + +If you need to obtain basic statistics about memory usage per heap, together with current budget, +you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget. +This is useful to keep track of memory usage and stay within budget +(see also \ref staying_within_budget). +Example: + +\code +uint32_t heapIndex = ... 
+
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetHeapBudgets(allocator, budgets);
+
+printf("My heap currently has %u allocations taking %llu B,\n",
+ budgets[heapIndex].statistics.allocationCount,
+ budgets[heapIndex].statistics.allocationBytes);
+printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
+ budgets[heapIndex].statistics.blockCount,
+ budgets[heapIndex].statistics.blockBytes);
+printf("Vulkan reports total usage %llu B with budget %llu B.\n",
+ budgets[heapIndex].usage,
+ budgets[heapIndex].budget);
+\endcode
+
+You can query for more detailed statistics per memory heap, type, and totals,
+including minimum and maximum allocation size and unused range size,
+by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
+This function is slower though, as it has to traverse all the internal data structures,
+so it should be used only for debugging purposes.
+
+You can query for statistics of a custom pool using function vmaGetPoolStatistics()
+or vmaCalculatePoolStatistics().
+
+You can query for information about a specific allocation using function vmaGetAllocationInfo().
+It fills structure #VmaAllocationInfo.
+
+\section statistics_json_dump JSON dump
+
+You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
+The result is guaranteed to be correct JSON.
+It uses ANSI encoding.
+Any strings provided by user (see [Allocation names](@ref allocation_names))
+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+this JSON string can be treated as using this encoding.
+It must be freed using function vmaFreeStatsString().
+
+The format of this JSON string is not part of official documentation of the library,
+but it will not change in backward-incompatible way without increasing library major version number
+and appropriate mention in changelog. 
+ +The JSON string contains all the data that can be obtained using vmaCalculateStatistics(). +It can also contain detailed map of allocated memory blocks and their regions - +free and occupied by allocations. +This allows e.g. to visualize the memory or assess fragmentation. + + +\page allocation_annotation Allocation names and user data + +\section allocation_user_data Allocation user data + +You can annotate allocations with your own information, e.g. for debugging purposes. +To do that, fill VmaAllocationCreateInfo::pUserData field when creating +an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer, +some handle, index, key, ordinal number or any other value that would associate +the allocation with your custom metadata. +It is useful to identify appropriate data structures in your engine given #VmaAllocation, +e.g. when doing \ref defragmentation. + +\code +VkBufferCreateInfo bufCreateInfo = ... + +MyBufferMetadata* pMetadata = CreateBufferMetadata(); + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.pUserData = pMetadata; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr); +\endcode + +The pointer may be later retrieved as VmaAllocationInfo::pUserData: + +\code +VmaAllocationInfo allocInfo; +vmaGetAllocationInfo(allocator, allocation, &allocInfo); +MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData; +\endcode + +It can also be changed using function vmaSetAllocationUserData(). + +Values of (non-zero) allocations' `pUserData` are printed in JSON report created by +vmaBuildStatsString() in hexadecimal form. + +\section allocation_names Allocation names + +An allocation can also carry a null-terminated string, giving a name to the allocation. +To set it, call vmaSetAllocationName(). 
+The library creates internal copy of the string, so the pointer you pass doesn't need +to be valid for whole lifetime of the allocation. You can free it after the call. + +\code +std::string imageName = "Texture: "; +imageName += fileName; +vmaSetAllocationName(allocator, allocation, imageName.c_str()); +\endcode + +The string can be later retrieved by inspecting VmaAllocationInfo::pName. +It is also printed in JSON report created by vmaBuildStatsString(). + +\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. +You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. + + +\page virtual_allocator Virtual allocator + +As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator". +It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block". +You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan. +A common use case is sub-allocation of pieces of one large GPU buffer. + +\section virtual_allocator_creating_virtual_block Creating virtual block + +To use this functionality, there is no main "allocator" object. +You don't need to have #VmaAllocator object created. +All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator: + +-# Fill in #VmaVirtualBlockCreateInfo structure. +-# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object. 
+ +Example: + +\code +VmaVirtualBlockCreateInfo blockCreateInfo = {}; +blockCreateInfo.size = 1048576; // 1 MB + +VmaVirtualBlock block; +VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block); +\endcode + +\section virtual_allocator_making_virtual_allocations Making virtual allocations + +#VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions +using the same code as the main Vulkan memory allocator. +Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type +that represents an opaque handle to an allocation within the virtual block. + +In order to make such allocation: + +-# Fill in #VmaVirtualAllocationCreateInfo structure. +-# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation. + You can also receive `VkDeviceSize offset` that was assigned to the allocation. + +Example: + +\code +VmaVirtualAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.size = 4096; // 4 KB + +VmaVirtualAllocation alloc; +VkDeviceSize offset; +res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset); +if(res == VK_SUCCESS) +{ + // Use the 4 KB of your memory starting at offset. +} +else +{ + // Allocation failed - no space for it could be found. Handle this error! +} +\endcode + +\section virtual_allocator_deallocation Deallocation + +When no longer needed, an allocation can be freed by calling vmaVirtualFree(). +You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate() +called for the same #VmaVirtualBlock. + +When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock(). +All allocations must be freed before the block is destroyed, which is checked internally by an assert. 
+However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
+a feature not available in normal Vulkan memory allocator. Example:
+
+\code
+vmaVirtualFree(block, alloc);
+vmaDestroyVirtualBlock(block);
+\endcode
+
+\section virtual_allocator_allocation_parameters Allocation parameters
+
+You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
+Its default value is null.
+It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
+larger data structure containing more information. Example:
+
+\code
+struct CustomAllocData
+{
+ std::string m_AllocName;
+};
+CustomAllocData* allocData = new CustomAllocData();
+allocData->m_AllocName = "My allocation 1";
+vmaSetVirtualAllocationUserData(block, alloc, allocData);
+\endcode
+
+The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
+vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
+If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
+Example:
+
+\code
+VmaVirtualAllocationInfo allocInfo;
+vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
+delete (CustomAllocData*)allocInfo.pUserData;
+
+vmaVirtualFree(block, alloc);
+\endcode
+
+\section virtual_allocator_alignment_and_units Alignment and units
+
+It feels natural to express sizes and offsets in bytes.
+If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill optional member
+VmaVirtualAllocationCreateInfo::alignment to request it. 
Example:
+
+\code
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096; // 4 KB
+allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
+
+VmaVirtualAllocation alloc;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
+\endcode
+
+Alignments of different allocations made from one block may vary.
+However, if all alignments and sizes are always multiples of some size e.g. 4 B or `sizeof(MyDataStruct)`,
+you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
+It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
+
+- VmaVirtualBlockCreateInfo::size
+- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
+- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
+
+\section virtual_allocator_statistics Statistics
+
+You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
+(to get brief statistics that are fast to calculate)
+or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
+The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.
+Example:
+
+\code
+VmaStatistics stats;
+vmaGetVirtualBlockStatistics(block, &stats);
+printf("My virtual block has %llu bytes used by %u virtual allocations\n",
+ stats.allocationBytes, stats.allocationCount);
+\endcode
+
+You can also request a full list of allocations and free regions as a string in JSON format by calling
+vmaBuildVirtualBlockStatsString().
+Returned string must be later freed using vmaFreeVirtualBlockStatsString().
+The format of this string differs from the one returned by the main Vulkan allocator, but it is similar. 
+ +\section virtual_allocator_additional_considerations Additional considerations + +The "virtual allocator" functionality is implemented on a level of individual memory blocks. +Keeping track of a whole collection of blocks, allocating new ones when out of free space, +deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user. + +Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory. +See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT). +You can find their description in chapter \ref custom_memory_pools. +Allocation strategies are also supported. +See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT). + +Following features are supported only by the allocator of the real GPU memory and not by virtual allocations: +buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`. + + +\page debugging_memory_usage Debugging incorrect memory usage + +If you suspect a bug with memory usage, like usage of uninitialized memory or +memory being overwritten out of bounds of an allocation, +you can use debug features of this library to verify this. + +\section debugging_memory_usage_initialization Memory initialization + +If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used, +you can enable automatic memory initialization to verify this. +To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. + +\code +#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 +#include "vk_mem_alloc.h" +\endcode + +It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`. +Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`. +Memory is automatically mapped and unmapped if necessary. 
+ +If you find these values while debugging your program, good chances are that you incorrectly +read Vulkan memory that is allocated but not initialized, or already freed, respectively. + +Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped. +It works also with dedicated allocations. + +\section debugging_memory_usage_margins Margins + +By default, allocations are laid out in memory blocks next to each other if possible +(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`). + +![Allocations without margin](../gfx/Margins_1.png) + +Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified +number of bytes as a margin after every allocation. + +\code +#define VMA_DEBUG_MARGIN 16 +#include "vk_mem_alloc.h" +\endcode + +![Allocations with margin](../gfx/Margins_2.png) + +If your bug goes away after enabling margins, it means it may be caused by memory +being overwritten outside of allocation boundaries. It is not 100% certain though. +Change in application behavior may also be caused by different order and distribution +of allocations across memory blocks after margins are applied. + +Margins work with all types of memory. + +Margin is applied only to allocations made out of memory blocks and not to dedicated +allocations, which have their own memory block of specific size. +It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag +or those automatically decided to put into dedicated allocations, e.g. due to its +large size or recommended by VK_KHR_dedicated_allocation extension. + +Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space. + +Note that enabling margins increases memory usage and fragmentation. + +Margins do not apply to \ref virtual_allocator. 
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) after every allocation is filled with a magic number.
+This idea is also known as "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it is not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or GPU overwrote the memory outside of boundaries of the allocation,
+which indicates a serious bug.
+
+You can also explicitly request checking margins of all allocations in all memory blocks
+that belong to specified memory types by using function vmaCheckCorruption(),
+or in memory blocks that belong to specified custom pool, by using function
+vmaCheckPoolCorruption().
+
+Margin validation (corruption detection) works only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`.
+
+
+\section debugging_memory_usage_leak_detection Leak detection features
+
+At allocation and allocator destruction time VMA checks for unfreed and unmapped blocks using
+`VMA_ASSERT_LEAK()`. This macro defaults to an assertion, triggering a typically fatal error in Debug
+builds, and doing nothing in Release builds. You can provide your own definition of `VMA_ASSERT_LEAK()`
+to change this behavior.
+
+At memory block destruction time VMA lists out all unfreed allocations using the `VMA_LEAK_LOG_FORMAT()`
+macro, which defaults to `VMA_DEBUG_LOG_FORMAT`, which in turn defaults to a no-op.
+If you're having trouble with leaks - for example, the aforementioned assertion triggers, but you don't
+quite know \em why -, overriding this macro to print out the leaking blocks, combined with assigning
+individual names to allocations using vmaSetAllocationName(), can greatly aid in fixing them.
+
+\page other_api_interop Interop with other graphics APIs
+
+VMA provides some features that help with interoperability with other graphics APIs, e.g. OpenGL, Direct3D 11, Direct3D 12.
+
+\section other_api_interop_exporting_memory Exporting memory
+
+On Windows, the VK_KHR_external_memory_win32 device extension allows exporting a Win32 `HANDLE`
+of a `VkDeviceMemory` block, to be able to reference the memory on other Vulkan logical devices or instances,
+in multiple processes, and/or in multiple APIs.
+VMA offers support for it.
+
+\subsection other_api_interop_exporting_initialization Initialization
+
+1) Make sure the extension is defined in the code by including following header before including VMA:
+
+\code
+#include <vulkan/vulkan_win32.h>
+\endcode
+
+2) Check if "VK_KHR_external_memory_win32" is available among device extensions.
+Enable it when creating the `VkDevice` object.
+
+3) Enable the usage of this extension in VMA by setting flag #VMA_ALLOCATOR_CREATE_KHR_EXTERNAL_MEMORY_WIN32_BIT
+when calling vmaCreateAllocator().
+
+4) Make sure that VMA has access to the `vkGetMemoryWin32HandleKHR` function by either enabling `VMA_DYNAMIC_VULKAN_FUNCTIONS` macro
+or setting VmaVulkanFunctions::vkGetMemoryWin32HandleKHR explicitly.
+For more information, see \ref quick_start_initialization_importing_vulkan_functions.
+
+\subsection other_api_interop_exporting_preparations Preparations
+
+You can find example usage among tests, in file "Tests.cpp", function `TestWin32Handles()`.
+
+To use the extension, buffers need to be created with `VkExternalMemoryBufferCreateInfoKHR` attached to their `pNext` chain,
+and memory allocations need to be made with `VkExportMemoryAllocateInfoKHR` attached to their `pNext` chain.
+To make use of them, you need to use \ref custom_memory_pools. Example:
+
+\code
+constexpr VkExternalMemoryHandleTypeFlagsKHR handleType =
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
+
+// Define an example buffer and allocation parameters.
+VkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = {
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR,
+    nullptr,
+    handleType
+};
+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+exampleBufCreateInfo.size = 0x10000; // Doesn't matter here.
+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+exampleBufCreateInfo.pNext = &externalMemBufCreateInfo;
+
+VmaAllocationCreateInfo exampleAllocCreateInfo = {};
+exampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+// Find memory type index to use for the custom pool.
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_Allocator,
+    &exampleBufCreateInfo, &exampleAllocCreateInfo, &memTypeIndex);
+// Check res...
+
+// Create a custom pool.
+constexpr static VkExportMemoryAllocateInfoKHR exportMemAllocInfo = {
+    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR,
+    nullptr,
+    handleType
+};
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.pMemoryAllocateNext = (void*)&exportMemAllocInfo;
+
+VmaPool pool;
+res = vmaCreatePool(g_Allocator, &poolCreateInfo, &pool);
+// Check res...
+
+// YOUR OTHER CODE COMES HERE....
+
+// At the end, don't forget to destroy it!
+vmaDestroyPool(g_Allocator, pool); +\endcode + +Note that the structure passed as VmaPoolCreateInfo::pMemoryAllocateNext must remain alive and unchanged +for the whole lifetime of the custom pool, because it will be used when the pool allocates a new device memory block. +No copy is made internally. This is why variable `exportMemAllocInfo` is defined as static. + +If you want to export all memory allocated by VMA from certain memory types, +including dedicated allocations and allocations made from default pools, +an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes. +It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library +through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type. +You should not mix these two methods in a way that allows to apply both to the same memory type. +Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`. + +\subsection other_api_interop_exporting_memory_allocation Memory allocation + +Finally, you can create a buffer with an allocation out of the custom pool. +The buffer should use same flags as the sample buffer used to find the memory type. +It should also specify `VkExternalMemoryBufferCreateInfoKHR` in its `pNext` chain. + +\code +VkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = { + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, + nullptr, + handleType +}; +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = // Your desired buffer size. +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; +bufCreateInfo.pNext = &externalMemBufCreateInfo; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.pool = pool; // It is enough to set this one member. 
+ +VkBuffer buf; +VmaAllocation alloc; +res = vmaCreateBuffer(g_Allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); +// Check res... + +// YOUR OTHER CODE COMES HERE.... + +// At the end, don't forget to destroy it! +vmaDestroyBuffer(g_Allocator, buf, alloc); +\endcode + +If you need each allocation to have its own device memory block and start at offset 0, you can still do +by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag. It works also with custom pools. + +Alternatively, you can use convenient functions vmaCreateDedicatedBuffer(), vmaCreateDedicatedImage() that +always allocate dedicated memory for the buffer/image created, and also allow specifying custom `pNext` chain +for the `VkMemoryAllocateInfo` structure. + +\subsection other_api_interop_exporting_exporting_win32_handle Exporting Win32 handle + +After the allocation is created, you can acquire a Win32 `HANDLE` to the `VkDeviceMemory` block it belongs to. +VMA function vmaGetMemoryWin32Handle2() is a replacement of the Vulkan function `vkGetMemoryWin32HandleKHR`. + +\code +HANDLE handle; +res = vmaGetMemoryWin32Handle2(g_Allocator, alloc, handleType, nullptr, &handle); +// Check res... + +// YOUR OTHER CODE COMES HERE.... + +// At the end, you must close the handle. +CloseHandle(handle); +\endcode + +Documentation of the VK_KHR_external_memory_win32 extension states that: + +> If handleType is defined as an NT handle, vkGetMemoryWin32HandleKHR must be called no more than once for each valid unique combination of memory and handleType. + +This is ensured automatically inside VMA. +If `VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT` is used as the handle type, +or other NT handle types, +the library fetches the handle on first use, remembers it internally, and closes it when the memory block or dedicated allocation is destroyed. +Every time you call vmaGetMemoryWin32Handle2(), VMA calls `DuplicateHandle` and returns a new handle that you need to close. 
+For further information, please check the documentation of this function. + +\subsection other_api_interop_exporting_custom_alignment Custom alignment + +Buffers or images exported to a different API like OpenGL may require a different alignment, +higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`. +To impose such alignment: + +You can create \ref custom_memory_pools for such allocations. +Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation +to be made out of this pool. +The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image +from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically. + +If you want to create a buffer/image/allocate memory with a specific minimum alignment out of default pools, +you can use VmaAllocationCreateInfo::minAlignment. + +Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated +allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block. +You can ensure that an allocation is created as dedicated by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation. + +\subsection other_api_interop_exporting_extended_allocation_information Extended allocation information + +If you want to rely on VMA to allocate your buffers and images inside larger memory blocks, +but you need to know the size of the entire block and whether the allocation was made +with its own dedicated memory, use function vmaGetAllocationInfo2() to retrieve +extended allocation information in structure #VmaAllocationInfo2, which provides extra members: +`blockSize` and `dedicatedMemory`. 
+ +\section other_api_interop_importing_memory Importing memory + +Importing external memory requires attaching an extra structure like `VkImportMemoryWin32HandleInfoKHR` +to the `pNext` chain of `VkMemoryAllocateInfo` structure. +VMA offers support for it by providing functions that allocate memory, create a buffer or an image +always with a dedicated `VkDeviceMemory` block and accept custom `pNext` pointer: +vmaAllocateDedicatedMemory(), vmaCreateDedicatedBuffer(), vmaCreateDedicatedImage(). +Example: + +\code +constexpr VkExternalMemoryHandleTypeFlagBits handleType = + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT; + +VkExternalMemoryBufferCreateInfoKHR externalMemBufCreateInfo = { + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR }; +externalMemBufCreateInfo.handleTypes = handleType; + +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.pNext = &externalMemBufCreateInfo; // !!! +bufCreateInfo.size = ... +bufCreateInfo.usage = ... + +VkImportMemoryWin32HandleInfoKHR importInfo = { + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR }; +importInfo.handleType = handleType; +importInfo.handle = myExternalHandleToImport; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buf = VK_NULL_HANDLE; +VmaAllocation alloc = VK_NULL_HANDLE; +VkResult res = vmaCreateDedicatedBuffer(allocator, &bufCreateInfo, &allocCreateInfo, + &importInfo, // pMemoryAllocateNext !!! + &buf, &alloc, nullptr); +// Check res... +\endcode + + + +\page usage_patterns Recommended usage patterns + +Vulkan gives great flexibility in memory allocation. +This chapter shows the most common patterns. + +See also slides from talk: +[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. 
Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) + + +\section usage_patterns_gpu_only GPU-only resource + +When: +Any resources that you frequently write and read on GPU, +e.g. images used as color attachments (aka "render targets"), depth-stencil attachments, +images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)"). + +What to do: +Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + +\code +VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; +imgCreateInfo.extent.width = 3840; +imgCreateInfo.extent.height = 2160; +imgCreateInfo.extent.depth = 1; +imgCreateInfo.mipLevels = 1; +imgCreateInfo.arrayLayers = 1; +imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; +allocCreateInfo.priority = 1.0f; + +VkImage img; +VmaAllocation alloc; +vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); +\endcode + +Also consider: +Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, +especially if they are large or if you plan to destroy and recreate them with different sizes +e.g. when display resolution changes. +Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later. 
+
+When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation
+to decrease chances to be evicted to system memory by the operating system.
+
+\section usage_patterns_staging_copy_upload Staging copy for upload
+
+When:
+A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
+to some GPU resource.
+
+What to do:
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+...
+
+memcpy(allocInfo.pMappedData, myData, myDataSize);
+\endcode
+
+Also consider:
+You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
+using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
+
+
+\section usage_patterns_readback Readback
+
+When:
+Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
+e.g. results of some computations.
+
+What to do:
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
+ +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 65536; +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +... + +const float* downloadedData = (const float*)allocInfo.pMappedData; +\endcode + + +\section usage_patterns_advanced_data_uploading Advanced data uploading + +For resources that you frequently write on CPU via mapped pointer and +frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible: + +-# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory, + even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card, + and make the device reach out to that resource directly. + - Reads performed by the device will then go through PCI Express bus. + The performance of this access may be limited, but it may be fine depending on the size + of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity + of access. +-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips), + a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL` + (fast to access from the GPU). Then, it is likely the best choice for such type of resource. +-# Systems with a discrete graphics card and separate video memory may or may not expose + a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR). 
+
+   If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
+   that is available to CPU for mapping.
+   - Writes performed by the host to that memory go through PCI Express bus.
+     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
+     as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
+-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
+   a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
+
+Thankfully, VMA offers an aid to create and use such resources in the way optimal
+for the current Vulkan device. To help the library make the best choice,
+use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
+It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
+but if no such memory type is available or allocation from it fails
+(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
+it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
+It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
+so you need to create another "staging" allocation and perform explicit transfers.
+ +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 65536; +bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +VkResult result = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); +// Check result... + +VkMemoryPropertyFlags memPropFlags; +vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags); + +if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) +{ + // The Allocation ended up in a mappable memory. + // Calling vmaCopyMemoryToAllocation() does vmaMapMemory(), memcpy(), vmaUnmapMemory(), and vmaFlushAllocation(). + result = vmaCopyMemoryToAllocation(allocator, myData, alloc, 0, myDataSize); + // Check result... + + VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; + bufMemBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; + bufMemBarrier.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT; + bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.buffer = buf; + bufMemBarrier.offset = 0; + bufMemBarrier.size = VK_WHOLE_SIZE; + + // It's important to insert a buffer memory barrier here to ensure writing to the buffer has finished. + vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, + 0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr); +} +else +{ + // Allocation ended up in a non-mappable memory - a transfer using a staging buffer is required. 
+ VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + stagingBufCreateInfo.size = 65536; + stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + + VmaAllocationCreateInfo stagingAllocCreateInfo = {}; + stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + + VkBuffer stagingBuf; + VmaAllocation stagingAlloc; + VmaAllocationInfo stagingAllocInfo; + result = vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo, + &stagingBuf, &stagingAlloc, &stagingAllocInfo); + // Check result... + + // Calling vmaCopyMemoryToAllocation() does vmaMapMemory(), memcpy(), vmaUnmapMemory(), and vmaFlushAllocation(). + result = vmaCopyMemoryToAllocation(allocator, myData, stagingAlloc, 0, myDataSize); + // Check result... + + VkBufferMemoryBarrier bufMemBarrier = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; + bufMemBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; + bufMemBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + bufMemBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier.buffer = stagingBuf; + bufMemBarrier.offset = 0; + bufMemBarrier.size = VK_WHOLE_SIZE; + + // Insert a buffer memory barrier to make sure writing to the staging buffer has finished. 
+ vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr); + + VkBufferCopy bufCopy = { + 0, // srcOffset + 0, // dstOffset, + myDataSize, // size + }; + + vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy); + + VkBufferMemoryBarrier bufMemBarrier2 = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER }; + bufMemBarrier2.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + bufMemBarrier2.dstAccessMask = VK_ACCESS_UNIFORM_READ_BIT; // We created a uniform buffer + bufMemBarrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + bufMemBarrier2.buffer = buf; + bufMemBarrier2.offset = 0; + bufMemBarrier2.size = VK_WHOLE_SIZE; + + // Make sure copying from staging buffer to the actual buffer has finished by inserting a buffer memory barrier. + vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, + 0, 0, nullptr, 1, &bufMemBarrier2, 0, nullptr); +} +\endcode + +\section usage_patterns_other_use_cases Other use cases + +Here are some other, less obvious use cases and their recommended settings: + +- An image that is used only as transfer source and destination, but it should stay on the device, + as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame, + for temporal antialiasing or other temporal effects. + - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT` + - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO +- An image that is used only as transfer source and destination, but it should be placed + in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict + least recently used textures from VRAM. 
+ - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT` + - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST, + as VMA needs a hint here to differentiate from the previous case. +- A buffer that you want to map and write from the CPU, directly read from the GPU + (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or + host memory due to its large size. + - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT` + - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST + - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT + + +\page configuration Configuration + +Please check "CONFIGURATION SECTION" in the code to find macros that you can define +before each include of this file or change directly in this file to provide +your own implementation of basic facilities like assert, `min()` and `max()` functions, +mutex, atomic etc. + +For example, define `VMA_ASSERT(expr)` before including the library to provide +custom implementation of the assertion, compatible with your project. +By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration +and empty otherwise. + +Similarly, you can define `VMA_LEAK_LOG_FORMAT` macro to enable printing of leaked (unfreed) allocations, +including their names and other parameters. Example: + +\code +#define VMA_LEAK_LOG_FORMAT(format, ...) do { \ + printf((format), __VA_ARGS__); \ + printf("\n"); \ + } while(false) +\endcode + +\section config_Vulkan_functions Pointers to Vulkan functions + +There are multiple ways to import pointers to Vulkan functions in the library. +In the simplest case you don't need to do anything. +If the compilation or linking of your program or the initialization of the #VmaAllocator +doesn't work for you, you can try to reconfigure it. 
+ +First, the allocator tries to fetch pointers to Vulkan functions linked statically, +like this: + +\code +m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; +\endcode + +If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`. + +Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions. +You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or +by using a helper library like [volk](https://github.com/zeux/volk). + +Third, VMA tries to fetch remaining pointers that are still null by calling +`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own. +You need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr. +Other pointers will be fetched automatically. +If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`. + +Finally, all the function pointers required by the library (considering selected +Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null. + + +\section custom_memory_allocator Custom host memory allocator + +If you use custom allocator for CPU memory rather than default operator `new` +and `delete` from C++, you can make this library using your allocator as well +by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These +functions will be passed to Vulkan, as well as used by the library itself to +make any CPU-side allocations. + +\section allocation_callbacks Device memory allocation callbacks + +The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally. +You can setup callbacks to be informed about these calls, e.g. for the purpose +of gathering some statistics. To do it, fill optional member +VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. 
+
+\section heap_memory_limit Device heap memory limit
+
+When device memory of certain heap runs out of free space, new allocations may
+fail (returning error code) or they may succeed, silently pushing some existing
+memory blocks from GPU VRAM to system RAM (which degrades performance). This
+behavior is implementation-dependent - it depends on GPU vendor and graphics
+driver.
+
+On AMD cards it can be controlled while creating Vulkan device object by using
+VK_AMD_memory_overallocation_behavior extension, if available.
+
+Alternatively, if you want to test how your program behaves with limited amount of Vulkan device
+memory available without switching your graphics card to one that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments Vulkan API with possibility to query
+driver whether it prefers particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations. The extension is supported by this library.
+It will be used automatically when enabled.
+
+It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version
+and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
+you are all set.
+
+Otherwise, if you want to use it as an extension:
+
+1 . When creating Vulkan device, check if following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2 .
Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That is all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or image using vmaCreateImage().
+
+When using the extension together with Vulkan Validation Layer, you will receive
+warnings like this:
+
+_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
+
+It is OK, you should just ignore it. It happens because you use function
+`vkGetBufferMemoryRequirements2KHR()` instead of standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page vk_ext_memory_priority VK_EXT_memory_priority
+
+VK_EXT_memory_priority is a device extension that allows to pass additional "priority"
+value to Vulkan memory allocations that the implementation may use to prefer certain
+buffers and images that are critical for performance to stay in device-local memory
+in cases when the memory is over-subscribed, while some others may be moved to the system memory.
+
+VMA offers convenient usage of this extension.
+If you enable it, you can pass "priority" parameter when creating allocations or custom pools
+and the library automatically passes the value to Vulkan using this extension. 
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_ext_memory_priority_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
+Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
+
+3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
+`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
+
+5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_ext_memory_priority_usage Usage
+
+When using this extension, you should initialize following member:
+
+- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+- VmaPoolCreateInfo::priority when creating a custom pool.
+
+It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`. 
+Memory allocated with higher value can be treated by the Vulkan implementation as higher priority +and so it can have lower chances of being pushed out to system memory, experiencing degraded performance. + +It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images +as dedicated and set high priority to them. For example: + +\code +VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; +imgCreateInfo.extent.width = 3840; +imgCreateInfo.extent.height = 2160; +imgCreateInfo.extent.depth = 1; +imgCreateInfo.mipLevels = 1; +imgCreateInfo.arrayLayers = 1; +imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; +allocCreateInfo.priority = 1.0f; + +VkImage img; +VmaAllocation alloc; +vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); +\endcode + +`priority` member is ignored in the following situations: + +- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters + from the parameters passed in #VmaPoolCreateInfo when the pool was created. +- Allocations created in default pools: They inherit the priority from the parameters + VMA used when creating default pools, which means `priority == 0.5F`. + + +\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory + +VK_AMD_device_coherent_memory is a device extension that enables access to +additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and +`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. 
It is useful mostly for +allocation of buffers intended for writing "breadcrumb markers" in between passes +or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases. + +When the extension is available but has not been enabled, Vulkan physical device +still exposes those memory types, but their usage is forbidden. VMA automatically +takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt +to allocate memory of such type is made. + +If you want to use this extension in connection with VMA, follow these steps: + +\section vk_amd_device_coherent_memory_initialization Initialization + +1) Call `vkEnumerateDeviceExtensionProperties` for the physical device. +Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory". + +2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. +Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned. +Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true. + +3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory" +to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT +to VmaAllocatorCreateInfo::flags. 
+
+\section vk_amd_device_coherent_memory_usage Usage
+
+After following steps described above, you can create VMA allocations and custom pools
+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+devices. There are multiple ways to do it, for example:
+
+- You can request or prefer to allocate out of such memory types by adding
+  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
+  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+- If you manually found memory type index to use for this purpose, force allocation
+  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1U << index`.
+
+\section vk_amd_device_coherent_memory_more_information More information
+
+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+Device extension VK_KHR_buffer_device_address
+allows to fetch raw GPU pointer to a buffer and pass it for usage in a shader code.
+It has been promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. 
+Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned. +Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true. + +3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add +"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT +to VmaAllocatorCreateInfo::flags. + +\section enabling_buffer_device_address_usage Usage + +After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. +The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to +allocated memory blocks wherever it might be needed. + +Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. +The second part of this functionality related to "capture and replay" is not supported, +as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. 
+ +\section enabling_buffer_device_address_more_information More information + +To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) + +Example use of this extension can be found in the code of the sample and test suite +accompanying this library. + +\page general_considerations General considerations + +\section general_considerations_thread_safety Thread safety + +- The library has no global state, so separate #VmaAllocator objects can be used + independently. + There should be no need to create multiple such objects though - one per `VkDevice` is enough. +- By default, all calls to functions that take #VmaAllocator as first parameter + are safe to call from multiple threads simultaneously because they are + synchronized internally when needed. + This includes allocation and deallocation from default memory pool, as well as custom #VmaPool. +- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT + flag, calls to functions that take such #VmaAllocator object must be + synchronized externally. +- Access to a #VmaAllocation object must be externally synchronized. For example, + you must not call vmaGetAllocationInfo() and vmaMapMemory() from different + threads at the same time if you pass the same #VmaAllocation object to these + functions. +- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. + +\section general_considerations_versioning_and_compatibility Versioning and compatibility + +The library uses [**Semantic Versioning**](https://semver.org/), +which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where: + +- Incremented Patch version means a release is backward- and forward-compatible, + introducing only some internal improvements, bug fixes, optimizations etc. 
+ or changes that are out of scope of the official API described in this documentation. +- Incremented Minor version means a release is backward-compatible, + so existing code that uses the library should continue to work, while some new + symbols could have been added: new structures, functions, new values in existing + enums and bit flags, new structure members, but not new function parameters. +- Incrementing Major version means a release could break some backward compatibility. + +All changes between official releases are documented in file "CHANGELOG.md". + +\warning Backward compatibility is considered on the level of C++ source code, not binary linkage. +Adding new members to existing structures is treated as backward compatible if initializing +the new members to binary zero results in the old behavior. +You should always fully initialize all library structures to zeros and not rely on their +exact binary size. + +\section general_considerations_validation_layer_warnings Validation layer warnings + +When using this library, you can meet following types of warnings issued by +Vulkan validation layer. They don't necessarily indicate a bug, so you may need +to just ignore them. + +- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.* + - It happens when you map a buffer or image, because the library maps entire + `VkDeviceMemory` block, where different types of images and buffers may end + up together, especially on GPUs with unified memory like Intel. +- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.* + - It may happen when you use [defragmentation](@ref defragmentation). + +\section general_considerations_allocation_algorithm Allocation algorithm + +The library uses following algorithm for allocation, in order: + +-# Try to find free range of memory in existing blocks. 
+-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size. +-# If failed, try to create such block with size / 2, size / 4, size / 8. +-# If failed, try to allocate separate `VkDeviceMemory` for this allocation, + just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +-# If failed, choose other memory type that meets the requirements specified in + VmaAllocationCreateInfo and go to point 1. +-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + +\section general_considerations_features_not_supported Features not supported + +Features deliberately excluded from the scope of this library: + +-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images + between CPU and GPU memory and related synchronization is responsibility of the user. + Defining some "texture" object that would automatically stream its data from a + staging copy in CPU memory to GPU memory would rather be a feature of another, + higher-level library implemented on top of VMA. + VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory. +-# **Recreation of buffers and images.** Although the library has functions for + buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to + recreate these objects yourself after defragmentation. That is because the big + structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in + #VmaAllocation object. +-# **Handling CPU memory allocation failures.** When dynamically creating small C++ + objects in CPU memory (not Vulkan memory), allocation failures are not checked + and handled gracefully, because that would complicate code significantly and + is usually not needed in desktop PC applications anyway. + Success of an allocation is just checked with an assert. +-# **Code free of any compiler warnings.** Maintaining the library to compile and + work correctly on so many different platforms is hard enough. 
Being free of + any warnings, on any version of any compiler, is simply not feasible. + There are many preprocessor macros that make some variables unused, function parameters unreferenced, + or conditional expressions constant in some configurations. + The code of this library should not be bigger or more complicated just to silence these warnings. + It is recommended to disable such warnings instead. +-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but + are not going to be included into this repository. +*/ diff --git a/extern/crashpad/CMakeLists.txt b/extern/crashpad/CMakeLists.txt index f0f9dfe5b5..0b999c9249 100644 --- a/extern/crashpad/CMakeLists.txt +++ b/extern/crashpad/CMakeLists.txt @@ -104,7 +104,7 @@ endif() set(GN_WIN_LINK_FLAG "") # Dynamically link to C runtime library if(WIN32) - set(GN_WIN_LINK_FLAG /MT) # Dynamically link to C runtime library + set(GN_WIN_LINK_FLAG /MD) # Dynamically link to C runtime library if(CMAKE_BUILD_TYPE STREQUAL Debug) #TODO: Fix for multi-config generators string(APPEND GN_WIN_LINK_FLAG d) endif() diff --git a/extern/vk-bootstrap/.clang-format b/extern/vk-bootstrap/.clang-format new file mode 100644 index 0000000000..d247deea6e --- /dev/null +++ b/extern/vk-bootstrap/.clang-format @@ -0,0 +1,42 @@ +BasedOnStyle: LLVM + +AccessModifierOffset: 0 +AlignEscapedNewlinesLeft: false +AlignTrailingComments: true +AlignAfterOpenBracket: DontAlign +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortFunctionsOnASingleLine: true +AllowShortIfStatementsOnASingleLine: true +BinPackArguments: false +BinPackParameters: false +BreakBeforeBinaryOperators: false +BreakBeforeTernaryOperators: false +BreakConstructorInitializersBeforeComma: false +ColumnLimit: 120 +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 0 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: false +DerivePointerBinding: false 
+IndentCaseLabels: true +IndentFunctionDeclarationAfterType: false +IndentWidth: 4 +Language: Cpp +MaxEmptyLinesToKeep: 4 +PenaltyBreakBeforeFirstCallParameter: 100 +PenaltyBreakComment: 100 +PenaltyBreakFirstLessLess: 0 +PenaltyBreakString: 100 +PenaltyExcessCharacter: 1 +PenaltyReturnTypeOnItsOwnLine: 20 +PointerBindsToType: true +SpaceBeforeAssignmentOperators: true +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInCStyleCastParentheses: false +SpacesInContainerLiterals: false +SpacesInParentheses: false +Standard: Auto +TabWidth: 4 +SortIncludes: false diff --git a/extern/vk-bootstrap/.github/dependabot.yml b/extern/vk-bootstrap/.github/dependabot.yml new file mode 100644 index 0000000000..0d08e261a2 --- /dev/null +++ b/extern/vk-bootstrap/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "github-actions" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/extern/vk-bootstrap/.github/workflows/ci_build.yml b/extern/vk-bootstrap/.github/workflows/ci_build.yml new file mode 100644 index 0000000000..f687aeb6d3 --- /dev/null +++ b/extern/vk-bootstrap/.github/workflows/ci_build.yml @@ -0,0 +1,198 @@ +#!/usr/bin/env python3 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without +# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +# of the Software, 
and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Copyright © 2023 Charles Giessen (charles@lunarg.com) +# + + +name: CI Build + +on: [push, pull_request] + +jobs: + linux: + name: Linux + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + strategy: + fail-fast: false + matrix: + type: ["Debug", "Release"] + cc: ["gcc", "clang"] + cxx: ["g++", "clang++"] + exclude: + - cc: gcc + cxx: clang++ + - cc: clang + cxx: g++ + + + steps: + - uses: actions/checkout@v5 + - uses: lukka/get-cmake@latest + with: + cmakeVersion: 3.22 + + - name: Install build dependencies + run: | + sudo apt-get update + sudo apt-get install -y xorg-dev + + - name: CMake Configure + run: cmake -S. 
-B build -DCMAKE_BUILD_TYPE=${{matrix.type}} -DCMAKE_C_COMPILER=${{matrix.cc}} -DCMAKE_CXX_COMPILER=${{matrix.cxx}} -DVK_BOOTSTRAP_WERROR=ON -DVK_BOOTSTRAP_TEST=ON -DENABLE_ADDRESS_SANITIZER=ON + + - name: CMake Build + run: cmake --build build --config ${{matrix.type}} + + - name: Install + run: cmake --install build --prefix build/install + + - name: Run tests + working-directory: ./build + run: ctest --output-on-failure -C ${{matrix.type}} + + linux-thread-sanitizer: + name: Linux with Thread Sanitizer + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + strategy: + fail-fast: false + matrix: + type: ["Release"] + cc: ["gcc", "clang"] + cxx: ["g++", "clang++"] + exclude: + - cc: gcc + cxx: clang++ + - cc: clang + cxx: g++ + + steps: + - uses: actions/checkout@v5 + - uses: lukka/get-cmake@latest + with: + cmakeVersion: 3.22 + + - name: Install build dependencies + run: | + sudo apt-get update + sudo apt-get install -y xorg-dev + + - name: CMake Configure + run: cmake -S. -B build -DCMAKE_BUILD_TYPE=${{matrix.type}} -DCMAKE_C_COMPILER=${{matrix.cc}} -DCMAKE_CXX_COMPILER=${{matrix.cxx}} -DVK_BOOTSTRAP_WERROR=ON -DVK_BOOTSTRAP_TEST=ON -DENABLE_THREAD_SANITIZER=ON + + - name: CMake Build + run: cmake --build build --config ${{matrix.type}} + + - name: Install + run: cmake --install build --prefix build/install + + - name: Run tests + working-directory: ./build + run: ctest --output-on-failure -C ${{matrix.type}} + + windows: + name: Windows + runs-on: windows-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + strategy: + matrix: + arch: [ Win32, x64 ] + type: [ Debug, Release ] + + steps: + - uses: actions/checkout@v5 + - uses: TheMrMilchmann/setup-msvc-dev@v4 + with: + arch: ${{matrix.arch}} + + - name: CMake Configure + run: cmake -S. 
-B build -D CMAKE_BUILD_TYPE=${{matrix.type}} -D VK_BOOTSTRAP_WERROR=ON -D VK_BOOTSTRAP_TEST=ON -D ENABLE_ADDRESS_SANITIZER=ON -A ${{matrix.arch}} + + - name: CMake Build + run: cmake --build build --config ${{matrix.type}} + + - name: Run tests + working-directory: ./build + run: ctest --output-on-failure -C ${{matrix.type}} + + macos: + name: macOS + runs-on: macos-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + strategy: + fail-fast: false + matrix: + type: ["Debug", "Release"] + cc: ["clang"] + cxx: ["clang++"] + + steps: + - uses: actions/checkout@v5 + - uses: lukka/get-cmake@latest + with: + cmakeVersion: 3.22 + + - name: Install build dependencies + run: | + brew update + brew install xorg-server + + - name: CMake Configure + run: cmake -S. -B build -DCMAKE_BUILD_TYPE=${{matrix.type}} -DCMAKE_C_COMPILER=${{matrix.cc}} -DCMAKE_CXX_COMPILER=${{matrix.cxx}} -DVK_BOOTSTRAP_WERROR=ON -DVK_BOOTSTRAP_TEST=ON -DENABLE_ADDRESS_SANITIZER=ON + + - name: CMake Build + run: cmake --build build --config ${{matrix.type}} + + - name: Install + run: cmake --install build --prefix build/install + + - name: Run tests + working-directory: ./build + run: ctest --output-on-failure -C ${{matrix.type}} + + macos-thread-sanitizer: + name: macOS with Thread Sanitizer + runs-on: macos-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + strategy: + fail-fast: false + matrix: + type: ["Release"] + cc: ["clang"] + cxx: ["clang++"] + + steps: + - uses: actions/checkout@v5 + - uses: lukka/get-cmake@latest + with: + cmakeVersion: 3.22 + + - name: Install build dependencies + run: | + brew update + brew install xorg-server + + - name: CMake Configure + run: cmake -S. 
-B build -DCMAKE_BUILD_TYPE=${{matrix.type}} -DCMAKE_C_COMPILER=${{matrix.cc}} -DCMAKE_CXX_COMPILER=${{matrix.cxx}} -DVK_BOOTSTRAP_WERROR=ON -DVK_BOOTSTRAP_TEST=ON -DENABLE_THREAD_SANITIZER=ON + + - name: CMake Build + run: cmake --build build --config ${{matrix.type}} + + - name: Install + run: cmake --install build --prefix build/install + + - name: Run tests + working-directory: ./build + run: ctest --output-on-failure -C ${{matrix.type}} diff --git a/extern/vk-bootstrap/.github/workflows/create_tag.yml b/extern/vk-bootstrap/.github/workflows/create_tag.yml new file mode 100644 index 0000000000..d658040456 --- /dev/null +++ b/extern/vk-bootstrap/.github/workflows/create_tag.yml @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without +# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +# of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# +# Copyright © 2023 Charles Giessen (charles@lunarg.com) +# + +name: Tag header update + +on: + workflow_dispatch: + +jobs: + tag_header_update: + name: Tag header update + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v5 + + - name: Read CurrentBuildVulkanVersion.cmake + id: read-version-file + uses: juliangruber/read-file-action@v1 + with: + path: gen/CurrentBuildVulkanVersion.cmake + + - name: Match regex from version file + uses: actions-ecosystem/action-regex-match@v2 + id: regex-match + with: + text: ${{ steps.read-version-file.outputs.content }} + regex: 'VK_BOOTSTRAP_SOURCE_HEADER_VERSION_GIT_TAG (v[0-9]\.[0-9]\.[0-9]*)' + + - name: Push tag + uses: EndBug/latest-tag@latest + if: ${{ steps.regex-match.outputs.match != '' }} + with: + tag-name: ${{ steps.regex-match.outputs.group1 }} + description: Update to ${{ steps.regex-match.outputs.group1 }} of Vulkan-Headers + diff --git a/extern/vk-bootstrap/.github/workflows/run_autogen.yml b/extern/vk-bootstrap/.github/workflows/run_autogen.yml new file mode 100644 index 0000000000..59aeab7bf8 --- /dev/null +++ b/extern/vk-bootstrap/.github/workflows/run_autogen.yml @@ -0,0 +1,68 @@ +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without +# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +# of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Copyright © 2023 Charles Giessen (charles@lunarg.com) +# + +# The purpose of this script is to automatically run the autogen code every week and submit a PR to include the changes + +name: Run autogen + +on: + schedule: + - cron: '0 0 * * 2' + workflow_dispatch: + +jobs: + run_autogen: + name: Run autogen + runs-on: ubuntu-latest + env: + PR_NUMBER: ${{ github.event.number }} + + steps: + - uses: actions/create-github-app-token@v2 + id: generate-token + with: + app-id: ${{ vars.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.10' + + - run: pip install vulkan_object + + - name: Check out repository code + uses: actions/checkout@v5 + with: + fetch-depth: 2 + + - run: python script/generate_dispatch.py + + - run: python script/generate_features_chain.py + + - name: Diff source to see if anything changed + id: git-diff + run: echo "::set-output name=git-diff::$(git diff --quiet HEAD~0 || echo true)" + + - name: pull-request + uses: peter-evans/create-pull-request@v7 + if: ${{ steps.git-diff.outputs.git-diff == 'true' }} + with: + token: ${{ steps.generate-token.outputs.token }} + commit-message: Update to latest Vulkan-Headers + title: Update to latest Vulkan-Headers + branch: run-autogen + base: main + delete-branch: true diff --git a/extern/vk-bootstrap/.gitignore b/extern/vk-bootstrap/.gitignore new file mode 100644 index 0000000000..579c4266e3 --- /dev/null +++ b/extern/vk-bootstrap/.gitignore @@ -0,0 +1,6 @@ +*.vscode +*.vs +*.idea +cmake-build-* +build +.cache diff --git a/extern/vk-bootstrap/CMakeLists.txt b/extern/vk-bootstrap/CMakeLists.txt new file mode 100644 index 0000000000..7ec2d39f08 --- 
/dev/null +++ b/extern/vk-bootstrap/CMakeLists.txt @@ -0,0 +1,185 @@ +cmake_minimum_required(VERSION 3.22) + +include(gen/CurrentBuildVulkanVersion.cmake) + +project(VulkanBootstrap + LANGUAGES CXX + DESCRIPTION "A Vulkan utility library to ease the initialization steps in Vulkan" + VERSION ${VK_BOOTSTRAP_SOURCE_HEADER_VERSION}) + +option(VK_BOOTSTRAP_DISABLE_WARNINGS "Disable warnings during compilation" OFF) +option(VK_BOOTSTRAP_WERROR "Enable warnings as errors during compilation" OFF) + +option(VK_BOOTSTRAP_TEST "Test Vk-Bootstrap using Catch2 as well as build examples" ${PROJECT_IS_TOP_LEVEL}) +option(VK_BOOTSTRAP_INSTALL "Enable installing of vk-bootstrap" ${PROJECT_IS_TOP_LEVEL}) + +option(VK_BOOTSTRAP_FORCE_FETCH_CONTENT "Forces Vulkan-Headers to be acquired using Fetch Content") +set(VK_BOOTSTRAP_VULKAN_HEADER_DIR "" CACHE FILEPATH "Specify the location of the Vulkan-Headers include directory.") +mark_as_advanced(VK_BOOTSTRAP_VULKAN_HEADER_DIR) + +# Check if the user has set this variable explicitly +if(IS_DIRECTORY ${VK_BOOTSTRAP_VULKAN_HEADER_DIR}) + add_library(Vulkan-Headers INTERFACE) + add_library(Vulkan::Headers ALIAS Vulkan-Headers) + target_include_directories(Vulkan-Headers INTERFACE $) + # If we had to use a direct path to get the headers, disable installing + set(VK_BOOTSTRAP_INSTALL OFF) +# Check if the target is already defined +elseif(NOT TARGET Vulkan::Headers) + if (NOT VK_BOOTSTRAP_FORCE_FETCH_CONTENT) + # Try looking for the VulkanHeaders package directly + find_package(VulkanHeaders CONFIG QUIET) + endif() + if (NOT VulkanHeaders_FOUND) + if (NOT VK_BOOTSTRAP_FORCE_FETCH_CONTENT) + # Try looking using the CMake built in Vulkan support + find_package(Vulkan QUIET) + endif() + + if(Vulkan_FOUND) + # Older CMake versions don't contain Vulkan::Headers - create it in that case + if (NOT TARGET Vulkan::Headers) + add_library(Vulkan-Headers INTERFACE) + add_library(Vulkan::Headers ALIAS Vulkan-Headers) + 
target_include_directories(Vulkan-Headers INTERFACE $) + set(VK_BOOTSTRAP_INSTALL OFF) + endif() + else() + # Lastly just grab Vulkan-Headers directly using FetchContent + include(FetchContent) + FetchContent_Declare( + Vulkan-Headers-for-vk-bootstrap + GIT_REPOSITORY https://github.com/KhronosGroup/Vulkan-Headers + GIT_TAG ${VK_BOOTSTRAP_SOURCE_HEADER_VERSION_GIT_TAG} + ) + FetchContent_MakeAvailable(Vulkan-Headers-for-vk-bootstrap) + # If we had to use FetchContent to get the headers, disable installing + set(VK_BOOTSTRAP_INSTALL OFF) + endif() + endif() +endif() + +if(NOT TARGET Vulkan::Headers) + message(FATAL_ERROR "Unable to locate required dependency Vulkan::Headers!") +endif() + +add_library(vk-bootstrap-compiler-warnings INTERFACE) + +set(VK_BOOTSTRAP_COMPILER_FRONTEND ${CMAKE_CXX_COMPILER_FRONTEND_VARIANT}) +if(NOT VK_BOOTSTRAP_COMPILER_FRONTEND) + set(VK_BOOTSTRAP_COMPILER_FRONTEND "None") +endif() + +if(NOT VK_BOOTSTRAP_DISABLE_WARNINGS) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|AppleClang" OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND VK_BOOTSTRAP_COMPILER_FRONTEND MATCHES "GNU")) + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE -Wall -Wextra -Wconversion -Wsign-conversion) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU") + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE -Wstrict-aliasing) # GCC only warning + endif() + if(VK_BOOTSTRAP_WERROR) + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE -Werror -pedantic -pedantic-errors) + endif() + elseif (CMAKE_CXX_COMPILER_ID MATCHES "MSVC" OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND VK_BOOTSTRAP_COMPILER_FRONTEND MATCHES "MSVC")) + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE /W4) + if(VK_BOOTSTRAP_WERROR) + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE /WX) + endif() + endif() +endif() + +add_library(vk-bootstrap STATIC src/VkBootstrap.h src/VkBootstrap.cpp src/VkBootstrapDispatch.h src/VkBootstrapFeatureChain.h 
src/VkBootstrapFeatureChain.inl) +add_library(vk-bootstrap::vk-bootstrap ALIAS vk-bootstrap) + +target_include_directories(vk-bootstrap PUBLIC + $ + $) +target_link_libraries(vk-bootstrap + PUBLIC + Vulkan::Headers + PRIVATE + vk-bootstrap-compiler-warnings + ${CMAKE_DL_LIBS}) +target_compile_features(vk-bootstrap PUBLIC cxx_std_17) + +option(VK_BOOTSTRAP_POSITION_INDEPENDENT_CODE "Default value is the value of BUILD_SHARED_LIBS" ${BUILD_SHARED_LIBS}) +set_target_properties(vk-bootstrap PROPERTIES POSITION_INDEPENDENT_CODE ${VK_BOOTSTRAP_POSITION_INDEPENDENT_CODE}) + +if(VK_BOOTSTRAP_TEST) + enable_testing() + + option(ENABLE_ADDRESS_SANITIZER "Use address sanitization") + if (ENABLE_ADDRESS_SANITIZER) + if (MSVC) + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE /fsanitize=address $<$>:/wd5072>) + target_link_options(vk-bootstrap-compiler-warnings INTERFACE /INCREMENTAL:NO) + add_compile_definitions(_DISABLE_VECTOR_ANNOTATION _DISABLE_STRING_ANNOTATION) + else() + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE -fsanitize=address) + target_link_options(vk-bootstrap-compiler-warnings INTERFACE -fsanitize=address) + endif() + endif() + + option(ENABLE_THREAD_SANITIZER "Use thread sanitization") + if (ENABLE_THREAD_SANITIZER) + if (MSVC) + message(FATAL_ERROR "MSVC doesn't support thread sanitization!") + else() + target_compile_options(vk-bootstrap-compiler-warnings INTERFACE -fsanitize=thread) + target_link_options(vk-bootstrap-compiler-warnings INTERFACE -fsanitize=thread) + endif() + endif() + + add_subdirectory(ext) + add_subdirectory(tests) + add_subdirectory(example) +endif () + +if (VK_BOOTSTRAP_INSTALL) + + include(GNUInstallDirs) + include(CMakePackageConfigHelpers) + + install(FILES src/VkBootstrap.h src/VkBootstrapDispatch.h src/VkBootstrapFeatureChain.h src/VkBootstrapFeatureChain.inl DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + + install( + TARGETS vk-bootstrap vk-bootstrap-compiler-warnings + EXPORT vk-bootstrap-targets + 
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + ) + install( + EXPORT vk-bootstrap-targets + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/vk-bootstrap + NAMESPACE vk-bootstrap:: + ) + + # Create vk-bootstrap-config.cmake + set(VK_BOOTSTRAP_EXPORT_TARGETS ${CMAKE_INSTALL_LIBDIR}/cmake/vk-bootstrap/vk-bootstrap-targets.cmake) + file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/vk-bootstrap-config.cmake.in" [=[ + @PACKAGE_INIT@ + + # Try to find Vulkan-Headers using find_package. Assume the package is available through either Vulkan-Headers or the older Vulkan + # Package managers should have Vulkan-Headers be a dependency of this repo. + find_package(VulkanHeaders CONFIG) + if (NOT VulkanHeaders_FOUND) + find_package(Vulkan) + if (NOT Vulkan_FOUND) + message(FATAL_ERROR "Unable to locate required dependency Vulkan::Headers!") + endif() + endif() + include(@PACKAGE_VK_BOOTSTRAP_EXPORT_TARGETS@) + ]=]) + + configure_package_config_file( + ${CMAKE_CURRENT_BINARY_DIR}/vk-bootstrap-config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/vk-bootstrap-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/vk-bootstrap + PATH_VARS VK_BOOTSTRAP_EXPORT_TARGETS + NO_SET_AND_CHECK_MACRO + NO_CHECK_REQUIRED_COMPONENTS_MACRO + ) + + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/vk-bootstrap-config.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/vk-bootstrap + ) + +endif() diff --git a/extern/vk-bootstrap/LICENSE.txt b/extern/vk-bootstrap/LICENSE.txt new file mode 100644 index 0000000000..8eececa06c --- /dev/null +++ b/extern/vk-bootstrap/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2020 Charles Giessen (charles@lunarg.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is 
furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/extern/vk-bootstrap/README.md b/extern/vk-bootstrap/README.md new file mode 100644 index 0000000000..620d6bee61 --- /dev/null +++ b/extern/vk-bootstrap/README.md @@ -0,0 +1,141 @@ +# `vk-bootstrap` + +A utility library that jump starts initialization of Vulkan + +This library simplifies the tedious process of: + +* Instance creation +* Physical Device selection +* Device creation +* Getting queues +* Swapchain creation + +It also adds several conveniences for: + +* Enabling validation layers +* Adding a debug callback messenger +* Enabling extensions on a physical device +* Select a gpu based on a set of criteria like features, extensions, memory, etc + +Read the [Getting Started](docs/getting_started.md) guide for a quick start on using `vk-bootstrap` + +## Basic Usage + +See [basic_usage.cpp](./example/basic_usage.cpp) to see 500 lines slimmed down to about 50 + +```cpp +#include "VkBootstrap.h" + +void init_vulkan () { + vkb::InstanceBuilder builder; + auto inst_ret = builder.set_app_name ("Example Vulkan Application") + .request_validation_layers () + .use_default_debug_messenger () + .build (); + if (!inst_ret) { /* report */ } + vkb::Instance vkb_inst = inst_ret.value (); + + vkb::PhysicalDeviceSelector selector{ vkb_inst }; + auto phys_ret = selector.set_surface (surface) + 
.set_minimum_version (1, 1) + .require_dedicated_transfer_queue () + .select (); + if (!phys_ret) { /* report */ } + + vkb::DeviceBuilder device_builder{ phys_ret.value () }; + auto dev_ret = device_builder.build (); + if (!dev_ret) { /* report */ } + vkb::Device vkb_device = dev_ret.value (); + + auto graphics_queue_ret = vkb_device.get_queue (vkb::QueueType::graphics); + if (!graphics_queue_ret) { /* report */ } + VkQueue graphics_queue = graphics_queue_ret.value (); +} +``` + +### More fun quick examples + +- [triangle.cpp](./example/triangle.cpp) - renders a triangle to the screen. +- [simple_compute.cpp](./example/simple_compute.cpp) - Does a simple add with a compute shader and no surface in less than 300 lines. + +## Setting up `vk-bootstrap` + +This library has no external dependencies beyond C++17, its standard library, and at least the 1.1 version of the Vulkan Headers. + +Note: on Unix platforms, `vk-bootstrap` will require the dynamic linker in order to compile as the library doesn't link against `vulkan-1.dll`/`libvulkan.so` directly. + +### Copy-Paste + +Copy the `src/VkBootstrap.h`, `src/VkBootstrapDispatch.h`, and `src/VkBootstrap.cpp` files into your project, include them into your build, then compile as you normally would. + +`vk-bootstrap` is *not* a header only library, so no need to worry about macros in the header. + +#### Linux specific + +vk-bootstrap will load the required symbols at runtime, which requires that the application is linked to the system dynamic link. +How the dynamic linker is linked into the project depends on the build system in question. +If CMake is being used, link vk-bootstrap with `${CMAKE_DL_LIBS}`. + +### git-submodule + CMake + +Add this project as a git-submodule into the root directory. Suggested is using a subdirectory to hold all submodules. 
+ +```bash +git submodule add https://github.com/charles-lunarg/vk-bootstrap +``` + +With CMake, add the subdirectory to include the project + +```cmake +add_subdirectory(vk-bootstrap) +``` + +Then use `target_link_libraries` to use the library in whichever target needs it. + +```cmake +target_link_libraries(your_application_name vk-bootstrap::vk-bootstrap) +``` + +### CMake Fetch Content +If cmake 3.12 is available, use the FetchContent capability of cmake to directly download and build the library for you. + +```cmake +include(FetchContent) +FetchContent_Declare( + fetch_vk_bootstrap + GIT_REPOSITORY https://github.com/charles-lunarg/vk-bootstrap + GIT_TAG BRANCH_OR_TAG #suggest using a tag so the library doesn't update whenever new commits are pushed to a branch +) +FetchContent_MakeAvailable(fetch_vk_bootstrap) +target_link_libraries(your_application_name vk-bootstrap::vk-bootstrap) +``` + +### Manually Building + +```bash +git clone https://github.com/charles-lunarg/vk-bootstrap +cd vk-bootstrap +mkdir build +cd build +cmake .. +``` + +### Vulkan-Headers dependency + +By default, when using vk-bootstrap through CMake, it will attempt to locate the Vulkan-Headers on the system and fall back to downloading them directly if they aren't present. If the `VK_BOOTSTRAP_VULKAN_HEADER_DIR` option is specified, it will use that directory instead. + + +### Testing + +Tests will be enabled if you open this project standalone. If you include this project as a subdirectory or sub-project, you can force enable tests by setting the option `VK_BOOTSTRAP_TEST` to `ON`. Testing requires GLFW and Catch2 but are acquired automatically using cmake fetch content. 
+ +```bash +cmake ../path/to/your_project/ -DVK_BOOTSTRAP_TEST=ON +``` + +### Build Options +| Name | Type | Default Value | Description | +| -------------------------------- | ------ | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `VK_BOOTSTRAP_WERROR` | bool | `OFF` | Enable warnings as errors during compilation. | +| `VK_BOOTSTRAP_TEST` | bool | `OFF` | Enable building of the tests in this project. Will download GLFW and Catch2 automatically if enabled. | +| `VK_BOOTSTRAP_VULKAN_HEADER_DIR` | string | `""` | Optional. Specify the directory that contains the Vulkan Headers. Useful if you are downloading the headers manually and don't want vk-bootstrap to download them itself. | diff --git a/extern/vk-bootstrap/ext/CMakeLists.txt b/extern/vk-bootstrap/ext/CMakeLists.txt new file mode 100644 index 0000000000..4dc1655718 --- /dev/null +++ b/extern/vk-bootstrap/ext/CMakeLists.txt @@ -0,0 +1,25 @@ +option(GLFW_BUILD_TESTS "" OFF) +option(GLFW_BUILD_DOCS "" OFF) +option(GLFW_INSTALL "" OFF) +option(GLFW_BUILD_EXAMPLES "" OFF) + +option(CATCH_BUILD_TESTING "" OFF) +option(CATCH_ENABLE_WERROR "" OFF) +option(CATCH_INSTALL_DOCS "" OFF) +option(CATCH_INSTALL_HELPERS "" OFF) +option(CATCH_INSTALL_EXTRAS "" OFF) + +include(FetchContent) +FetchContent_Declare( + glfw + GIT_REPOSITORY https://github.com/glfw/glfw + GIT_TAG 3.3.8 +) +FetchContent_MakeAvailable(glfw) +FetchContent_Declare( + Catch2 + GIT_REPOSITORY https://github.com/catchorg/Catch2 + GIT_TAG v3.4.0 +) +FetchContent_MakeAvailable(Catch2) + diff --git a/extern/vk-bootstrap/gen/CurrentBuildVulkanVersion.cmake b/extern/vk-bootstrap/gen/CurrentBuildVulkanVersion.cmake new file mode 100644 index 0000000000..21419e150c --- /dev/null +++ b/extern/vk-bootstrap/gen/CurrentBuildVulkanVersion.cmake @@ -0,0 +1,2 @@ +set(VK_BOOTSTRAP_SOURCE_HEADER_VERSION 1.4.330) 
+set(VK_BOOTSTRAP_SOURCE_HEADER_VERSION_GIT_TAG v1.4.330) diff --git a/extern/vk-bootstrap/script/generate_dispatch.py b/extern/vk-bootstrap/script/generate_dispatch.py new file mode 100644 index 0000000000..003767350b --- /dev/null +++ b/extern/vk-bootstrap/script/generate_dispatch.py @@ -0,0 +1,247 @@ + +# +# generate_dispatch.py +# +# Copyright © 2021 Cody Goodson (contact@vibimanx.com) +# Copyright © 2022-2025 Charles Giessen (charles@lunarg.com) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without +# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +# of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +# This file is a part of VkBootstrap +# https://github.com/charles-lunarg/vk-bootstrap + +# This script requires the vulkan_object package to be installed +# https://github.com/KhronosGroup/vulkan-object + +import os +import codecs +from vulkan_object import get_vulkan_object + +vk = get_vulkan_object() + +# Command Exclusions +exclusions = [ + 'vkGetDeviceProcAddr', + 'vkCreateDevice', + 'vkDestroyDevice' +] + +INSTANCE = 'instance' +DEVICE = 'device' + +# No good way to detect incompatibilities with the macro defines and the actual functions. Just keep a list here +HEADER_VERSION_WORKAROUNDS = { + 'vkGetLatencyTimingsNV': '271', # Changed API parameters + 'vkCmdSetDiscardRectangleEnableEXT': '241', # new function in older extension + 'vkCmdSetDiscardRectangleModeEXT': '241', # new function in older extension + 'vkCmdSetExclusiveScissorEnableNV': '241', # Changed API parameters + 'vkCmdInitializeGraphScratchMemoryAMDX': '298', # Changed API parameters + 'vkCmdDispatchGraphAMDX': '298', # Changed API parameters + 'vkCmdDispatchGraphIndirectAMDX': '298', # Changed API parameters + 'vkCmdDispatchGraphIndirectCountAMDX': '298', # Changed API parameters + 'vkCmdDispatchTileQCOM': '316', # Changed API parameters +} + +# License +dispatch_license = '''/* + * Copyright © 2021 Cody Goodson (contact@vibimanx.com) + * Copyright © 2022-2025 Charles Giessen (charles@lunarg.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the “Software”), to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ +''' + +# Info +info = '// This file is a part of VkBootstrap\n' +info += '// https://github.com/charles-lunarg/vk-bootstrap\n\n' + +# # Content +head = '\n#pragma once\n\n#include \n\n' +head += 'namespace vkb {\n\n' + + +def get_command_guards(command): + version_guard = f'defined({command.version.name})' if command.version is not None else '' + ext_guard = ' || '.join([f'defined({e})' for e in command.extensions] ) + return f'{version_guard}{" || " if len(version_guard) > 0 and len(ext_guard) > 0 else ""}{ext_guard}' + +def command_include_guard(command): + guards = get_command_guards(command) + if len(guards) == 0: + return '' + out = '#if (' + guards + ')' + if command.name in HEADER_VERSION_WORKAROUNDS: + out += f' && VK_HEADER_VERSION >= {HEADER_VERSION_WORKAROUNDS[command.name]}' + out += '\n' + return out + +def command_end_include_guard(command): + guards = get_command_guards(command) + return '#endif\n' if len(guards) > 0 else '' + +def command_end_include_guard_member_decl(command): + guards = get_command_guards(command) + if len(guards) == 0: + return '' + return f'#else\n void * fp_{command.name}{{}};\n#endif\n' + +# Types which have been promoted from use the promoted types instead of their original types. 
+# Because we support using vk-bootstrap generated from newer code with older headers, we must manually undo these promotions + +def get_depromotion_map(): + depromotion_map = {} + type_alias_map = {} + + # Gather aliases from all types + for struct_name, struct in vk.structs.items(): + if len(struct.aliases) > 0: + type_alias_map[struct_name] = struct.aliases + for enum_name, enum in vk.structs.items(): + if len(enum.aliases) > 0: + type_alias_map[enum_name] = enum.aliases + for flag_name, flag in vk.enums.items(): + if len(flag.aliases) > 0: + type_alias_map[flag_name] = flag.aliases + for bitmask_name, bitmask in vk.bitmasks.items(): + if len(bitmask.aliases) > 0: + type_alias_map[bitmask_name] = bitmask.aliases + for handle_name, handle in vk.handles.items(): + if len(handle.aliases) > 0: + type_alias_map[handle_name] = handle.aliases + + for command_name, command in vk.commands.items(): + if command.alias is not None: + command_tag = None + for tag in vk.vendorTags: + if tag in command_name: + command_tag = tag + if command_tag is None: + continue + + for param in command.params: + if param.type in type_alias_map: + best_alias_match = [x for x in type_alias_map[param.type] if x.endswith(command_tag)][0] + if command_name not in depromotion_map: + depromotion_map[command_name] = [] + depromotion_map[command_name].append([param.type, best_alias_match]) + + return depromotion_map + +def create_dispatch_table(dispatch_type): + depromotion_map = get_depromotion_map() + + out = '' + if dispatch_type == INSTANCE: + commands = [x for x in vk.commands.values() if x.instance and x.name not in exclusions and x.params[0].type in ['VkInstance','VkPhysicalDevice']] + out += 'struct InstanceDispatchTable {\n' + out += ' InstanceDispatchTable() = default;\n' + out += ' InstanceDispatchTable(VkInstance instance, PFN_vkGetInstanceProcAddr procAddr) : instance(instance), populated(true) {\n' + else: + commands = [x for x in vk.commands.values() if x.device and x.name not in 
exclusions] + out += 'struct DispatchTable {\n' + out += ' DispatchTable() = default;\n' + out += ' DispatchTable(VkDevice device, PFN_vkGetDeviceProcAddr procAddr) : device(device), populated(true) {\n' + + commands = sorted(commands) + + for command in commands: + out += command_include_guard(command) + out += f' fp_{command.name} = reinterpret_cast(procAddr({dispatch_type}, "{command.name}"));\n' + out += command_end_include_guard(command) + out += ' }\n' + + for command in commands: + out += command_include_guard(command) + + params = command.params + if command.params[0].name == dispatch_type: + params = params[1:] + param_decl = [x.cDeclaration.strip() for x in params] + param_names = [x.name for x in params] + if command.name in depromotion_map: + new_param_decl = [] + cur_depromotion_list = depromotion_map[command.name] + for param in param_decl: + valid_demotions = [] + for cur_depromotion in cur_depromotion_list: + if cur_depromotion[0] in param: + valid_demotions.append(cur_depromotion) + if len(valid_demotions) > 0: + + depromotion_to_apply = valid_demotions[0] + # Sometimes demotions are valid substrings of the wrong parameter type. Because we just concat the cDecl, there isn't a great way to detect this. + # Rather, we just pick the longest depromotion available. 
+ for cur_depromotion in valid_demotions: + if len(cur_depromotion[0]) > len(depromotion_to_apply[0]): + depromotion_to_apply = cur_depromotion + new_param_decl.append(param.replace(depromotion_to_apply[0], depromotion_to_apply[1])) + else: + new_param_decl.append(param) + param_decl = new_param_decl + + if command.params[0].name == dispatch_type: + param_names.insert(0, dispatch_type) + out += f' {command.returnType} {command.name[2].lower()}{command.name[3:]}({", ".join(param_decl)}) const noexcept {{\n {"return " if command.returnType != "void" else ""}fp_{command.name}({", ".join(param_names)});\n }}\n' + out += command_end_include_guard(command) + + for command in commands: + out += command_include_guard(command) + out += f' PFN_{command.name} fp_{command.name} = nullptr;\n' + out += command_end_include_guard_member_decl(command) + + out += ' bool is_populated() const { return populated; }\n' + out += f' Vk{dispatch_type.capitalize()} {dispatch_type} = VK_NULL_HANDLE;\n' + out += 'private:\n' + out += ' bool populated = false;\n' + out += '};\n\n' + return out + +tail = '} // namespace vkb' + +# find the version used to generate the code +path_to_src = os.path.join('src') +if not os.path.exists(path_to_src): + path_to_src = os.path.join('..', 'src') +if not os.path.exists(path_to_src): + print('Could not find source folder. Is the current directory wrong?') + exit(-1) + +header_file = codecs.open(os.path.join(path_to_src,'VkBootstrapDispatch.h'), 'w', 'utf-8') +header_file.write(dispatch_license + info + head + create_dispatch_table('instance') + create_dispatch_table('device') + tail) +header_file.close() + +path_to_gen = os.path.join('gen') +if not os.path.exists(path_to_gen): + path_to_gen = os.path.join('..', 'gen') +if not os.path.exists(path_to_gen): + print('Could not find gen folder. Is the current directory wrong?') + exit(-1) + +# Generate a CMake file that contains the header version used. 
+cmake_version_file = codecs.open(os.path.join(path_to_gen,'CurrentBuildVulkanVersion.cmake'), 'w', 'utf-8') +cmake_version_file.write(f'set(VK_BOOTSTRAP_SOURCE_HEADER_VERSION {vk.headerVersionComplete})\n') +cmake_version_file.write(f'set(VK_BOOTSTRAP_SOURCE_HEADER_VERSION_GIT_TAG v{vk.headerVersionComplete})\n') +cmake_version_file.close() + +print('Generation finished.') diff --git a/extern/vk-bootstrap/script/generate_features_chain.py b/extern/vk-bootstrap/script/generate_features_chain.py new file mode 100644 index 0000000000..79c9f67b7b --- /dev/null +++ b/extern/vk-bootstrap/script/generate_features_chain.py @@ -0,0 +1,245 @@ + +# +# generate_features_chain.py +# +# Copyright © 2025 Charles Giessen (charles@lunarg.com) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without +# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +# of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +# This file is a part of VkBootstrap +# https://github.com/charles-lunarg/vk-bootstrap + +# This script requires the vulkan_object package to be installed +# https://github.com/KhronosGroup/vulkan-object + +import os +import codecs +from vulkan_object import get_vulkan_object + +vk = get_vulkan_object() + +header = '''/* + * Copyright © 2025 Charles Giessen (charles@lunarg.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the “Software”), to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +// This file is a part of VkBootstrap +// https://github.com/charles-lunarg/vk-bootstrap + +#pragma once +#include +#include +#include +''' + +namespace_decl = ''' +namespace vkb::detail { + +''' +feature_struct_ext_map = {} +feature_struct_ext_stype_map = {} + +def setup_feature_struct_aliases(): + for feature in [x for x in vk.structs.values() if x.extends is not None and 'VkPhysicalDeviceFeatures2' in x.extends and len(x.aliases) >0]: + type_list = [feature.name] + feature.aliases + for type_name in type_list: + for ext in feature.extensions: + if vk.extensions[ext].vendorTag is not None and type_name.endswith(vk.extensions[ext].vendorTag): + feature_struct_ext_map[type_name] = ext + for sTypes in vk.extensions[ext].enumFields['VkStructureType']: + if sTypes.name == feature.sType and type_name is not feature.name: + feature_struct_ext_stype_map[type_name] = sTypes.aliases[0] + break + break + +def get_struct_guards(struct, struct_name): + reqs = [] + if struct.protect is not None: + reqs.append( f'defined({struct.protect})') + enablement = [] + if struct.version is not None: + enablement.append(f'defined({struct.version.name})') + if struct_name in feature_struct_ext_map: + enablement.append(f'defined({feature_struct_ext_map[struct_name]})') + else: + for ext in struct.extensions: + enablement.append(f'defined({ext})') + reqs.append('(' + ' || '.join(enablement) + ')') + return ' && '.join(reqs) + +def get_struct_guards_start(struct, struct_name): + guard = get_struct_guards(struct, struct_name) + return f'#if {guard}\n' if guard else '\n' + +def get_struct_guards_end(struct, struct_name): + guard = get_struct_guards(struct, struct_name) + return f'#endif //{guard}\n' if guard else '\n' + +def print_required_platform_defines(): + required_platforms = set() + for feature in [x for x in vk.structs.values() if x.extends is not None and 'VkPhysicalDeviceFeatures2' in x.extends]: + if feature.protect: + required_platforms.add(feature.protect) + + 
required_platforms = sorted(required_platforms) + out = '' + for required_platform in required_platforms: + out += f'#if defined({required_platform})\n' + if required_platform == 'VK_ENABLE_BETA_EXTENSIONS': + out += '#include \n' + elif required_platform == 'VK_USE_PLATFORM_SCREEN_QNX': + out += '#include \n' + out += '#include \n' + elif required_platform == 'VK_USE_PLATFORM_ANDROID_KHR': + out += '#include \n' + else: + raise RuntimeError(f'Unhandled required platform {required_platform}!') + out += f'#endif // defined({required_platform})\n' + return out + +def create_feature_chain_declarations(): + out = '' + out += 'void compare_VkPhysicalDeviceFeatures(std::vector & error_list, VkPhysicalDeviceFeatures const& supported, VkPhysicalDeviceFeatures const& requested);\n' + out += 'void merge_VkPhysicalDeviceFeatures(VkPhysicalDeviceFeatures & current, VkPhysicalDeviceFeatures const& merge_in);\n' + + for feature in [x for x in vk.structs.values() if x.extends is not None and 'VkPhysicalDeviceFeatures2' in x.extends]: + type_list = [feature.name] + feature.aliases + for type_name in type_list: + out += get_struct_guards_start(feature, type_name) + out += f'void compare_{type_name}(std::vector & error_list, {type_name} const& supported, {type_name} const& requested);\n' + out += f'void merge_{type_name}({type_name} & current, {type_name} const& merge_in);\n' + out += get_struct_guards_end(feature, type_name) + + out += 'void compare_feature_struct(VkStructureType sType, std::vector & error_list, const void* supported, const void* requested);\n' + out += 'void merge_feature_struct(VkStructureType sType, void* current, const void* merge_in);\n' + return out + +def create_feature_chain_definitions(): + out = '' + + out += 'void compare_VkPhysicalDeviceFeatures(std::vector & error_list, VkPhysicalDeviceFeatures const& supported, VkPhysicalDeviceFeatures const& requested) {\n' + for member in vk.structs['VkPhysicalDeviceFeatures'].members: + if member.name in ['sType', 
'pNext']: + continue + out += f' if (requested.{member.name} && !supported.{member.name}) {{\n error_list.push_back("Missing feature VkPhysicalDeviceFeatures::{member.name}");\n }}\n' + out += '}\n' + out += 'void merge_VkPhysicalDeviceFeatures(VkPhysicalDeviceFeatures & current, VkPhysicalDeviceFeatures const& merge_in) {\n' + for member in vk.structs['VkPhysicalDeviceFeatures'].members: + if member.name in ['sType', 'pNext']: + continue + out += f' current.{member.name} = current.{member.name} || merge_in.{member.name};\n' + out += '}\n' + + for feature in [x for x in vk.structs.values() if x.extends is not None and 'VkPhysicalDeviceFeatures2' in x.extends]: + type_list = [feature.name] + feature.aliases + for type_name in type_list: + out += get_struct_guards_start(feature, type_name) + + out += f'void compare_{type_name}(std::vector & error_list, {type_name} const& supported, {type_name} const& requested) {{\n' + for member in feature.members: + if member.name in ['sType', 'pNext']: + continue + out += f' if (requested.{member.name} && !supported.{member.name}) {{\n error_list.push_back("Missing feature {type_name}::{member.name}");\n }}\n' + + out += '}\n' + out += f'void merge_{type_name}({type_name} & current, {type_name} const& merge_in) {{\n' + for member in feature.members: + if member.name in ['sType', 'pNext']: + continue + out += f' current.{member.name} = current.{member.name} || merge_in.{member.name};\n' + + out += '}\n' + out += get_struct_guards_end(feature, type_name) + + out += 'void compare_feature_struct(VkStructureType sType, std::vector & error_list, const void* supported, const void* requested) {\n' + out += ' switch (sType) {\n' + for feature in [x for x in vk.structs.values() if x.extends is not None and 'VkPhysicalDeviceFeatures2' in x.extends]: + type_list = [feature.name] + feature.aliases + printed_already = False + guard = None + for type_name in type_list: + guard = get_struct_guards(feature, type_name) + out += f'#{"el" if 
printed_already else ""}if {guard}\n' + printed_already = True + out += f' case({feature_struct_ext_stype_map[type_name] if type_name in feature_struct_ext_stype_map else feature.sType}):\n' + out += f' compare_{type_name}(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested));\n' + out += ' break;\n' + if not guard is None: + out += '#endif\n' + out += ' default:\n' + out += ' break;\n' + out += ' }\n' + out += '}\n' + + out += 'void merge_feature_struct(VkStructureType sType, void* current, const void* merge_in) {\n' + out += ' switch (sType) {\n' + for feature in [x for x in vk.structs.values() if x.extends is not None and 'VkPhysicalDeviceFeatures2' in x.extends]: + type_list = [feature.name] + feature.aliases + printed_already = False + guard = None + for type_name in type_list: + guard = get_struct_guards(feature, type_name) + out += f'#{"el" if printed_already else ""}if {guard}\n' + printed_already = True + out += f' case({feature_struct_ext_stype_map[type_name] if type_name in feature_struct_ext_stype_map else feature.sType}):\n' + out += f' merge_{type_name}(*reinterpret_cast<{type_name}*>(current), *reinterpret_cast(merge_in));\n' + out += ' break;\n' + if not guard is None: + out += '#endif\n' + out += ' default:\n' + out += ' break;\n' + out += ' }\n' + out += '}\n' + + return out + +tail = '} // namespace vkb' + +# find the version used to generate the code +path_to_src = os.path.join('src') +if not os.path.exists(path_to_src): + path_to_src = os.path.join('..', 'src') +if not os.path.exists(path_to_src): + print('Could not find source folder. 
Is the current directory wrong?') + exit(-1) + +setup_feature_struct_aliases() + +features_chain = codecs.open(os.path.join(path_to_src,'VkBootstrapFeatureChain.h'), 'w', 'utf-8') +features_chain.write(header + print_required_platform_defines() + namespace_decl + create_feature_chain_declarations() + tail) +features_chain.close() + +features_chain_inl = codecs.open(os.path.join(path_to_src,'VkBootstrapFeatureChain.inl'), 'w', 'utf-8') +features_chain_inl.write(header + namespace_decl + create_feature_chain_definitions() + tail) +features_chain_inl.close() + +path_to_gen = os.path.join('gen') +if not os.path.exists(path_to_gen): + path_to_gen = os.path.join('..', 'gen') +if not os.path.exists(path_to_gen): + print('Could not find gen folder. Is the current directory wrong?') + exit(-1) + +print('Generation finished.') diff --git a/extern/vk-bootstrap/src/VkBootstrap.cpp b/extern/vk-bootstrap/src/VkBootstrap.cpp new file mode 100644 index 0000000000..b723ac6aa3 --- /dev/null +++ b/extern/vk-bootstrap/src/VkBootstrap.cpp @@ -0,0 +1,2176 @@ +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the “Software”), to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright © 2020 Charles Giessen (charles@lunarg.com)
+ */
+
+#include "VkBootstrap.h"
+
+#include <cstring>
+
+#if defined(_WIN32)
+#include <fcntl.h>
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h>
+#endif // _WIN32
+
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__)
+#include <dlfcn.h>
+#endif
+
+#include <mutex>
+#include <algorithm>
+
+#include "VkBootstrapFeatureChain.inl"
+
+namespace vkb {
+
+namespace detail {
+
+bool FeaturesChain::empty() const { return structure_infos.empty(); }
+
+bool FeaturesChain::is_feature_struct_in_chain(VkStructureType sType) const {
+    return structure_infos.end() != find_sType(sType);
+}
+
+void FeaturesChain::add_structure(VkStructureType sType, size_t struct_size, const void* structure) {
+#if !defined(NDEBUG)
+    // Validation
+    assert(sType != static_cast<VkStructureType>(0) && "Features struct sType must be filled with the struct's "
+                                                       "corresponding VkStructureType enum");
+    assert(sType != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 &&
+           "Do not pass VkPhysicalDeviceFeatures2 as a required extension feature structure.
An " + "instance of this is managed internally for selection criteria and device creation."); +#endif + + auto found = find_sType(sType); + if (found != structure_infos.end()) { + // Merge structure into the current structure +#if !defined(NDEBUG) + assert(found->starting_location + found->struct_size <= structures.size() && + "Internal Consistency Error: FeatureChain::add_structure tyring to merge structures into memory that is " + "past the end of the structures array"); +#endif + merge_feature_struct(sType, &(structures.at(found->starting_location)), structure); + } else { + // Add a structure into the chain + structure_infos.push_back(StructInfo{ sType, structures.size(), struct_size }); + auto& new_structure_info = structure_infos.back(); + structures.insert(structures.end(), struct_size, uint8_t(0)); + memcpy(&(structures.at(new_structure_info.starting_location)), structure, struct_size); + } +} + +void FeaturesChain::remove_structure(VkStructureType sType) { + auto found = find_sType(sType); + if (found != structure_infos.end()) { + if (found->starting_location + found->struct_size < structures.size()) { + structures.erase(structures.begin() + static_cast(found->starting_location), + structures.begin() + static_cast(found->starting_location + found->struct_size)); + structure_infos.erase(found); + } + } +} + +bool FeaturesChain::match(VkStructureType sType, const void* structure) const { + auto found = find_sType(sType); + if (found != structure_infos.end()) { + std::vector error_list; + compare_feature_struct(sType, error_list, &(structures.at(found->starting_location)), structure); + return error_list.empty(); + } else { + return false; + } +} + +void FeaturesChain::match_all(std::vector& error_list, FeaturesChain const& requested_features_chain) const { + if (structure_infos.size() != requested_features_chain.structure_infos.size()) { + return; + } + for (size_t i = 0; i < structure_infos.size(); ++i) { + 
compare_feature_struct(structure_infos.at(i).sType, + error_list, + &(structures.at(structure_infos.at(i).starting_location)), + &(requested_features_chain.structures.at(requested_features_chain.structure_infos.at(i).starting_location))); + } +} + +void FeaturesChain::create_chained_features(VkPhysicalDeviceFeatures2& features2) { + features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; + features2.pNext = &structures.at(0); + // Write the address of structure N+1 to the pNext member of structure N + for (size_t i = 0; i < structure_infos.size() - 1; i++) { + VkBaseOutStructure structure{}; + memcpy(&structure, &(structures.at(structure_infos.at(i).starting_location)), sizeof(VkBaseOutStructure)); + structure.pNext = reinterpret_cast(&(structures.at(structure_infos.at(i + 1).starting_location))); + memcpy(&(structures.at(structure_infos.at(i).starting_location)), &structure, sizeof(VkBaseOutStructure)); + } + // Write nullptr to the last structures pNext member + VkBaseOutStructure structure{}; + memcpy(&structure, &(structures.at(structure_infos.back().starting_location)), sizeof(VkBaseOutStructure)); + structure.pNext = nullptr; + memcpy(&(structures.at(structure_infos.back().starting_location)), &structure, sizeof(VkBaseOutStructure)); +} + +std::vector FeaturesChain::get_pNext_chain_members() { + std::vector members; + for (const auto& structure_info : structure_infos) { + members.push_back(&(structures.at(structure_info.starting_location))); + } + return members; +} + +std::vector::const_iterator FeaturesChain::find_sType(VkStructureType sType) const { + return std::find_if(structure_infos.begin(), structure_infos.end(), [sType](StructInfo const& struct_info) { + return struct_info.sType == sType; + }); +} + + +class VulkanFunctions { + private: + std::mutex init_mutex; + bool initialized = false; + + std::mutex instance_functions_mutex; + bool instance_functions_initialized = false; + +#if defined(__linux__) || defined(__APPLE__) || 
defined(__FreeBSD__) + void* library = nullptr; +#elif defined(_WIN32) + HMODULE library = nullptr; +#endif + + bool load_vulkan_library() { + // Can immediately return if it has already been loaded + if (library) { + return true; + } +#if defined(__linux__) || defined(__FreeBSD__) + library = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL); + if (!library) library = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL); +#elif defined(__APPLE__) + library = dlopen("libvulkan.dylib", RTLD_NOW | RTLD_LOCAL); + if (!library) library = dlopen("libvulkan.1.dylib", RTLD_NOW | RTLD_LOCAL); + if (!library) library = dlopen("libMoltenVK.dylib", RTLD_NOW | RTLD_LOCAL); +#elif defined(_WIN32) + library = LoadLibrary(TEXT("vulkan-1.dll")); +#else + assert(false && "Unsupported platform"); +#endif + if (!library) return false; + load_func(ptr_vkGetInstanceProcAddr, "vkGetInstanceProcAddr"); + return ptr_vkGetInstanceProcAddr != nullptr; + } + + template void load_func(T& func_dest, const char* func_name) { +#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) + func_dest = reinterpret_cast(dlsym(library, func_name)); +#elif defined(_WIN32) + // GetProcAddress returns FARPROC, so need to cast it into a void* which can safely be cast to T + func_dest = reinterpret_cast(reinterpret_cast(GetProcAddress(library, func_name))); +#endif + } + void unload_vulkan_library() { + if (!library) { + return; + } +#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) + dlclose(library); +#elif defined(_WIN32) + FreeLibrary(library); +#endif + library = nullptr; + } + + public: + bool init_vulkan_funcs(PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr = nullptr) { + std::lock_guard lg(init_mutex); + if (initialized) { + return true; + } + if (fp_vkGetInstanceProcAddr != nullptr) { + ptr_vkGetInstanceProcAddr = fp_vkGetInstanceProcAddr; + } else { + bool ret = load_vulkan_library(); + if (!ret) return false; + } + + fp_vkEnumerateInstanceExtensionProperties = 
reinterpret_cast( + ptr_vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceExtensionProperties")); + fp_vkEnumerateInstanceLayerProperties = reinterpret_cast( + ptr_vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceLayerProperties")); + fp_vkEnumerateInstanceVersion = reinterpret_cast( + ptr_vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkEnumerateInstanceVersion")); + fp_vkCreateInstance = + reinterpret_cast(ptr_vkGetInstanceProcAddr(VK_NULL_HANDLE, "vkCreateInstance")); + initialized = true; + return true; + } + + template void get_inst_proc_addr(T& out_ptr, const char* func_name) { + out_ptr = reinterpret_cast(ptr_vkGetInstanceProcAddr(instance, func_name)); + } + + template void get_device_proc_addr(VkDevice device, T& out_ptr, const char* func_name) { + out_ptr = reinterpret_cast(fp_vkGetDeviceProcAddr(device, func_name)); + } + + PFN_vkGetInstanceProcAddr ptr_vkGetInstanceProcAddr = nullptr; + VkInstance instance = nullptr; + + PFN_vkEnumerateInstanceExtensionProperties fp_vkEnumerateInstanceExtensionProperties = nullptr; + PFN_vkEnumerateInstanceLayerProperties fp_vkEnumerateInstanceLayerProperties = nullptr; + PFN_vkEnumerateInstanceVersion fp_vkEnumerateInstanceVersion = nullptr; + PFN_vkCreateInstance fp_vkCreateInstance = nullptr; + + PFN_vkDestroyInstance fp_vkDestroyInstance = nullptr; + PFN_vkCreateDebugUtilsMessengerEXT fp_vkCreateDebugUtilsMessengerEXT = nullptr; + PFN_vkDestroyDebugUtilsMessengerEXT fp_vkDestroyDebugUtilsMessengerEXT = nullptr; + PFN_vkEnumeratePhysicalDevices fp_vkEnumeratePhysicalDevices = nullptr; + PFN_vkGetPhysicalDeviceFeatures fp_vkGetPhysicalDeviceFeatures = nullptr; + PFN_vkGetPhysicalDeviceFeatures2 fp_vkGetPhysicalDeviceFeatures2 = nullptr; + PFN_vkGetPhysicalDeviceFeatures2KHR fp_vkGetPhysicalDeviceFeatures2KHR = nullptr; + PFN_vkGetPhysicalDeviceProperties fp_vkGetPhysicalDeviceProperties = nullptr; + PFN_vkGetPhysicalDeviceQueueFamilyProperties fp_vkGetPhysicalDeviceQueueFamilyProperties = nullptr; + 
PFN_vkGetPhysicalDeviceMemoryProperties fp_vkGetPhysicalDeviceMemoryProperties = nullptr; + PFN_vkEnumerateDeviceExtensionProperties fp_vkEnumerateDeviceExtensionProperties = nullptr; + + PFN_vkCreateDevice fp_vkCreateDevice = nullptr; + PFN_vkGetDeviceProcAddr fp_vkGetDeviceProcAddr = nullptr; + + PFN_vkDestroySurfaceKHR fp_vkDestroySurfaceKHR = nullptr; + PFN_vkGetPhysicalDeviceSurfaceSupportKHR fp_vkGetPhysicalDeviceSurfaceSupportKHR = nullptr; + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR fp_vkGetPhysicalDeviceSurfaceFormatsKHR = nullptr; + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR fp_vkGetPhysicalDeviceSurfacePresentModesKHR = nullptr; + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = nullptr; + + void init_instance_funcs(VkInstance inst) { + std::lock_guard lg(instance_functions_mutex); + if (instance_functions_initialized) return; + instance = inst; + get_inst_proc_addr(fp_vkDestroyInstance, "vkDestroyInstance"); + get_inst_proc_addr(fp_vkCreateDebugUtilsMessengerEXT, "vkCreateDebugUtilsMessengerEXT"); + get_inst_proc_addr(fp_vkDestroyDebugUtilsMessengerEXT, "vkDestroyDebugUtilsMessengerEXT"); + get_inst_proc_addr(fp_vkEnumeratePhysicalDevices, "vkEnumeratePhysicalDevices"); + + get_inst_proc_addr(fp_vkGetPhysicalDeviceFeatures, "vkGetPhysicalDeviceFeatures"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceFeatures2, "vkGetPhysicalDeviceFeatures2"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceFeatures2KHR, "vkGetPhysicalDeviceFeatures2KHR"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceQueueFamilyProperties, "vkGetPhysicalDeviceQueueFamilyProperties"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + get_inst_proc_addr(fp_vkEnumerateDeviceExtensionProperties, "vkEnumerateDeviceExtensionProperties"); + + get_inst_proc_addr(fp_vkCreateDevice, "vkCreateDevice"); + 
get_inst_proc_addr(fp_vkGetDeviceProcAddr, "vkGetDeviceProcAddr"); + + get_inst_proc_addr(fp_vkDestroySurfaceKHR, "vkDestroySurfaceKHR"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceSurfaceSupportKHR, "vkGetPhysicalDeviceSurfaceSupportKHR"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceSurfaceFormatsKHR, "vkGetPhysicalDeviceSurfaceFormatsKHR"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceSurfacePresentModesKHR, "vkGetPhysicalDeviceSurfacePresentModesKHR"); + get_inst_proc_addr(fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); + instance_functions_initialized = true; + } + + void deinit_all() { + { + std::lock_guard lg(instance_functions_mutex); + if (instance_functions_initialized) { + instance_functions_initialized = false; + } + } + { + std::lock_guard lg(init_mutex); + if (initialized) { + unload_vulkan_library(); + initialized = false; + } + } + } +}; + +static VulkanFunctions& vulkan_functions() { + static VulkanFunctions v; + return v; +} + +// Helper for robustly executing the two-call pattern +template auto get_vector(std::vector& out, F&& f, Ts&&... ts) -> VkResult { + uint32_t count = 0; + VkResult err; + do { + err = f(ts..., &count, nullptr); + if (err != VK_SUCCESS) { + return err; + }; + out.resize(count); + err = f(ts..., &count, out.data()); + out.resize(count); + } while (err == VK_INCOMPLETE); + return err; +} + +template auto get_vector_noerror(F&& f, Ts&&... 
ts) -> std::vector { + uint32_t count = 0; + std::vector results; + f(ts..., &count, nullptr); + results.resize(count); + f(ts..., &count, results.data()); + results.resize(count); + return results; +} +} // namespace detail + +const char* to_string_message_severity(VkDebugUtilsMessageSeverityFlagBitsEXT s) { + switch (s) { + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT: + return "VERBOSE"; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT: + return "ERROR"; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT: + return "WARNING"; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT: + return "INFO"; + default: + return "UNKNOWN"; + } +} +const char* to_string_message_type(VkDebugUtilsMessageTypeFlagsEXT s) { + if (s == 7) return "General | Validation | Performance"; + if (s == 6) return "Validation | Performance"; + if (s == 5) return "General | Performance"; + if (s == 4 /*VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT*/) return "Performance"; + if (s == 3) return "General | Validation"; + if (s == 2 /*VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT*/) return "Validation"; + if (s == 1 /*VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT*/) return "General"; + return "Unknown"; +} + +VkResult create_debug_utils_messenger(VkInstance instance, + PFN_vkDebugUtilsMessengerCallbackEXT debug_callback, + VkDebugUtilsMessageSeverityFlagsEXT severity, + VkDebugUtilsMessageTypeFlagsEXT type, + void* user_data_pointer, + VkDebugUtilsMessengerEXT* pDebugMessenger, + VkAllocationCallbacks* allocation_callbacks) { + + if (debug_callback == nullptr) debug_callback = default_debug_callback; + VkDebugUtilsMessengerCreateInfoEXT messengerCreateInfo = {}; + messengerCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + messengerCreateInfo.pNext = nullptr; + messengerCreateInfo.messageSeverity = severity; + messengerCreateInfo.messageType = type; + messengerCreateInfo.pfnUserCallback = debug_callback; + messengerCreateInfo.pUserData = user_data_pointer; 
+ + if (detail::vulkan_functions().fp_vkCreateDebugUtilsMessengerEXT != nullptr) { + return detail::vulkan_functions().fp_vkCreateDebugUtilsMessengerEXT( + instance, &messengerCreateInfo, allocation_callbacks, pDebugMessenger); + } else { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } +} + +void destroy_debug_utils_messenger( + VkInstance instance, VkDebugUtilsMessengerEXT debugMessenger, VkAllocationCallbacks* allocation_callbacks) { + + if (detail::vulkan_functions().fp_vkDestroyDebugUtilsMessengerEXT != nullptr) { + detail::vulkan_functions().fp_vkDestroyDebugUtilsMessengerEXT(instance, debugMessenger, allocation_callbacks); + } +} + +namespace detail { +bool check_layer_supported(std::vector const& available_layers, const char* layer_name) { + if (!layer_name) return false; + for (const auto& layer_properties : available_layers) { + if (strcmp(layer_name, layer_properties.layerName) == 0) { + return true; + } + } + return false; +} + +bool check_layers_supported(std::vector const& available_layers, std::vector const& layer_names) { + bool all_found = true; + for (const auto& layer_name : layer_names) { + bool found = check_layer_supported(available_layers, layer_name); + if (!found) all_found = false; + } + return all_found; +} + +bool check_extension_supported(std::vector const& available_extensions, const char* extension_name) { + if (!extension_name) return false; + for (const auto& extension_properties : available_extensions) { + if (strcmp(extension_name, extension_properties.extensionName) == 0) { + return true; + } + } + return false; +} + +bool check_extensions_supported( + std::vector const& available_extensions, std::vector const& extension_names) { + bool all_found = true; + for (const auto& extension_name : extension_names) { + bool found = check_extension_supported(available_extensions, extension_name); + if (!found) all_found = false; + } + return all_found; +} + +template void setup_pNext_chain(T& structure, std::vector const& structs) { + 
structure.pNext = nullptr; + if (structs.empty()) return; + for (size_t i = 0; i < structs.size() - 1; i++) { + VkBaseOutStructure out_structure{}; + memcpy(&out_structure, structs.at(i), sizeof(VkBaseOutStructure)); +#if !defined(NDEBUG) + assert(out_structure.sType != VK_STRUCTURE_TYPE_APPLICATION_INFO); +#endif + out_structure.pNext = static_cast(structs.at(i + 1)); + memcpy(structs.at(i), &out_structure, sizeof(VkBaseOutStructure)); + } + VkBaseOutStructure out_structure{}; + memcpy(&out_structure, structs.back(), sizeof(VkBaseOutStructure)); + out_structure.pNext = nullptr; +#if !defined(NDEBUG) + assert(out_structure.sType != VK_STRUCTURE_TYPE_APPLICATION_INFO); +#endif + memcpy(structs.back(), &out_structure, sizeof(VkBaseOutStructure)); + structure.pNext = structs.at(0); +} +const char* validation_layer_name = "VK_LAYER_KHRONOS_validation"; + +struct InstanceErrorCategory : std::error_category { + const char* name() const noexcept override { return "vkb_instance"; } + std::string message(int err) const override { return to_string(static_cast(err)); } +}; +const InstanceErrorCategory instance_error_category; + +struct PhysicalDeviceErrorCategory : std::error_category { + const char* name() const noexcept override { return "vkb_physical_device"; } + std::string message(int err) const override { return to_string(static_cast(err)); } +}; +const PhysicalDeviceErrorCategory physical_device_error_category; + +struct QueueErrorCategory : std::error_category { + const char* name() const noexcept override { return "vkb_queue"; } + std::string message(int err) const override { return to_string(static_cast(err)); } +}; +const QueueErrorCategory queue_error_category; + +struct DeviceErrorCategory : std::error_category { + const char* name() const noexcept override { return "vkb_device"; } + std::string message(int err) const override { return to_string(static_cast(err)); } +}; +const DeviceErrorCategory device_error_category; + +struct SwapchainErrorCategory : 
std::error_category { + const char* name() const noexcept override { return "vbk_swapchain"; } + std::string message(int err) const override { return to_string(static_cast(err)); } +}; +const SwapchainErrorCategory swapchain_error_category; + +} // namespace detail + +std::error_code make_error_code(InstanceError instance_error) { + return { static_cast(instance_error), detail::instance_error_category }; +} +std::error_code make_error_code(PhysicalDeviceError physical_device_error) { + return { static_cast(physical_device_error), detail::physical_device_error_category }; +} +std::error_code make_error_code(QueueError queue_error) { + return { static_cast(queue_error), detail::queue_error_category }; +} +std::error_code make_error_code(DeviceError device_error) { + return { static_cast(device_error), detail::device_error_category }; +} +std::error_code make_error_code(SwapchainError swapchain_error) { + return { static_cast(swapchain_error), detail::swapchain_error_category }; +} +#define CASE_TO_STRING(CATEGORY, TYPE) \ + case CATEGORY::TYPE: \ + return #TYPE; + +const char* to_string(InstanceError err) { + switch (err) { + CASE_TO_STRING(InstanceError, vulkan_unavailable) + CASE_TO_STRING(InstanceError, vulkan_version_unavailable) + CASE_TO_STRING(InstanceError, vulkan_version_1_1_unavailable) + CASE_TO_STRING(InstanceError, vulkan_version_1_2_unavailable) + CASE_TO_STRING(InstanceError, vulkan_version_1_3_unavailable) + CASE_TO_STRING(InstanceError, vulkan_version_1_4_unavailable) + CASE_TO_STRING(InstanceError, failed_create_debug_messenger) + CASE_TO_STRING(InstanceError, failed_create_instance) + CASE_TO_STRING(InstanceError, requested_layers_not_present) + CASE_TO_STRING(InstanceError, requested_extensions_not_present) + CASE_TO_STRING(InstanceError, windowing_extensions_not_present) + default: + return ""; + } +} +const char* to_string(PhysicalDeviceError err) { + switch (err) { + CASE_TO_STRING(PhysicalDeviceError, no_surface_provided) + 
CASE_TO_STRING(PhysicalDeviceError, failed_enumerate_physical_devices) + CASE_TO_STRING(PhysicalDeviceError, no_physical_devices_found) + CASE_TO_STRING(PhysicalDeviceError, no_suitable_device) + default: + return ""; + } +} +const char* to_string(QueueError err) { + switch (err) { + CASE_TO_STRING(QueueError, present_unavailable) + CASE_TO_STRING(QueueError, graphics_unavailable) + CASE_TO_STRING(QueueError, compute_unavailable) + CASE_TO_STRING(QueueError, transfer_unavailable) + CASE_TO_STRING(QueueError, queue_index_out_of_range) + CASE_TO_STRING(QueueError, invalid_queue_family_index) + default: + return ""; + } +} +const char* to_string(DeviceError err) { + switch (err) { + CASE_TO_STRING(DeviceError, failed_create_device) + default: + return ""; + } +} +const char* to_string(SwapchainError err) { + switch (err) { + CASE_TO_STRING(SwapchainError, surface_handle_not_provided) + CASE_TO_STRING(SwapchainError, failed_query_surface_support_details) + CASE_TO_STRING(SwapchainError, failed_create_swapchain) + CASE_TO_STRING(SwapchainError, failed_get_swapchain_images) + CASE_TO_STRING(SwapchainError, failed_create_swapchain_image_views) + CASE_TO_STRING(SwapchainError, required_min_image_count_too_low) + CASE_TO_STRING(SwapchainError, required_usage_not_supported) + default: + return ""; + } +} + +Result SystemInfo::get_system_info() { + if (!detail::vulkan_functions().init_vulkan_funcs(nullptr)) { + return make_error_code(InstanceError::vulkan_unavailable); + } + return SystemInfo(); +} + +Result SystemInfo::get_system_info(PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr) { + // Using externally provided function pointers, assume the loader is available + if (!detail::vulkan_functions().init_vulkan_funcs(fp_vkGetInstanceProcAddr)) { + return make_error_code(InstanceError::vulkan_unavailable); + } + return SystemInfo(); +} + +SystemInfo::SystemInfo() { + auto available_layers_ret = detail::get_vector( + this->available_layers, 
detail::vulkan_functions().fp_vkEnumerateInstanceLayerProperties); + if (available_layers_ret != VK_SUCCESS) { + this->available_layers.clear(); + } + + for (auto& layer : this->available_layers) + if (strcmp(layer.layerName, detail::validation_layer_name) == 0) validation_layers_available = true; + + auto available_extensions_ret = detail::get_vector( + this->available_extensions, detail::vulkan_functions().fp_vkEnumerateInstanceExtensionProperties, nullptr); + if (available_extensions_ret != VK_SUCCESS) { + this->available_extensions.clear(); + } + + for (auto& ext : this->available_extensions) { + if (strcmp(ext.extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0) { + debug_utils_available = true; + } + } + + for (auto& layer : this->available_layers) { + std::vector layer_extensions; + auto layer_extensions_ret = detail::get_vector( + layer_extensions, detail::vulkan_functions().fp_vkEnumerateInstanceExtensionProperties, layer.layerName); + if (layer_extensions_ret == VK_SUCCESS) { + this->available_extensions.insert( + this->available_extensions.end(), layer_extensions.begin(), layer_extensions.end()); + for (auto& ext : layer_extensions) { + if (strcmp(ext.extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0) { + debug_utils_available = true; + } + } + } + } + + PFN_vkEnumerateInstanceVersion pfn_vkEnumerateInstanceVersion = detail::vulkan_functions().fp_vkEnumerateInstanceVersion; + + if (pfn_vkEnumerateInstanceVersion != nullptr) { + VkResult res = pfn_vkEnumerateInstanceVersion(&instance_api_version); + if (res != VK_SUCCESS) { + instance_api_version = VKB_VK_API_VERSION_1_0; + } + } +} +bool SystemInfo::is_extension_available(const char* extension_name) const { + if (!extension_name) return false; + return detail::check_extension_supported(available_extensions, extension_name); +} +bool SystemInfo::is_layer_available(const char* layer_name) const { + if (!layer_name) return false; + return detail::check_layer_supported(available_layers, layer_name); 
+} +bool SystemInfo::is_instance_version_available(uint32_t major_api_version, uint32_t minor_api_version) { + return instance_api_version >= VKB_MAKE_VK_VERSION(0, major_api_version, minor_api_version, 0); +} +bool SystemInfo::is_instance_version_available(uint32_t api_version) { return instance_api_version >= api_version; } + +void destroy_surface(Instance const& instance, VkSurfaceKHR surface) { + if (instance.instance != VK_NULL_HANDLE && surface != VK_NULL_HANDLE) { + detail::vulkan_functions().fp_vkDestroySurfaceKHR(instance.instance, surface, instance.allocation_callbacks); + } +} +void destroy_surface(VkInstance instance, VkSurfaceKHR surface, VkAllocationCallbacks* callbacks) { + if (instance != VK_NULL_HANDLE && surface != VK_NULL_HANDLE) { + detail::vulkan_functions().fp_vkDestroySurfaceKHR(instance, surface, callbacks); + } +} +void destroy_instance(Instance const& instance) { + if (instance.instance != VK_NULL_HANDLE) { + if (instance.debug_messenger != VK_NULL_HANDLE) + destroy_debug_utils_messenger(instance.instance, instance.debug_messenger, instance.allocation_callbacks); + detail::vulkan_functions().fp_vkDestroyInstance(instance.instance, instance.allocation_callbacks); + } +} + +Instance::operator VkInstance() const { return this->instance; } + +InstanceDispatchTable Instance::make_table() const { return { instance, fp_vkGetInstanceProcAddr }; } + +InstanceBuilder::InstanceBuilder(PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr) { + info.fp_vkGetInstanceProcAddr = fp_vkGetInstanceProcAddr; +} +InstanceBuilder::InstanceBuilder() {} + +Result InstanceBuilder::build() const { + + auto sys_info_ret = SystemInfo::get_system_info(info.fp_vkGetInstanceProcAddr); + if (!sys_info_ret) return sys_info_ret.error(); + auto system = sys_info_ret.value(); + + uint32_t instance_version = VKB_VK_API_VERSION_1_0; + + if (info.minimum_instance_version > VKB_VK_API_VERSION_1_0 || info.required_api_version > VKB_VK_API_VERSION_1_0) { + 
PFN_vkEnumerateInstanceVersion pfn_vkEnumerateInstanceVersion = detail::vulkan_functions().fp_vkEnumerateInstanceVersion; + + if (pfn_vkEnumerateInstanceVersion != nullptr) { + VkResult res = pfn_vkEnumerateInstanceVersion(&instance_version); + // Should always return VK_SUCCESS + if (res != VK_SUCCESS && (info.required_api_version > 0 || info.minimum_instance_version > 0)) { + return make_error_code(InstanceError::vulkan_version_unavailable); + } + } + if (pfn_vkEnumerateInstanceVersion == nullptr || + (info.minimum_instance_version > 0 && instance_version < info.minimum_instance_version) || + (info.minimum_instance_version == 0 && instance_version < info.required_api_version)) { + + uint32_t version_error = info.minimum_instance_version == 0 ? info.required_api_version : info.minimum_instance_version; + if (VK_VERSION_MINOR(version_error) == 4) + return make_error_code(InstanceError::vulkan_version_1_4_unavailable); + else if (VK_VERSION_MINOR(version_error) == 3) + return make_error_code(InstanceError::vulkan_version_1_3_unavailable); + else if (VK_VERSION_MINOR(version_error) == 2) + return make_error_code(InstanceError::vulkan_version_1_2_unavailable); + else if (VK_VERSION_MINOR(version_error) == 1) + return make_error_code(InstanceError::vulkan_version_1_1_unavailable); + else + return make_error_code(InstanceError::vulkan_version_unavailable); + } + } + + // The API version to use is set by required_api_version, unless it isn't set, then it comes from minimum_instance_version + uint32_t api_version = VKB_VK_API_VERSION_1_0; + if (info.required_api_version > VKB_VK_API_VERSION_1_0) { + api_version = info.required_api_version; + } else if (info.minimum_instance_version > 0) { + api_version = info.minimum_instance_version; + } + + VkApplicationInfo app_info = {}; + app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; + app_info.pNext = nullptr; + app_info.pApplicationName = info.app_name != nullptr ? 
info.app_name : ""; + app_info.applicationVersion = info.application_version; + app_info.pEngineName = info.engine_name != nullptr ? info.engine_name : ""; + app_info.engineVersion = info.engine_version; + app_info.apiVersion = api_version; + + std::vector extensions; + std::vector layers; + + for (auto& ext : info.extensions) + extensions.push_back(ext); + if (info.debug_callback != nullptr && info.use_debug_messenger && system.debug_utils_available) { + extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); + } + bool properties2_ext_enabled = + api_version < VKB_VK_API_VERSION_1_1 && detail::check_extension_supported(system.available_extensions, + VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); + if (properties2_ext_enabled) { + extensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); + } + + if (info.layer_settings.size() > 0) { + extensions.push_back(VK_EXT_LAYER_SETTINGS_EXTENSION_NAME); + } + +#if defined(VK_KHR_portability_enumeration) + bool portability_enumeration_support = + detail::check_extension_supported(system.available_extensions, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME); + if (portability_enumeration_support) { + extensions.push_back(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME); + } +#else + bool portability_enumeration_support = false; +#endif + if (!info.headless_context) { + auto check_add_window_ext = [&](const char* name) -> bool { + if (!detail::check_extension_supported(system.available_extensions, name)) return false; + extensions.push_back(name); + return true; + }; + bool khr_surface_added = check_add_window_ext("VK_KHR_surface"); +#if defined(_WIN32) + bool added_window_exts = check_add_window_ext("VK_KHR_win32_surface"); +#elif defined(__ANDROID__) + bool added_window_exts = check_add_window_ext("VK_KHR_android_surface"); +#elif defined(_DIRECT2DISPLAY) + bool added_window_exts = check_add_window_ext("VK_KHR_display"); +#elif defined(__linux__) || defined(__FreeBSD__) + // make sure all three 
calls to check_add_window_ext, don't allow short circuiting + bool added_window_exts = check_add_window_ext("VK_KHR_xcb_surface"); + added_window_exts = check_add_window_ext("VK_KHR_xlib_surface") || added_window_exts; + added_window_exts = check_add_window_ext("VK_KHR_wayland_surface") || added_window_exts; +#elif defined(__APPLE__) + bool added_window_exts = check_add_window_ext("VK_EXT_metal_surface"); +#endif + if (!khr_surface_added || !added_window_exts) + return make_error_code(InstanceError::windowing_extensions_not_present); + } + bool all_extensions_supported = detail::check_extensions_supported(system.available_extensions, extensions); + if (!all_extensions_supported) { + return make_error_code(InstanceError::requested_extensions_not_present); + } + + for (auto& layer : info.layers) + layers.push_back(layer); + + if (info.enable_validation_layers || (info.request_validation_layers && system.validation_layers_available)) { + layers.push_back(detail::validation_layer_name); + } + bool all_layers_supported = detail::check_layers_supported(system.available_layers, layers); + if (!all_layers_supported) { + return make_error_code(InstanceError::requested_layers_not_present); + } + + std::vector pNext_chain; + + VkDebugUtilsMessengerCreateInfoEXT messengerCreateInfo = {}; + if (info.use_debug_messenger) { + messengerCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + messengerCreateInfo.pNext = nullptr; + messengerCreateInfo.messageSeverity = info.debug_message_severity; + messengerCreateInfo.messageType = info.debug_message_type; + messengerCreateInfo.pfnUserCallback = info.debug_callback; + messengerCreateInfo.pUserData = info.debug_user_data_pointer; + pNext_chain.push_back(&messengerCreateInfo); + } + + VkValidationFeaturesEXT features{}; + if (info.enabled_validation_features.size() != 0 || info.disabled_validation_features.size()) { + features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT; + features.pNext = nullptr; + 
features.enabledValidationFeatureCount = static_cast(info.enabled_validation_features.size()); + features.pEnabledValidationFeatures = info.enabled_validation_features.data(); + features.disabledValidationFeatureCount = static_cast(info.disabled_validation_features.size()); + features.pDisabledValidationFeatures = info.disabled_validation_features.data(); + pNext_chain.push_back(&features); + } + + VkValidationFlagsEXT checks{}; + if (info.disabled_validation_checks.size() != 0) { + checks.sType = VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT; + checks.pNext = nullptr; + checks.disabledValidationCheckCount = static_cast(info.disabled_validation_checks.size()); + checks.pDisabledValidationChecks = info.disabled_validation_checks.data(); + pNext_chain.push_back(&checks); + } + + VkLayerSettingsCreateInfoEXT layer_settings_ci{}; + if (info.layer_settings.size() > 0) { + layer_settings_ci.sType = VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT; + layer_settings_ci.pNext = nullptr; + layer_settings_ci.settingCount = static_cast(info.layer_settings.size()); + layer_settings_ci.pSettings = info.layer_settings.data(); + pNext_chain.push_back(&layer_settings_ci); + } + + VkInstanceCreateInfo instance_create_info = {}; + instance_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; + detail::setup_pNext_chain(instance_create_info, pNext_chain); + + instance_create_info.flags = info.flags; + instance_create_info.pApplicationInfo = &app_info; + instance_create_info.enabledExtensionCount = static_cast(extensions.size()); + instance_create_info.ppEnabledExtensionNames = extensions.data(); + instance_create_info.enabledLayerCount = static_cast(layers.size()); + instance_create_info.ppEnabledLayerNames = layers.data(); +#if defined(VK_KHR_portability_enumeration) + if (portability_enumeration_support) { + instance_create_info.flags |= VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR; + } +#endif + + Instance instance; + VkResult res = + 
detail::vulkan_functions().fp_vkCreateInstance(&instance_create_info, info.allocation_callbacks, &instance.instance); + if (res != VK_SUCCESS) return Result(InstanceError::failed_create_instance, res); + + detail::vulkan_functions().init_instance_funcs(instance.instance); + + if (info.use_debug_messenger) { + res = create_debug_utils_messenger(instance.instance, + info.debug_callback, + info.debug_message_severity, + info.debug_message_type, + info.debug_user_data_pointer, + &instance.debug_messenger, + info.allocation_callbacks); + if (res != VK_SUCCESS) { + return Result(InstanceError::failed_create_debug_messenger, res); + } + } + + instance.headless = info.headless_context; + instance.properties2_ext_enabled = properties2_ext_enabled; + instance.allocation_callbacks = info.allocation_callbacks; + instance.instance_version = instance_version; + instance.api_version = api_version; + instance.fp_vkGetInstanceProcAddr = detail::vulkan_functions().ptr_vkGetInstanceProcAddr; + instance.fp_vkGetDeviceProcAddr = detail::vulkan_functions().fp_vkGetDeviceProcAddr; + return instance; +} + +InstanceBuilder& InstanceBuilder::set_app_name(const char* app_name) { + if (!app_name) return *this; + info.app_name = app_name; + return *this; +} +InstanceBuilder& InstanceBuilder::set_engine_name(const char* engine_name) { + if (!engine_name) return *this; + info.engine_name = engine_name; + return *this; +} +InstanceBuilder& InstanceBuilder::set_app_version(uint32_t app_version) { + info.application_version = app_version; + return *this; +} +InstanceBuilder& InstanceBuilder::set_app_version(uint32_t major, uint32_t minor, uint32_t patch) { + info.application_version = VKB_MAKE_VK_VERSION(0, major, minor, patch); + return *this; +} +InstanceBuilder& InstanceBuilder::set_engine_version(uint32_t engine_version) { + info.engine_version = engine_version; + return *this; +} +InstanceBuilder& InstanceBuilder::set_engine_version(uint32_t major, uint32_t minor, uint32_t patch) { + 
info.engine_version = VKB_MAKE_VK_VERSION(0, major, minor, patch); + return *this; +} +InstanceBuilder& InstanceBuilder::require_api_version(uint32_t required_api_version) { + info.required_api_version = required_api_version; + return *this; +} +InstanceBuilder& InstanceBuilder::require_api_version(uint32_t major, uint32_t minor, uint32_t patch) { + info.required_api_version = VKB_MAKE_VK_VERSION(0, major, minor, patch); + return *this; +} +InstanceBuilder& InstanceBuilder::set_minimum_instance_version(uint32_t minimum_instance_version) { + info.minimum_instance_version = minimum_instance_version; + return *this; +} +InstanceBuilder& InstanceBuilder::set_minimum_instance_version(uint32_t major, uint32_t minor, uint32_t patch) { + info.minimum_instance_version = VKB_MAKE_VK_VERSION(0, major, minor, patch); + return *this; +} +InstanceBuilder& InstanceBuilder::enable_layer(const char* layer_name) { + if (!layer_name) return *this; + info.layers.push_back(layer_name); + return *this; +} +InstanceBuilder& InstanceBuilder::enable_extension(const char* extension_name) { + if (!extension_name) return *this; + info.extensions.push_back(extension_name); + return *this; +} +InstanceBuilder& InstanceBuilder::enable_extensions(size_t count, const char* const* extensions) { + if (!extensions || count == 0) return *this; + for (size_t i = 0; i < count; i++) { + info.extensions.push_back(extensions[i]); + } + return *this; +} +InstanceBuilder& InstanceBuilder::enable_validation_layers(bool enable_validation) { + info.enable_validation_layers = enable_validation; + return *this; +} +InstanceBuilder& InstanceBuilder::request_validation_layers(bool enable_validation) { + info.request_validation_layers = enable_validation; + return *this; +} + +InstanceBuilder& InstanceBuilder::use_default_debug_messenger() { + info.use_debug_messenger = true; + info.debug_callback = default_debug_callback; + return *this; +} +InstanceBuilder& 
InstanceBuilder::set_debug_callback(PFN_vkDebugUtilsMessengerCallbackEXT callback) { + info.use_debug_messenger = true; + info.debug_callback = callback; + return *this; +} +InstanceBuilder& InstanceBuilder::set_debug_callback_user_data_pointer(void* user_data_pointer) { + info.debug_user_data_pointer = user_data_pointer; + return *this; +} +InstanceBuilder& InstanceBuilder::set_headless(bool headless) { + info.headless_context = headless; + return *this; +} +InstanceBuilder& InstanceBuilder::set_debug_messenger_severity(VkDebugUtilsMessageSeverityFlagsEXT severity) { + info.debug_message_severity = severity; + return *this; +} +InstanceBuilder& InstanceBuilder::add_debug_messenger_severity(VkDebugUtilsMessageSeverityFlagsEXT severity) { + info.debug_message_severity = info.debug_message_severity | severity; + return *this; +} +InstanceBuilder& InstanceBuilder::set_debug_messenger_type(VkDebugUtilsMessageTypeFlagsEXT type) { + info.debug_message_type = type; + return *this; +} +InstanceBuilder& InstanceBuilder::add_debug_messenger_type(VkDebugUtilsMessageTypeFlagsEXT type) { + info.debug_message_type = info.debug_message_type | type; + return *this; +} +InstanceBuilder& InstanceBuilder::add_validation_disable(VkValidationCheckEXT check) { + info.disabled_validation_checks.push_back(check); + return *this; +} +InstanceBuilder& InstanceBuilder::add_validation_feature_enable(VkValidationFeatureEnableEXT enable) { + info.enabled_validation_features.push_back(enable); + return *this; +} +InstanceBuilder& InstanceBuilder::add_validation_feature_disable(VkValidationFeatureDisableEXT disable) { + info.disabled_validation_features.push_back(disable); + return *this; +} +InstanceBuilder& InstanceBuilder::set_allocation_callbacks(VkAllocationCallbacks* callbacks) { + info.allocation_callbacks = callbacks; + return *this; +} +InstanceBuilder& InstanceBuilder::add_layer_setting(VkLayerSettingEXT setting) { + info.layer_settings.push_back(setting); + return *this; +} + +void 
destroy_debug_messenger(VkInstance const instance, VkDebugUtilsMessengerEXT const messenger); + + +// ---- Physical Device ---- // + +namespace detail { + +std::vector find_unsupported_extensions_in_list( + std::vector const& available_extensions, std::vector const& required_extensions) { + std::vector unavailable_extensions; + + for (auto& req_ext : required_extensions) { + if (!std::binary_search(available_extensions.begin(), available_extensions.end(), req_ext)) { + unavailable_extensions.push_back(req_ext); + } + } + return unavailable_extensions; +} + +// Finds the first queue which supports the desired operations. Returns QUEUE_INDEX_MAX_VALUE if none is found +uint32_t get_first_queue_index(std::vector const& families, VkQueueFlags desired_flags) { + for (uint32_t i = 0; i < static_cast(families.size()); i++) { + if ((families[i].queueFlags & desired_flags) == desired_flags) return i; + } + return QUEUE_INDEX_MAX_VALUE; +} +// Finds the queue which is separate from the graphics queue and has the desired flag and not the +// undesired flag, but will select it if no better options are available compute support. Returns +// QUEUE_INDEX_MAX_VALUE if none is found. +uint32_t get_separate_queue_index( + std::vector const& families, VkQueueFlags desired_flags, VkQueueFlags undesired_flags) { + uint32_t index = QUEUE_INDEX_MAX_VALUE; + for (uint32_t i = 0; i < static_cast(families.size()); i++) { + if ((families[i].queueFlags & desired_flags) == desired_flags && ((families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) == 0)) { + if ((families[i].queueFlags & undesired_flags) == 0) { + return i; + } else { + index = i; + } + } + } + return index; +} + +// finds the first queue which supports only the desired flag (not graphics or transfer). Returns QUEUE_INDEX_MAX_VALUE if none is found. 
+uint32_t get_dedicated_queue_index( + std::vector const& families, VkQueueFlags desired_flags, VkQueueFlags undesired_flags) { + for (uint32_t i = 0; i < static_cast(families.size()); i++) { + if ((families[i].queueFlags & desired_flags) == desired_flags && + (families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) == 0 && (families[i].queueFlags & undesired_flags) == 0) + return i; + } + return QUEUE_INDEX_MAX_VALUE; +} + +// finds the first queue which supports presenting. returns QUEUE_INDEX_MAX_VALUE if none is found +uint32_t get_present_queue_index( + VkPhysicalDevice const phys_device, VkSurfaceKHR const surface, std::vector const& families) { + for (uint32_t i = 0; i < static_cast(families.size()); i++) { + VkBool32 presentSupport = VK_FALSE; + if (surface != VK_NULL_HANDLE) { + VkResult res = detail::vulkan_functions().fp_vkGetPhysicalDeviceSurfaceSupportKHR(phys_device, i, surface, &presentSupport); + if (res != VK_SUCCESS) return QUEUE_INDEX_MAX_VALUE; // TODO: determine if this should fail another way + } + if (presentSupport == VK_TRUE) return i; + } + return QUEUE_INDEX_MAX_VALUE; +} +} // namespace detail + +PhysicalDevice PhysicalDeviceSelector::populate_device_details( + VkPhysicalDevice vk_phys_device, detail::FeaturesChain const& src_extended_features_chain) const { + PhysicalDevice physical_device{}; + physical_device.physical_device = vk_phys_device; + physical_device.surface = instance_info.surface; + physical_device.defer_surface_initialization = criteria.defer_surface_initialization; + physical_device.instance_version = instance_info.version; + auto queue_families = detail::get_vector_noerror( + detail::vulkan_functions().fp_vkGetPhysicalDeviceQueueFamilyProperties, vk_phys_device); + physical_device.queue_families = queue_families; + + detail::vulkan_functions().fp_vkGetPhysicalDeviceProperties(vk_phys_device, &physical_device.properties); + detail::vulkan_functions().fp_vkGetPhysicalDeviceFeatures(vk_phys_device, &physical_device.features); + 
detail::vulkan_functions().fp_vkGetPhysicalDeviceMemoryProperties(vk_phys_device, &physical_device.memory_properties); + + physical_device.name = physical_device.properties.deviceName; + + std::vector available_extensions; + auto available_extensions_ret = detail::get_vector( + available_extensions, detail::vulkan_functions().fp_vkEnumerateDeviceExtensionProperties, vk_phys_device, nullptr); + if (available_extensions_ret != VK_SUCCESS) return physical_device; + for (const auto& ext : available_extensions) { + physical_device.available_extensions.push_back(&ext.extensionName[0]); + } + // Lets us quickly find extensions as this list can be 300+ elements long + std::sort(physical_device.available_extensions.begin(), physical_device.available_extensions.end()); + + physical_device.properties2_ext_enabled = instance_info.properties2_ext_enabled; + + auto fill_chain = src_extended_features_chain; + + bool instance_is_1_1 = instance_info.version >= VKB_VK_API_VERSION_1_1; + if (!fill_chain.empty() && (instance_is_1_1 || instance_info.properties2_ext_enabled)) { + VkPhysicalDeviceFeatures2 local_features{}; + fill_chain.create_chained_features(local_features); + // Use KHR function if not able to use the core function + if (instance_is_1_1) { + detail::vulkan_functions().fp_vkGetPhysicalDeviceFeatures2(vk_phys_device, &local_features); + } else { + detail::vulkan_functions().fp_vkGetPhysicalDeviceFeatures2KHR(vk_phys_device, &local_features); + } + physical_device.extended_features_chain = std::move(fill_chain); + } + + return physical_device; +} + +PhysicalDevice::Suitable PhysicalDeviceSelector::is_device_suitable( + PhysicalDevice const& pd, std::vector& unsuitability_reasons) const { + PhysicalDevice::Suitable suitable = PhysicalDevice::Suitable::yes; + + if (criteria.name.size() > 0 && criteria.name != pd.properties.deviceName) { + unsuitability_reasons.push_back( + "VkPhysicalDeviceProperties::deviceName doesn't match requested name \"" + criteria.name + "\""); + 
return PhysicalDevice::Suitable::no; + } + + if (criteria.required_version > pd.properties.apiVersion) { + unsuitability_reasons.push_back( + "VkPhysicalDeviceProperties::apiVersion " + std::to_string(VK_API_VERSION_MAJOR(pd.properties.apiVersion)) + + "." + std::to_string(VK_API_VERSION_MINOR(pd.properties.apiVersion)) + " lower than required version " + + std::to_string(VK_API_VERSION_MAJOR(criteria.required_version)) + "." + + std::to_string(VK_API_VERSION_MINOR(criteria.required_version))); + return PhysicalDevice::Suitable::no; + } + + bool dedicated_compute = detail::get_dedicated_queue_index(pd.queue_families, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT) != + detail::QUEUE_INDEX_MAX_VALUE; + bool dedicated_transfer = detail::get_dedicated_queue_index(pd.queue_families, VK_QUEUE_TRANSFER_BIT, VK_QUEUE_COMPUTE_BIT) != + detail::QUEUE_INDEX_MAX_VALUE; + bool separate_compute = detail::get_separate_queue_index(pd.queue_families, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT) != + detail::QUEUE_INDEX_MAX_VALUE; + bool separate_transfer = detail::get_separate_queue_index(pd.queue_families, VK_QUEUE_TRANSFER_BIT, VK_QUEUE_COMPUTE_BIT) != + detail::QUEUE_INDEX_MAX_VALUE; + + bool present_queue = detail::get_present_queue_index(pd.physical_device, instance_info.surface, pd.queue_families) != + detail::QUEUE_INDEX_MAX_VALUE; + + if (criteria.require_dedicated_compute_queue && !dedicated_compute) { + unsuitability_reasons.push_back("No dedicated compute queue"); + return PhysicalDevice::Suitable::no; + } + if (criteria.require_dedicated_transfer_queue && !dedicated_transfer) { + unsuitability_reasons.push_back("No dedicated transfer queue"); + return PhysicalDevice::Suitable::no; + } + if (criteria.require_separate_compute_queue && !separate_compute) { + unsuitability_reasons.push_back("No separate compute queue"); + return PhysicalDevice::Suitable::no; + } + if (criteria.require_separate_transfer_queue && !separate_transfer) { + unsuitability_reasons.push_back("No 
separate transfer queue"); + return PhysicalDevice::Suitable::no; + } + if (criteria.require_present && !present_queue && !criteria.defer_surface_initialization) { + unsuitability_reasons.push_back("No queue capable of present operations"); + return PhysicalDevice::Suitable::no; + } + const auto unsupported_extensions = + detail::find_unsupported_extensions_in_list(pd.available_extensions, criteria.required_extensions); + if (unsupported_extensions.size() > 0) { + for (auto const& unsupported_ext : unsupported_extensions) { + unsuitability_reasons.push_back("Device extension " + unsupported_ext + " not supported"); + } + return PhysicalDevice::Suitable::no; + } + if (!criteria.defer_surface_initialization && criteria.require_present) { + std::vector formats; + std::vector present_modes; + + auto formats_ret = detail::get_vector(formats, + detail::vulkan_functions().fp_vkGetPhysicalDeviceSurfaceFormatsKHR, + pd.physical_device, + instance_info.surface); + auto present_modes_ret = detail::get_vector(present_modes, + detail::vulkan_functions().fp_vkGetPhysicalDeviceSurfacePresentModesKHR, + pd.physical_device, + instance_info.surface); + + if (formats_ret != VK_SUCCESS || present_modes_ret != VK_SUCCESS || formats.empty() || present_modes.empty()) { + if (formats_ret != VK_SUCCESS) { + unsuitability_reasons.push_back( + "vkGetPhysicalDeviceSurfaceFormatsKHR returned error code " + std::to_string(formats_ret)); + } + if (present_modes_ret != VK_SUCCESS) { + unsuitability_reasons.push_back( + "vkGetPhysicalDeviceSurfacePresentModesKHR returned error code " + std::to_string(present_modes_ret)); + } + if (formats.empty()) { + unsuitability_reasons.push_back("vkGetPhysicalDeviceSurfaceFormatsKHR returned zero surface formats"); + } + if (present_modes.empty()) { + unsuitability_reasons.push_back( + "vkGetPhysicalDeviceSurfacePresentModesKHR returned zero present modes"); + } + return PhysicalDevice::Suitable::no; + } + } + + if (pd.properties.deviceType != 
static_cast(criteria.preferred_type)) { + if (criteria.allow_any_type) { + suitable = PhysicalDevice::Suitable::partial; + } else { + suitable = PhysicalDevice::Suitable::no; + } + } + + detail::compare_VkPhysicalDeviceFeatures(unsuitability_reasons, pd.features, criteria.required_features); + pd.extended_features_chain.match_all(unsuitability_reasons, criteria.extended_features_chain); + if (!unsuitability_reasons.empty()) { + return PhysicalDevice::Suitable::no; + } + + for (uint32_t i = 0; i < pd.memory_properties.memoryHeapCount; i++) { + if (pd.memory_properties.memoryHeaps[i].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) { + if (pd.memory_properties.memoryHeaps[i].size < criteria.required_mem_size) { + unsuitability_reasons.push_back("Did not contain a Device Local memory heap with enough size"); + return PhysicalDevice::Suitable::no; + } + } + } + + return suitable; +} +// delegate construction to the one with an explicit surface parameter +PhysicalDeviceSelector::PhysicalDeviceSelector(Instance const& instance) +: PhysicalDeviceSelector(instance, VK_NULL_HANDLE) {} + +PhysicalDeviceSelector::PhysicalDeviceSelector(Instance const& instance, VkSurfaceKHR surface) { + instance_info.instance = instance.instance; + instance_info.version = instance.instance_version; + instance_info.properties2_ext_enabled = instance.properties2_ext_enabled; + instance_info.surface = surface; + criteria.require_present = !instance.headless; + criteria.required_version = instance.api_version; +} + +// Return all devices which are considered suitable - intended for applications which want to let the user pick the physical device +Result> PhysicalDeviceSelector::select_devices() const { + if (criteria.require_present && !criteria.defer_surface_initialization) { + if (instance_info.surface == VK_NULL_HANDLE) + return Result>{ PhysicalDeviceError::no_surface_provided }; + } + + // Get the VkPhysicalDevice handles on the system + std::vector vk_physical_devices; + + auto 
vk_physical_devices_ret = detail::get_vector( + vk_physical_devices, detail::vulkan_functions().fp_vkEnumeratePhysicalDevices, instance_info.instance); + if (vk_physical_devices_ret != VK_SUCCESS) { + return Result>{ PhysicalDeviceError::failed_enumerate_physical_devices, vk_physical_devices_ret }; + } + if (vk_physical_devices.empty()) { + return Result>{ PhysicalDeviceError::no_physical_devices_found }; + } + + auto fill_out_phys_dev_with_criteria = [&](PhysicalDevice& phys_dev) { + phys_dev.features = criteria.required_features; + phys_dev.extended_features_chain = criteria.extended_features_chain; + + bool portability_ext_available = + criteria.enable_portability_subset && + std::binary_search(phys_dev.available_extensions.begin(), phys_dev.available_extensions.end(), "VK_KHR_portability_subset"); + + phys_dev.extensions_to_enable.clear(); + phys_dev.extensions_to_enable.insert( + phys_dev.extensions_to_enable.end(), criteria.required_extensions.begin(), criteria.required_extensions.end()); + if (portability_ext_available) { + phys_dev.extensions_to_enable.push_back("VK_KHR_portability_subset"); + } + // Lets us quickly find extensions as this list can be 300+ elements long + std::sort(phys_dev.extensions_to_enable.begin(), phys_dev.extensions_to_enable.end()); + }; + + // if this option is set, always return only the first physical device found + if (criteria.use_first_gpu_unconditionally && vk_physical_devices.size() > 0) { + PhysicalDevice physical_device = populate_device_details(vk_physical_devices[0], criteria.extended_features_chain); + fill_out_phys_dev_with_criteria(physical_device); + return std::vector{ physical_device }; + } + + // Populate their details and check their suitability + std::vector unsuitability_reasons; + std::vector physical_devices; + for (auto& vk_physical_device : vk_physical_devices) { + PhysicalDevice phys_dev = populate_device_details(vk_physical_device, criteria.extended_features_chain); + std::vector 
gpu_unsuitability_reasons; + phys_dev.suitable = is_device_suitable(phys_dev, gpu_unsuitability_reasons); + if (phys_dev.suitable != PhysicalDevice::Suitable::no) { + physical_devices.push_back(phys_dev); + } else { + for (auto const& reason : gpu_unsuitability_reasons) { + unsuitability_reasons.push_back( + std::string("Physical Device ") + phys_dev.properties.deviceName + " not selected due to: " + reason); + } + } + } + + // No suitable devices found, return an error which contains the list of reason why it wasn't suitable + if (physical_devices.empty()) { + return Result>{ PhysicalDeviceError::no_suitable_device, unsuitability_reasons }; + } + + // sort the list into fully and partially suitable devices. use stable_partition to maintain relative order + std::stable_partition(physical_devices.begin(), physical_devices.end(), [](auto const& pd) { + return pd.suitable == PhysicalDevice::Suitable::yes; + }); + + // Make the physical device ready to be used to create a Device from it + for (auto& physical_device : physical_devices) { + fill_out_phys_dev_with_criteria(physical_device); + } + + return physical_devices; +} + +Result PhysicalDeviceSelector::select() const { + auto const selected_devices = select_devices(); + + if (!selected_devices) return Result{ selected_devices.full_error() }; + return selected_devices.value().at(0); +} + +Result> PhysicalDeviceSelector::select_device_names() const { + auto const selected_devices = select_devices(); + if (!selected_devices) return Result>{ selected_devices.full_error() }; + + std::vector names; + for (const auto& pd : selected_devices.value()) { + names.push_back(pd.name); + } + return names; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::set_surface(VkSurfaceKHR surface) { + instance_info.surface = surface; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::set_name(std::string const& name) { + criteria.name = name; + return *this; +} +PhysicalDeviceSelector& 
PhysicalDeviceSelector::prefer_gpu_device_type(PreferredDeviceType type) { + criteria.preferred_type = type; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::allow_any_gpu_device_type(bool allow_any_type) { + criteria.allow_any_type = allow_any_type; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::require_present(bool require) { + criteria.require_present = require; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::require_dedicated_transfer_queue() { + criteria.require_dedicated_transfer_queue = true; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::require_dedicated_compute_queue() { + criteria.require_dedicated_compute_queue = true; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::require_separate_transfer_queue() { + criteria.require_separate_transfer_queue = true; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::require_separate_compute_queue() { + criteria.require_separate_compute_queue = true; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::required_device_memory_size(VkDeviceSize size) { + criteria.required_mem_size = size; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::add_required_extension(const char* extension) { + criteria.required_extensions.push_back(extension); + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::add_required_extensions(size_t count, const char* const* extensions) { + if (!extensions || count == 0) return *this; + for (size_t i = 0; i < count; i++) { + criteria.required_extensions.push_back(extensions[i]); + } + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::set_minimum_version(uint32_t major, uint32_t minor) { + criteria.required_version = VKB_MAKE_VK_VERSION(0, major, minor, 0); + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::disable_portability_subset() { + criteria.enable_portability_subset = false; + return *this; +} + 
+PhysicalDeviceSelector& PhysicalDeviceSelector::set_required_features(VkPhysicalDeviceFeatures const& features) { + detail::merge_VkPhysicalDeviceFeatures(criteria.required_features, features); + return *this; +} +#if defined(VKB_VK_API_VERSION_1_2) +// The implementation of the set_required_features_1X functions sets the sType manually. This was a poor choice since +// users of Vulkan should expect to fill out their structs properly. To make the functions take the struct parameter by +// const reference, a local copy must be made in order to set the sType. +PhysicalDeviceSelector& PhysicalDeviceSelector::set_required_features_11(VkPhysicalDeviceVulkan11Features const& features_11) { + VkPhysicalDeviceVulkan11Features features_11_copy = features_11; + features_11_copy.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES; + add_required_extension_features(features_11_copy); + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::set_required_features_12(VkPhysicalDeviceVulkan12Features const& features_12) { + VkPhysicalDeviceVulkan12Features features_12_copy = features_12; + features_12_copy.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES; + add_required_extension_features(features_12_copy); + return *this; +} +#endif +#if defined(VKB_VK_API_VERSION_1_3) +PhysicalDeviceSelector& PhysicalDeviceSelector::set_required_features_13(VkPhysicalDeviceVulkan13Features const& features_13) { + VkPhysicalDeviceVulkan13Features features_13_copy = features_13; + features_13_copy.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES; + add_required_extension_features(features_13_copy); + return *this; +} +#endif +#if defined(VKB_VK_API_VERSION_1_4) +PhysicalDeviceSelector& PhysicalDeviceSelector::set_required_features_14(VkPhysicalDeviceVulkan14Features const& features_14) { + VkPhysicalDeviceVulkan14Features features_14_copy = features_14; + features_14_copy.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_FEATURES; + 
add_required_extension_features(features_14_copy); + return *this; +} +#endif +PhysicalDeviceSelector& PhysicalDeviceSelector::defer_surface_initialization() { + criteria.defer_surface_initialization = true; + return *this; +} +PhysicalDeviceSelector& PhysicalDeviceSelector::select_first_device_unconditionally(bool unconditionally) { + criteria.use_first_gpu_unconditionally = unconditionally; + return *this; +} + +// PhysicalDevice +bool PhysicalDevice::has_dedicated_compute_queue() const { + return detail::get_dedicated_queue_index(queue_families, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT) != detail::QUEUE_INDEX_MAX_VALUE; +} +bool PhysicalDevice::has_separate_compute_queue() const { + return detail::get_separate_queue_index(queue_families, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT) != detail::QUEUE_INDEX_MAX_VALUE; +} +bool PhysicalDevice::has_dedicated_transfer_queue() const { + return detail::get_dedicated_queue_index(queue_families, VK_QUEUE_TRANSFER_BIT, VK_QUEUE_COMPUTE_BIT) != detail::QUEUE_INDEX_MAX_VALUE; +} +bool PhysicalDevice::has_separate_transfer_queue() const { + return detail::get_separate_queue_index(queue_families, VK_QUEUE_TRANSFER_BIT, VK_QUEUE_COMPUTE_BIT) != detail::QUEUE_INDEX_MAX_VALUE; +} +std::vector PhysicalDevice::get_queue_families() const { return queue_families; } +std::vector PhysicalDevice::get_extensions() const { return extensions_to_enable; } +std::vector PhysicalDevice::get_available_extensions() const { return available_extensions; } +bool PhysicalDevice::is_extension_present(const char* ext) const { + return std::binary_search(std::begin(available_extensions), std::end(available_extensions), ext); +} +bool PhysicalDevice::enable_extension_if_present(const char* extension) { + if (std::binary_search(std::begin(available_extensions), std::end(available_extensions), extension)) { + extensions_to_enable.insert( + std::upper_bound(std::begin(extensions_to_enable), std::end(extensions_to_enable), extension), extension); + 
return true; + } + return false; +} +bool PhysicalDevice::enable_extensions_if_present(size_t count, const char* const* extensions) { + for (size_t i = 0; i < count; ++i) { + const auto extension = extensions[i]; + if (!std::binary_search(std::begin(available_extensions), std::end(available_extensions), extension)) { + return false; + } + } + + for (size_t i = 0; i < count; ++i) { + extensions_to_enable.insert( + std::upper_bound(std::begin(extensions_to_enable), std::end(extensions_to_enable), extensions[i]), extensions[i]); + } + return true; +} + +bool PhysicalDevice::enable_features_if_present(const VkPhysicalDeviceFeatures& features_to_enable) { + VkPhysicalDeviceFeatures actual_pdf{}; + detail::vulkan_functions().fp_vkGetPhysicalDeviceFeatures(physical_device, &actual_pdf); + + std::vector unsupported_features; + detail::compare_VkPhysicalDeviceFeatures(unsupported_features, actual_pdf, features_to_enable); + if (unsupported_features.empty()) { + detail::merge_VkPhysicalDeviceFeatures(features, features_to_enable); + return true; + } + return false; +} + +bool PhysicalDevice::enable_features_struct_if_present( + VkStructureType sType, size_t struct_size, const void* features_struct, void* query_struct) { + VkPhysicalDeviceFeatures2 actual_pdf2{}; + actual_pdf2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; + actual_pdf2.pNext = query_struct; + + bool instance_is_1_1 = instance_version >= VKB_VK_API_VERSION_1_1; + if (instance_is_1_1 || properties2_ext_enabled) { + if (instance_is_1_1) { + detail::vulkan_functions().fp_vkGetPhysicalDeviceFeatures2(physical_device, &actual_pdf2); + } else { + detail::vulkan_functions().fp_vkGetPhysicalDeviceFeatures2KHR(physical_device, &actual_pdf2); + } + + std::vector error_list; + detail::compare_feature_struct(sType, error_list, query_struct, features_struct); + + if (error_list.empty()) { + extended_features_chain.add_structure(sType, struct_size, features_struct); + return true; + } + } + return false; +} + + 
+PhysicalDevice::operator VkPhysicalDevice() const { return this->physical_device; } + +// ---- Queues ---- // + +Result Device::get_queue_index(QueueType type) const { + uint32_t index = detail::QUEUE_INDEX_MAX_VALUE; + switch (type) { + case QueueType::present: + index = detail::get_present_queue_index(physical_device.physical_device, surface, queue_families); + if (index == detail::QUEUE_INDEX_MAX_VALUE) return Result{ QueueError::present_unavailable }; + break; + case QueueType::graphics: + index = detail::get_first_queue_index(queue_families, VK_QUEUE_GRAPHICS_BIT); + if (index == detail::QUEUE_INDEX_MAX_VALUE) return Result{ QueueError::graphics_unavailable }; + break; + case QueueType::compute: + index = detail::get_separate_queue_index(queue_families, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT); + if (index == detail::QUEUE_INDEX_MAX_VALUE) return Result{ QueueError::compute_unavailable }; + break; + case QueueType::transfer: + index = detail::get_separate_queue_index(queue_families, VK_QUEUE_TRANSFER_BIT, VK_QUEUE_COMPUTE_BIT); + if (index == detail::QUEUE_INDEX_MAX_VALUE) return Result{ QueueError::transfer_unavailable }; + break; + default: + return Result{ QueueError::invalid_queue_family_index }; + } + return index; +} +Result Device::get_dedicated_queue_index(QueueType type) const { + uint32_t index = detail::QUEUE_INDEX_MAX_VALUE; + switch (type) { + case QueueType::compute: + index = detail::get_dedicated_queue_index(queue_families, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT); + if (index == detail::QUEUE_INDEX_MAX_VALUE) return Result{ QueueError::compute_unavailable }; + break; + case QueueType::transfer: + index = detail::get_dedicated_queue_index(queue_families, VK_QUEUE_TRANSFER_BIT, VK_QUEUE_COMPUTE_BIT); + if (index == detail::QUEUE_INDEX_MAX_VALUE) return Result{ QueueError::transfer_unavailable }; + break; + default: + return Result{ QueueError::invalid_queue_family_index }; + } + return index; +} + +Result Device::get_queue(QueueType 
type) const { + auto index = get_queue_index(type); + if (!index.has_value()) return { index.error() }; + VkQueue out_queue; + internal_table.fp_vkGetDeviceQueue(device, index.value(), 0, &out_queue); + return out_queue; +} +Result Device::get_dedicated_queue(QueueType type) const { + auto index = get_dedicated_queue_index(type); + if (!index.has_value()) return { index.error() }; + VkQueue out_queue; + internal_table.fp_vkGetDeviceQueue(device, index.value(), 0, &out_queue); + return out_queue; +} + +// ---- Dispatch ---- // + +DispatchTable Device::make_table() const { return { device, fp_vkGetDeviceProcAddr }; } + +// ---- Device ---- // + +Device::operator VkDevice() const { return this->device; } + +void destroy_device(Device const& device) { + if (device.device != VK_NULL_HANDLE) { + device.internal_table.fp_vkDestroyDevice(device.device, device.allocation_callbacks); + } +} + +DeviceBuilder::DeviceBuilder(PhysicalDevice phys_device) { physical_device = std::move(phys_device); } + +Result DeviceBuilder::build() const { + + std::vector queue_descriptions; + queue_descriptions.insert(queue_descriptions.end(), info.queue_descriptions.begin(), info.queue_descriptions.end()); + + if (queue_descriptions.empty()) { + for (uint32_t i = 0; i < physical_device.queue_families.size(); i++) { + queue_descriptions.emplace_back(i, std::vector{ 1.0f }); + } + } + + std::vector queueCreateInfos; + for (auto& desc : queue_descriptions) { + VkDeviceQueueCreateInfo queue_create_info = {}; + queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_info.queueFamilyIndex = desc.index; + queue_create_info.queueCount = static_cast(desc.priorities.size()); + queue_create_info.pQueuePriorities = desc.priorities.data(); + queueCreateInfos.push_back(queue_create_info); + } + + std::vector extensions_to_enable; + for (const auto& ext : physical_device.extensions_to_enable) { + extensions_to_enable.push_back(ext.c_str()); + } + if (physical_device.surface != 
VK_NULL_HANDLE || physical_device.defer_surface_initialization) + extensions_to_enable.push_back({ VK_KHR_SWAPCHAIN_EXTENSION_NAME }); + + std::vector final_pnext_chain; + VkDeviceCreateInfo device_create_info = {}; + + bool user_defined_phys_dev_features_2 = false; + for (auto& pnext : info.pNext_chain) { + VkBaseOutStructure out_structure{}; + memcpy(&out_structure, pnext, sizeof(VkBaseOutStructure)); + if (out_structure.sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2) { + user_defined_phys_dev_features_2 = true; + break; + } + } + + if (user_defined_phys_dev_features_2 && !physical_device.extended_features_chain.empty()) { + return { DeviceError::VkPhysicalDeviceFeatures2_in_pNext_chain_while_using_add_required_extension_features }; + } + + // These objects must be alive during the call to vkCreateDevice + auto physical_device_extension_features_copy = physical_device.extended_features_chain; + VkPhysicalDeviceFeatures2 local_features2{}; + local_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; + local_features2.features = physical_device.features; + + if (!user_defined_phys_dev_features_2) { + if (physical_device.instance_version >= VKB_VK_API_VERSION_1_1 || physical_device.properties2_ext_enabled) { + final_pnext_chain.push_back(&local_features2); + auto features_chain_members = physical_device_extension_features_copy.get_pNext_chain_members(); + for (auto& features_struct : features_chain_members) { + final_pnext_chain.push_back(features_struct); + } + } else { + // Only set device_create_info.pEnabledFeatures when the pNext chain does not contain a VkPhysicalDeviceFeatures2 structure + device_create_info.pEnabledFeatures = &physical_device.features; + } + } + + for (auto& pnext : info.pNext_chain) { + final_pnext_chain.push_back(pnext); + } + + detail::setup_pNext_chain(device_create_info, final_pnext_chain); + + device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; + device_create_info.flags = info.flags; + 
device_create_info.queueCreateInfoCount = static_cast(queueCreateInfos.size()); + device_create_info.pQueueCreateInfos = queueCreateInfos.data(); + device_create_info.enabledExtensionCount = static_cast(extensions_to_enable.size()); + device_create_info.ppEnabledExtensionNames = extensions_to_enable.data(); + + Device device; + + VkResult res = detail::vulkan_functions().fp_vkCreateDevice( + physical_device.physical_device, &device_create_info, info.allocation_callbacks, &device.device); + if (res != VK_SUCCESS) { + return { DeviceError::failed_create_device, res }; + } + + device.physical_device = physical_device; + device.surface = physical_device.surface; + device.queue_families = physical_device.queue_families; + device.allocation_callbacks = info.allocation_callbacks; + device.fp_vkGetDeviceProcAddr = detail::vulkan_functions().fp_vkGetDeviceProcAddr; + detail::vulkan_functions().get_device_proc_addr(device.device, device.internal_table.fp_vkGetDeviceQueue, "vkGetDeviceQueue"); + detail::vulkan_functions().get_device_proc_addr(device.device, device.internal_table.fp_vkDestroyDevice, "vkDestroyDevice"); + device.instance_version = physical_device.instance_version; + return device; +} +DeviceBuilder& DeviceBuilder::set_allocation_callbacks(VkAllocationCallbacks* callbacks) { + info.allocation_callbacks = callbacks; + return *this; +} +DeviceBuilder& DeviceBuilder::custom_queue_setup(size_t count, CustomQueueDescription const* queue_descriptions) { + info.queue_descriptions.assign(queue_descriptions, queue_descriptions + count); + return *this; +} +DeviceBuilder& DeviceBuilder::custom_queue_setup(std::vector const& queue_descriptions) { + info.queue_descriptions = queue_descriptions; + return *this; +} +DeviceBuilder& DeviceBuilder::custom_queue_setup(std::vector&& queue_descriptions) { + info.queue_descriptions = std::move(queue_descriptions); + return *this; +} +#if VKB_SPAN_OVERLOADS +DeviceBuilder& DeviceBuilder::custom_queue_setup(std::span 
queue_descriptions) { + info.queue_descriptions.assign(queue_descriptions.begin(), queue_descriptions.end()); + return *this; +} +#endif + +// ---- Swapchain ---- // + +namespace detail { +struct SurfaceSupportDetails { + VkSurfaceCapabilitiesKHR capabilities; + std::vector formats; + std::vector present_modes; +}; + +enum class SurfaceSupportError { + surface_handle_null, + failed_get_surface_capabilities, + failed_enumerate_surface_formats, + failed_enumerate_present_modes, + no_suitable_desired_format +}; + +struct SurfaceSupportErrorCategory : std::error_category { + const char* name() const noexcept override { return "vbk_surface_support"; } + std::string message(int err) const override { + switch (static_cast(err)) { + CASE_TO_STRING(SurfaceSupportError, surface_handle_null) + CASE_TO_STRING(SurfaceSupportError, failed_get_surface_capabilities) + CASE_TO_STRING(SurfaceSupportError, failed_enumerate_surface_formats) + CASE_TO_STRING(SurfaceSupportError, failed_enumerate_present_modes) + CASE_TO_STRING(SurfaceSupportError, no_suitable_desired_format) + default: + return ""; + } + } +}; +const SurfaceSupportErrorCategory surface_support_error_category; + +std::error_code make_error_code(SurfaceSupportError surface_support_error) { + return { static_cast(surface_support_error), detail::surface_support_error_category }; +} + +Result query_surface_support_details(VkPhysicalDevice phys_device, VkSurfaceKHR surface) { + if (surface == VK_NULL_HANDLE) return make_error_code(SurfaceSupportError::surface_handle_null); + + VkSurfaceCapabilitiesKHR capabilities; + VkResult res = detail::vulkan_functions().fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(phys_device, surface, &capabilities); + if (res != VK_SUCCESS) { + return { make_error_code(SurfaceSupportError::failed_get_surface_capabilities), res }; + } + + std::vector formats; + std::vector present_modes; + + auto formats_ret = detail::get_vector( + formats, 
detail::vulkan_functions().fp_vkGetPhysicalDeviceSurfaceFormatsKHR, phys_device, surface); + if (formats_ret != VK_SUCCESS) + return { make_error_code(SurfaceSupportError::failed_enumerate_surface_formats), formats_ret }; + auto present_modes_ret = detail::get_vector( + present_modes, detail::vulkan_functions().fp_vkGetPhysicalDeviceSurfacePresentModesKHR, phys_device, surface); + if (present_modes_ret != VK_SUCCESS) + return { make_error_code(SurfaceSupportError::failed_enumerate_present_modes), present_modes_ret }; + + return SurfaceSupportDetails{ capabilities, formats, present_modes }; +} + +Result find_desired_surface_format( + std::vector const& available_formats, std::vector const& desired_formats) { + for (auto const& desired_format : desired_formats) { + for (auto const& available_format : available_formats) { + // finds the first format that is desired and available + if (desired_format.format == available_format.format && desired_format.colorSpace == available_format.colorSpace) { + return desired_format; + } + } + } + + // if no desired format is available, we report that no format is suitable to the user request + return { make_error_code(SurfaceSupportError::no_suitable_desired_format) }; +} + +VkSurfaceFormatKHR find_best_surface_format( + std::vector const& available_formats, std::vector const& desired_formats) { + auto surface_format_ret = detail::find_desired_surface_format(available_formats, desired_formats); + if (surface_format_ret.has_value()) return surface_format_ret.value(); + + // use the first available format as a fallback if any desired formats aren't found + return available_formats[0]; +} + +VkPresentModeKHR find_present_mode(std::vector const& available_resent_modes, + std::vector const& desired_present_modes) { + for (auto const& desired_pm : desired_present_modes) { + for (auto const& available_pm : available_resent_modes) { + // finds the first present mode that is desired and available + if (desired_pm == available_pm) return 
desired_pm; + } + // only present mode required, use as a fallback + return VK_PRESENT_MODE_FIFO_KHR; +} + +template T minimum(T a, T b) { return a < b ? a : b; } +template T maximum(T a, T b) { return a > b ? a : b; } + +VkExtent2D find_extent(VkSurfaceCapabilitiesKHR const& capabilities, uint32_t desired_width, uint32_t desired_height) { + if (capabilities.currentExtent.width != UINT32_MAX) { + return capabilities.currentExtent; + } else { + VkExtent2D actualExtent = { desired_width, desired_height }; + + actualExtent.width = + maximum(capabilities.minImageExtent.width, minimum(capabilities.maxImageExtent.width, actualExtent.width)); + actualExtent.height = + maximum(capabilities.minImageExtent.height, minimum(capabilities.maxImageExtent.height, actualExtent.height)); + + return actualExtent; + } +} +} // namespace detail + +void destroy_swapchain(Swapchain const& swapchain) { + if (swapchain.device != VK_NULL_HANDLE && swapchain.swapchain != VK_NULL_HANDLE) { + swapchain.internal_table.fp_vkDestroySwapchainKHR(swapchain.device, swapchain.swapchain, swapchain.allocation_callbacks); + } +} + +SwapchainBuilder::SwapchainBuilder(Device const& device) { + info.physical_device = device.physical_device.physical_device; + info.device = device.device; + info.surface = device.surface; + info.instance_version = device.instance_version; + auto present = device.get_queue_index(QueueType::present); + auto graphics = device.get_queue_index(QueueType::graphics); + assert(graphics.has_value() && present.has_value() && "Graphics and Present queue indexes must be valid"); + info.graphics_queue_index = graphics.value(); + info.present_queue_index = present.value(); +} +SwapchainBuilder::SwapchainBuilder(Device const& device, VkSurfaceKHR const surface) { + info.physical_device = device.physical_device.physical_device; + info.device = device.device; + info.surface = surface; + info.instance_version = device.instance_version; + Device temp_device = device; + temp_device.surface = 
surface; + auto present = temp_device.get_queue_index(QueueType::present); + auto graphics = temp_device.get_queue_index(QueueType::graphics); + assert(graphics.has_value() && present.has_value() && "Graphics and Present queue indexes must be valid"); + info.graphics_queue_index = graphics.value(); + info.present_queue_index = present.value(); +} +SwapchainBuilder::SwapchainBuilder(VkPhysicalDevice const physical_device, + VkDevice const device, + VkSurfaceKHR const surface, + uint32_t graphics_queue_index, + uint32_t present_queue_index) { + info.physical_device = physical_device; + info.device = device; + info.surface = surface; + info.graphics_queue_index = graphics_queue_index; + info.present_queue_index = present_queue_index; + if (graphics_queue_index == detail::QUEUE_INDEX_MAX_VALUE || present_queue_index == detail::QUEUE_INDEX_MAX_VALUE) { + auto queue_families = detail::get_vector_noerror( + detail::vulkan_functions().fp_vkGetPhysicalDeviceQueueFamilyProperties, physical_device); + if (graphics_queue_index == detail::QUEUE_INDEX_MAX_VALUE) + info.graphics_queue_index = detail::get_first_queue_index(queue_families, VK_QUEUE_GRAPHICS_BIT); + if (present_queue_index == detail::QUEUE_INDEX_MAX_VALUE) + info.present_queue_index = detail::get_present_queue_index(physical_device, surface, queue_families); + } +} +Result SwapchainBuilder::build() const { + if (info.surface == VK_NULL_HANDLE) { + return Result{ SwapchainError::surface_handle_not_provided }; + } + + auto desired_formats = info.desired_formats; + if (desired_formats.empty()) add_desired_formats(desired_formats); + auto desired_present_modes = info.desired_present_modes; + if (desired_present_modes.empty()) add_desired_present_modes(desired_present_modes); + + auto surface_support_ret = detail::query_surface_support_details(info.physical_device, info.surface); + if (!surface_support_ret.has_value()) + return Result{ SwapchainError::failed_query_surface_support_details, surface_support_ret.vk_result() 
}; + auto surface_support = surface_support_ret.value(); + + uint32_t image_count = info.min_image_count; + if (info.required_min_image_count >= 1) { + if (info.required_min_image_count < surface_support.capabilities.minImageCount) + return make_error_code(SwapchainError::required_min_image_count_too_low); + + image_count = info.required_min_image_count; + } else if (info.min_image_count == 0) { + // We intentionally use minImageCount + 1 to maintain existing behavior, even if it typically results in triple buffering on most systems. + image_count = surface_support.capabilities.minImageCount + 1; + } else { + image_count = info.min_image_count; + if (image_count < surface_support.capabilities.minImageCount) + image_count = surface_support.capabilities.minImageCount; + } + if (surface_support.capabilities.maxImageCount > 0 && image_count > surface_support.capabilities.maxImageCount) { + image_count = surface_support.capabilities.maxImageCount; + } + + VkSurfaceFormatKHR surface_format = detail::find_best_surface_format(surface_support.formats, desired_formats); + + VkExtent2D extent = detail::find_extent(surface_support.capabilities, info.desired_width, info.desired_height); + + uint32_t image_array_layers = info.array_layer_count; + if (surface_support.capabilities.maxImageArrayLayers < info.array_layer_count) + image_array_layers = surface_support.capabilities.maxImageArrayLayers; + if (info.array_layer_count == 0) image_array_layers = 1; + + uint32_t queue_family_indices[] = { info.graphics_queue_index, info.present_queue_index }; + + + VkPresentModeKHR present_mode = detail::find_present_mode(surface_support.present_modes, desired_present_modes); + + // VkSurfaceCapabilitiesKHR::supportedUsageFlags is only only valid for some present modes. For shared present modes, we should also check VkSharedPresentSurfaceCapabilitiesKHR::sharedPresentSupportedUsageFlags. 
+ auto is_unextended_present_mode = [](VkPresentModeKHR present_mode) { + return (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) || (present_mode == VK_PRESENT_MODE_MAILBOX_KHR) || + (present_mode == VK_PRESENT_MODE_FIFO_KHR) || (present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR); + }; + + if (is_unextended_present_mode(present_mode) && + (info.image_usage_flags & surface_support.capabilities.supportedUsageFlags) != info.image_usage_flags) { + return Result{ SwapchainError::required_usage_not_supported }; + } + + VkSurfaceTransformFlagBitsKHR pre_transform = info.pre_transform; + if (info.pre_transform == static_cast(0)) + pre_transform = surface_support.capabilities.currentTransform; + + VkSwapchainCreateInfoKHR swapchain_create_info = {}; + swapchain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; + detail::setup_pNext_chain(swapchain_create_info, info.pNext_chain); + + swapchain_create_info.flags = info.create_flags; + swapchain_create_info.surface = info.surface; + swapchain_create_info.minImageCount = image_count; + swapchain_create_info.imageFormat = surface_format.format; + swapchain_create_info.imageColorSpace = surface_format.colorSpace; + swapchain_create_info.imageExtent = extent; + swapchain_create_info.imageArrayLayers = image_array_layers; + swapchain_create_info.imageUsage = info.image_usage_flags; + + if (info.graphics_queue_index != info.present_queue_index) { + swapchain_create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT; + swapchain_create_info.queueFamilyIndexCount = 2; + swapchain_create_info.pQueueFamilyIndices = queue_family_indices; + } else { + swapchain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; + } + + swapchain_create_info.preTransform = pre_transform; + swapchain_create_info.compositeAlpha = info.composite_alpha; + swapchain_create_info.presentMode = present_mode; + swapchain_create_info.clipped = info.clipped; + swapchain_create_info.oldSwapchain = info.old_swapchain; + Swapchain swapchain{}; + 
PFN_vkCreateSwapchainKHR swapchain_create_proc; + detail::vulkan_functions().get_device_proc_addr(info.device, swapchain_create_proc, "vkCreateSwapchainKHR"); + auto res = swapchain_create_proc(info.device, &swapchain_create_info, info.allocation_callbacks, &swapchain.swapchain); + + if (res != VK_SUCCESS) { + return Result{ SwapchainError::failed_create_swapchain, res }; + } + swapchain.device = info.device; + swapchain.image_format = surface_format.format; + swapchain.color_space = surface_format.colorSpace; + swapchain.image_usage_flags = info.image_usage_flags; + swapchain.extent = extent; + detail::vulkan_functions().get_device_proc_addr( + info.device, swapchain.internal_table.fp_vkGetSwapchainImagesKHR, "vkGetSwapchainImagesKHR"); + detail::vulkan_functions().get_device_proc_addr(info.device, swapchain.internal_table.fp_vkCreateImageView, "vkCreateImageView"); + detail::vulkan_functions().get_device_proc_addr(info.device, swapchain.internal_table.fp_vkDestroyImageView, "vkDestroyImageView"); + detail::vulkan_functions().get_device_proc_addr( + info.device, swapchain.internal_table.fp_vkDestroySwapchainKHR, "vkDestroySwapchainKHR"); + auto images = swapchain.get_images(); + if (!images) { + return Result{ SwapchainError::failed_get_swapchain_images }; + } + swapchain.requested_min_image_count = image_count; + swapchain.present_mode = present_mode; + swapchain.image_count = static_cast(images.value().size()); + swapchain.instance_version = info.instance_version; + swapchain.allocation_callbacks = info.allocation_callbacks; + return swapchain; +} +Result> Swapchain::get_images() { + std::vector swapchain_images; + + auto swapchain_images_ret = + detail::get_vector(swapchain_images, internal_table.fp_vkGetSwapchainImagesKHR, device, swapchain); + if (swapchain_images_ret != VK_SUCCESS) { + return Result>{ SwapchainError::failed_get_swapchain_images, swapchain_images_ret }; + } + return swapchain_images; +} +Result> Swapchain::get_image_views() { return 
get_image_views(nullptr); } +Result> Swapchain::get_image_views(const void* pNext) { + const auto swapchain_images_ret = get_images(); + if (!swapchain_images_ret) return swapchain_images_ret.error(); + const auto& swapchain_images = swapchain_images_ret.value(); + + bool already_contains_image_view_usage = false; + while (pNext) { + if (reinterpret_cast(pNext)->sType == VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO) { + already_contains_image_view_usage = true; + break; + } + pNext = reinterpret_cast(pNext)->pNext; + } + VkImageViewUsageCreateInfo desired_flags{}; + desired_flags.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO; + desired_flags.pNext = pNext; + desired_flags.usage = image_usage_flags; + + std::vector views(swapchain_images.size()); + for (size_t i = 0; i < swapchain_images.size(); i++) { + VkImageViewCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + if (instance_version >= VKB_VK_API_VERSION_1_1 && !already_contains_image_view_usage) { + createInfo.pNext = &desired_flags; + } else { + createInfo.pNext = pNext; + } + + createInfo.image = swapchain_images[i]; + createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; + createInfo.format = image_format; + createInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; + createInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY; + createInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; + createInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY; + createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + createInfo.subresourceRange.baseMipLevel = 0; + createInfo.subresourceRange.levelCount = 1; + createInfo.subresourceRange.baseArrayLayer = 0; + createInfo.subresourceRange.layerCount = 1; + VkResult res = internal_table.fp_vkCreateImageView(device, &createInfo, allocation_callbacks, &views[i]); + if (res != VK_SUCCESS) { + // Cleanup already created image views + destroy_image_views(i, views.data()); + return Result>{ 
SwapchainError::failed_create_swapchain_image_views, res }; + } + } + return views; +} +void Swapchain::destroy_image_views(size_t count, VkImageView const* image_views) { + for (size_t i = 0; i < count; ++i) { + internal_table.fp_vkDestroyImageView(device, image_views[i], allocation_callbacks); + } +} +void Swapchain::destroy_image_views(std::vector const& image_views) { + destroy_image_views(image_views.size(), image_views.data()); +} +#if VKB_SPAN_OVERLOADS +void Swapchain::destroy_image_views(std::span image_views) { + destroy_image_views(image_views.size(), image_views.data()); +} +#endif +Swapchain::operator VkSwapchainKHR() const { return this->swapchain; } +SwapchainBuilder& SwapchainBuilder::set_old_swapchain(VkSwapchainKHR old_swapchain) { + info.old_swapchain = old_swapchain; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_old_swapchain(Swapchain const& swapchain) { + info.old_swapchain = swapchain.swapchain; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_desired_extent(uint32_t width, uint32_t height) { + info.desired_width = width; + info.desired_height = height; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_desired_format(VkSurfaceFormatKHR format) { + info.desired_formats.insert(info.desired_formats.begin(), format); + return *this; +} +SwapchainBuilder& SwapchainBuilder::add_fallback_format(VkSurfaceFormatKHR format) { + info.desired_formats.push_back(format); + return *this; +} +SwapchainBuilder& SwapchainBuilder::use_default_format_selection() { + info.desired_formats.clear(); + add_desired_formats(info.desired_formats); + return *this; +} + +SwapchainBuilder& SwapchainBuilder::set_desired_present_mode(VkPresentModeKHR present_mode) { + info.desired_present_modes.insert(info.desired_present_modes.begin(), present_mode); + return *this; +} +SwapchainBuilder& SwapchainBuilder::add_fallback_present_mode(VkPresentModeKHR present_mode) { + info.desired_present_modes.push_back(present_mode); + return *this; +} 
+SwapchainBuilder& SwapchainBuilder::use_default_present_mode_selection() { + info.desired_present_modes.clear(); + add_desired_present_modes(info.desired_present_modes); + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_allocation_callbacks(VkAllocationCallbacks* callbacks) { + info.allocation_callbacks = callbacks; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_image_usage_flags(VkImageUsageFlags usage_flags) { + info.image_usage_flags = usage_flags; + return *this; +} +SwapchainBuilder& SwapchainBuilder::add_image_usage_flags(VkImageUsageFlags usage_flags) { + info.image_usage_flags = info.image_usage_flags | usage_flags; + return *this; +} +SwapchainBuilder& SwapchainBuilder::use_default_image_usage_flags() { + info.image_usage_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_image_array_layer_count(uint32_t array_layer_count) { + info.array_layer_count = array_layer_count; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_desired_min_image_count(uint32_t min_image_count) { + info.min_image_count = min_image_count; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_required_min_image_count(uint32_t required_min_image_count) { + info.required_min_image_count = required_min_image_count; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_clipped(bool clipped) { + info.clipped = clipped; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_create_flags(VkSwapchainCreateFlagBitsKHR create_flags) { + info.create_flags = create_flags; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_pre_transform_flags(VkSurfaceTransformFlagBitsKHR pre_transform_flags) { + info.pre_transform = pre_transform_flags; + return *this; +} +SwapchainBuilder& SwapchainBuilder::set_composite_alpha_flags(VkCompositeAlphaFlagBitsKHR composite_alpha_flags) { + info.composite_alpha = composite_alpha_flags; + return *this; +} + +void 
SwapchainBuilder::add_desired_formats(std::vector& formats) const { + formats.push_back({ VK_FORMAT_B8G8R8A8_SRGB, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR }); + formats.push_back({ VK_FORMAT_R8G8B8A8_SRGB, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR }); +} +void SwapchainBuilder::add_desired_present_modes(std::vector& modes) const { + modes.push_back(VK_PRESENT_MODE_MAILBOX_KHR); + modes.push_back(VK_PRESENT_MODE_FIFO_KHR); +} +} // namespace vkb diff --git a/extern/vk-bootstrap/src/VkBootstrap.h b/extern/vk-bootstrap/src/VkBootstrap.h new file mode 100644 index 0000000000..fd99c9a73a --- /dev/null +++ b/extern/vk-bootstrap/src/VkBootstrap.h @@ -0,0 +1,1017 @@ +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the “Software”), to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Copyright © 2020 Charles Giessen (charles@lunarg.com) + */ + +#pragma once + +#include +#include +#include +#include + +#if __cplusplus >= 202002L +#include +#endif +#if defined(__cpp_lib_span) +#define VKB_SPAN_OVERLOADS 1 +#elif !defined(VKB_SPAN_OVERLOADS) +#define VKB_SPAN_OVERLOADS 0 +#endif + +#if VKB_SPAN_OVERLOADS +#include +#endif + +#include +#include +#include +#include + +#include + +#include "VkBootstrapDispatch.h" +#include "VkBootstrapFeatureChain.h" + +#ifdef VK_MAKE_API_VERSION +#define VKB_MAKE_VK_VERSION(variant, major, minor, patch) VK_MAKE_API_VERSION(variant, major, minor, patch) +#elif defined(VK_MAKE_VERSION) +#define VKB_MAKE_VK_VERSION(variant, major, minor, patch) VK_MAKE_VERSION(major, minor, patch) +#endif + +#if defined(VK_API_VERSION_1_4) || defined(VK_VERSION_1_4) +#define VKB_VK_API_VERSION_1_4 VKB_MAKE_VK_VERSION(0, 1, 4, 0) +#endif + +#if defined(VK_API_VERSION_1_3) || defined(VK_VERSION_1_3) +#define VKB_VK_API_VERSION_1_3 VKB_MAKE_VK_VERSION(0, 1, 3, 0) +#endif + +#if defined(VK_API_VERSION_1_2) || defined(VK_VERSION_1_2) +#define VKB_VK_API_VERSION_1_2 VKB_MAKE_VK_VERSION(0, 1, 2, 0) +#endif + +#if defined(VK_API_VERSION_1_1) || defined(VK_VERSION_1_1) +#define VKB_VK_API_VERSION_1_1 VKB_MAKE_VK_VERSION(0, 1, 1, 0) +#endif + +#if defined(VK_API_VERSION_1_0) || defined(VK_VERSION_1_0) +#define VKB_VK_API_VERSION_1_0 VKB_MAKE_VK_VERSION(0, 1, 0, 0) +#endif + +namespace vkb { + +// Currently GCC's maybe-uninitialized warning gets tripped when std::variant<> contains a std::vector<>, silence it for the meantime +#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif +struct Error { + std::error_code type; + VkResult vk_result = VK_SUCCESS; // optional error value if a vulkan call failed + std::vector detailed_failure_reasons; // optional list of reasons why the operation failed - mainly used to return why 
VkPhysicalDevices failed to be selected +}; +#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif + +// patched because we still support macOS 10.10-10.11? and it doesn't fully support C++17's std::variant +template +class Result { +public: + Result(const T& value) : m_hasValue(true), m_value(new T(value)), m_error() {} + Result(T&& value) : m_hasValue(true), m_value(new T(std::move(value))), m_error() {} + + Result(const Error& error) : m_hasValue(false), m_value(), m_error(new Error(error)) {} + Result(Error&& error) : m_hasValue(false), m_value(), m_error(new Error(std::move(error))) {} + + Result(std::error_code error_code, VkResult result = VK_SUCCESS) + : m_hasValue(false), m_value(), m_error(new Error{ error_code, result, {} }) {} + + Result(std::error_code error_code, std::vector const& detailed_failure_reasons) + : m_hasValue(false), m_value(), m_error(new Error{ error_code, VK_SUCCESS, detailed_failure_reasons }) {} + + Result(const Result&) = delete; + Result& operator=(const Result&) = delete; + Result(Result&&) = default; + Result& operator=(Result&&) = default; + + T* operator->() { return m_value.get(); } + const T* operator->() const { return m_value.get(); } + T& operator*() & { return *m_value; } + const T& operator*() const& { return *m_value; } + T operator*() && { return std::move(*m_value); } + T& value() & { return *m_value; } + const T& value() const& { return *m_value; } + T value() && { return std::move(*m_value); } + + std::error_code error() const { return m_error->type; } + VkResult vk_result() const { return m_error->vk_result; } + Error full_error() const { return *m_error; } + const std::vector& detailed_failure_reasons() const { + return m_error->detailed_failure_reasons; + } + + bool has_value() const { return m_hasValue; } + explicit operator bool() const { return m_hasValue; } + + template + bool matches_error(E error_enum_value) const { + return !m_hasValue && 
static_cast(m_error->type.value()) == error_enum_value; + } + +private: + bool m_hasValue; + std::unique_ptr m_value; + std::unique_ptr m_error; +}; + +namespace detail { +class FeaturesChain { + struct StructInfo { + VkStructureType sType{}; + size_t starting_location{}; + size_t struct_size{}; + }; + std::vector structure_infos; + std::vector structures; + + std::vector::const_iterator find_sType(VkStructureType sType) const; + + public: + bool empty() const; + + bool is_feature_struct_in_chain(VkStructureType sType) const; + + // Add a features structure to the FeaturesChain if it isn't present. If it is, merge the already existing structure with structure + void add_structure(VkStructureType sType, size_t struct_size, const void* structure); + + // If a structure with sType exists, remove it from the FeatureChain + void remove_structure(VkStructureType sType); + + // Return true if this FeatureChain contains an sType struct and all of the true fields in structure are also true in the FeatureChain struct + bool match(VkStructureType sType, const void* structure) const; + + // Add to the error_list all structure fields in requested_features_chain not present in this chain + void match_all(std::vector& error_list, FeaturesChain const& requested_features_chain) const; + + void create_chained_features(VkPhysicalDeviceFeatures2& features2); + + std::vector get_pNext_chain_members(); +}; + +} // namespace detail + +enum class InstanceError { + vulkan_unavailable, + vulkan_version_unavailable, + vulkan_version_1_1_unavailable, + vulkan_version_1_2_unavailable, + vulkan_version_1_3_unavailable, + vulkan_version_1_4_unavailable, + failed_create_instance, + failed_create_debug_messenger, + requested_layers_not_present, + requested_extensions_not_present, + windowing_extensions_not_present, +}; +enum class PhysicalDeviceError { + no_surface_provided, + failed_enumerate_physical_devices, + no_physical_devices_found, + no_suitable_device, +}; +enum class QueueError { + 
present_unavailable, + graphics_unavailable, + compute_unavailable, + transfer_unavailable, + queue_index_out_of_range, + invalid_queue_family_index +}; +enum class DeviceError { + failed_create_device, + VkPhysicalDeviceFeatures2_in_pNext_chain_while_using_add_required_extension_features, +}; +enum class SwapchainError { + surface_handle_not_provided, + failed_query_surface_support_details, + failed_create_swapchain, + failed_get_swapchain_images, + failed_create_swapchain_image_views, + required_min_image_count_too_low, + required_usage_not_supported +}; + +std::error_code make_error_code(InstanceError instance_error); +std::error_code make_error_code(PhysicalDeviceError physical_device_error); +std::error_code make_error_code(QueueError queue_error); +std::error_code make_error_code(DeviceError device_error); +std::error_code make_error_code(SwapchainError swapchain_error); + +const char* to_string_message_severity(VkDebugUtilsMessageSeverityFlagBitsEXT s); +const char* to_string_message_type(VkDebugUtilsMessageTypeFlagsEXT s); + +const char* to_string(InstanceError err); +const char* to_string(PhysicalDeviceError err); +const char* to_string(QueueError err); +const char* to_string(DeviceError err); +const char* to_string(SwapchainError err); + +// Gathers useful information about the available vulkan capabilities, like layers and instance +// extensions. Use this for enabling features conditionally, ie if you would like an extension but +// can use a fallback if it isn't supported but need to know if support is available first. +struct SystemInfo { + private: + SystemInfo(); + + public: + // Use get_system_info to create a SystemInfo struct. This is because loading vulkan could fail. 
+ static Result get_system_info(); + static Result get_system_info(PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr); + + // Returns true if a layer is available + bool is_layer_available(const char* layer_name) const; + // Returns true if an extension is available + bool is_extension_available(const char* extension_name) const; + // Returns true if the Instance API Version is greater than or equal to the specified version + bool is_instance_version_available(uint32_t major_api_version, uint32_t minor_api_version); + // Returns true if the Instance API Version is greater than or equal to the specified version. + // Should be constructed with VK_MAKE_VERSION or VK_MAKE_API_VERSION. + bool is_instance_version_available(uint32_t api_version); + + std::vector available_layers; + std::vector available_extensions; + bool validation_layers_available = false; + bool debug_utils_available = false; + + uint32_t instance_api_version = VKB_VK_API_VERSION_1_0; +}; + +// Forward declared - check VkBoostrap.cpp for implementations +const char* to_string_message_severity(VkDebugUtilsMessageSeverityFlagBitsEXT s); +const char* to_string_message_type(VkDebugUtilsMessageTypeFlagsEXT s); + +// Default debug messenger +// Feel free to copy-paste it into your own code, change it as needed, then call `set_debug_callback()` to use that instead +inline VKAPI_ATTR VkBool32 VKAPI_CALL default_debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageType, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void*) { + auto ms = to_string_message_severity(messageSeverity); + auto mt = to_string_message_type(messageType); + if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) { + printf("[%s: %s] - %s\n%s\n", ms, mt, pCallbackData->pMessageIdName, pCallbackData->pMessage); + } else { + printf("[%s: %s]\n%s\n", ms, mt, pCallbackData->pMessage); + } + + return VK_FALSE; // Applications must return false here (Except 
Validation, if return true, will skip calling to driver) +} + +class InstanceBuilder; +class PhysicalDeviceSelector; + +struct Instance { + VkInstance instance = VK_NULL_HANDLE; + VkDebugUtilsMessengerEXT debug_messenger = VK_NULL_HANDLE; + VkAllocationCallbacks* allocation_callbacks = nullptr; + PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr = nullptr; + PFN_vkGetDeviceProcAddr fp_vkGetDeviceProcAddr = nullptr; + // The apiVersion used to create the instance + uint32_t instance_version = VKB_VK_API_VERSION_1_0; + // The instance version queried from vkEnumerateInstanceVersion + uint32_t api_version = VKB_VK_API_VERSION_1_0; + + // A conversion function which allows this Instance to be used + // in places where VkInstance would have been used. + operator VkInstance() const; + + // Return a loaded instance dispatch table + InstanceDispatchTable make_table() const; + + private: + bool headless = false; + bool properties2_ext_enabled = false; + + friend class InstanceBuilder; + friend class PhysicalDeviceSelector; +}; + +void destroy_surface(Instance const& instance, VkSurfaceKHR surface); // release surface handle +void destroy_surface(VkInstance instance, VkSurfaceKHR surface, VkAllocationCallbacks* callbacks = nullptr); // release surface handle +void destroy_instance(Instance const& instance); // release instance resources + +/* If headless mode is false, by default vk-bootstrap use the following logic to enable the windowing extensions + +#if defined(_WIN32) + VK_KHR_win32_surface +#elif defined(__ANDROID__) + VK_KHR_android_surface +#elif defined(_DIRECT2DISPLAY) + VK_KHR_display +#elif defined(__linux__) || defined(__FreeBSD__) + VK_KHR_xcb_surface + VK_KHR_xlib_surface + VK_KHR_wayland_surface +#elif defined(__APPLE__) + VK_EXT_metal_surface +#endif + +Use `InstanceBuilder::enable_extension()` to add new extensions without altering the default behavior +Feel free to make a PR or raise an issue to include additional platforms. 
+*/ + +class InstanceBuilder { + public: + // Default constructor, will load vulkan. + explicit InstanceBuilder(); + // Optional: Can use your own PFN_vkGetInstanceProcAddr + explicit InstanceBuilder(PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr); + + // Create a VkInstance. Return an error if it failed. + Result build() const; + + // Sets the name of the application. Defaults to "" if none is provided. + InstanceBuilder& set_app_name(const char* app_name); + // Sets the name of the engine. Defaults to "" if none is provided. + InstanceBuilder& set_engine_name(const char* engine_name); + + // Sets the version of the application. + // Should be constructed with VK_MAKE_VERSION or VK_MAKE_API_VERSION. + InstanceBuilder& set_app_version(uint32_t app_version); + // Sets the (major, minor, patch) version of the application. + InstanceBuilder& set_app_version(uint32_t major, uint32_t minor, uint32_t patch = 0); + + // Sets the version of the engine. + // Should be constructed with VK_MAKE_VERSION or VK_MAKE_API_VERSION. + InstanceBuilder& set_engine_version(uint32_t engine_version); + // Sets the (major, minor, patch) version of the engine. + InstanceBuilder& set_engine_version(uint32_t major, uint32_t minor, uint32_t patch = 0); + + // Require a vulkan API version. Will fail to create if this version isn't available. + // Should be constructed with VK_MAKE_VERSION or VK_MAKE_API_VERSION. + InstanceBuilder& require_api_version(uint32_t required_api_version); + // Require a vulkan API version. Will fail to create if this version isn't available. + InstanceBuilder& require_api_version(uint32_t major, uint32_t minor, uint32_t patch = 0); + + // Overrides required API version for instance creation. Will fail to create if this version isn't available. + // Should be constructed with VK_MAKE_VERSION or VK_MAKE_API_VERSION. + InstanceBuilder& set_minimum_instance_version(uint32_t minimum_instance_version); + // Overrides required API version for instance creation. 
Will fail to create if this version isn't available. + InstanceBuilder& set_minimum_instance_version(uint32_t major, uint32_t minor, uint32_t patch = 0); + + // Adds a layer to be enabled. Will fail to create an instance if the layer isn't available. + InstanceBuilder& enable_layer(const char* layer_name); + // Adds an extension to be enabled. Will fail to create an instance if the extension isn't available. + InstanceBuilder& enable_extension(const char* extension_name); + + // Add extensions to be enabled. Will fail to create an instance if the extension aren't available. + InstanceBuilder& enable_extensions(size_t count, const char* const* extensions); + + // Add extensions to be enabled. Will fail to create an instance if the extension aren't available. + InstanceBuilder& enable_extensions(std::vector const& extensions) { + return enable_extensions(extensions.size(), extensions.data()); + } + +#if VKB_SPAN_OVERLOADS + // Add extensions to be enabled. Will fail to create an instance if the extension aren't available. + InstanceBuilder& enable_extensions(std::span extensions) { + return enable_extensions(extensions.size(), extensions.data()); + } +#endif + + // Headless Mode does not load the required extensions for presentation. Defaults to true. + InstanceBuilder& set_headless(bool headless = true); + + // Enables the validation layers. Will fail to create an instance if the validation layers aren't available. + InstanceBuilder& enable_validation_layers(bool require_validation = true); + // Checks if the validation layers are available and loads them if they are. + InstanceBuilder& request_validation_layers(bool enable_validation = true); + + // Use a default debug callback that prints to standard out. + InstanceBuilder& use_default_debug_messenger(); + // Provide a user defined debug callback. 
+ InstanceBuilder& set_debug_callback(PFN_vkDebugUtilsMessengerCallbackEXT callback); + // Sets the void* to use in the debug messenger - only useful with a custom callback + InstanceBuilder& set_debug_callback_user_data_pointer(void* user_data_pointer); + // Set what message severity is needed to trigger the callback. + InstanceBuilder& set_debug_messenger_severity(VkDebugUtilsMessageSeverityFlagsEXT severity); + // Add a message severity to the list that triggers the callback. + InstanceBuilder& add_debug_messenger_severity(VkDebugUtilsMessageSeverityFlagsEXT severity); + // Set what message type triggers the callback. + InstanceBuilder& set_debug_messenger_type(VkDebugUtilsMessageTypeFlagsEXT type); + // Add a message type to the list of that triggers the callback. + InstanceBuilder& add_debug_messenger_type(VkDebugUtilsMessageTypeFlagsEXT type); + + // Disable some validation checks. + // Checks: All, and Shaders + InstanceBuilder& add_validation_disable(VkValidationCheckEXT check); + + // Enables optional parts of the validation layers. + // Parts: best practices, gpu assisted, and gpu assisted reserve binding slot. + InstanceBuilder& add_validation_feature_enable(VkValidationFeatureEnableEXT enable); + + // Disables sections of the validation layers. + // Options: All, shaders, thread safety, api parameters, object lifetimes, core checks, and unique handles. + InstanceBuilder& add_validation_feature_disable(VkValidationFeatureDisableEXT disable); + + // Provide custom allocation callbacks. 
+ InstanceBuilder& set_allocation_callbacks(VkAllocationCallbacks* callbacks); + + // Set a setting on a requested layer via VK_EXT_layer_settings + InstanceBuilder& add_layer_setting(VkLayerSettingEXT setting); + + private: + struct InstanceInfo { + // VkApplicationInfo + const char* app_name = nullptr; + const char* engine_name = nullptr; + uint32_t application_version = 0; + uint32_t engine_version = 0; + uint32_t minimum_instance_version = 0; + uint32_t required_api_version = VKB_VK_API_VERSION_1_0; + + // VkInstanceCreateInfo + std::vector layers; + std::vector extensions; + VkInstanceCreateFlags flags = static_cast(0); + std::vector layer_settings; + + // debug callback - use the default so it is not nullptr + PFN_vkDebugUtilsMessengerCallbackEXT debug_callback = default_debug_callback; + VkDebugUtilsMessageSeverityFlagsEXT debug_message_severity = + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + VkDebugUtilsMessageTypeFlagsEXT debug_message_type = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + void* debug_user_data_pointer = nullptr; + + // validation features + std::vector disabled_validation_checks; + std::vector enabled_validation_features; + std::vector disabled_validation_features; + + // Custom allocator + VkAllocationCallbacks* allocation_callbacks = nullptr; + + bool request_validation_layers = false; + bool enable_validation_layers = false; + bool use_debug_messenger = false; + bool headless_context = false; + + PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr = nullptr; + } info; +}; + +VKAPI_ATTR VkBool32 VKAPI_CALL default_debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageType, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +void destroy_debug_utils_messenger( + VkInstance const instance, 
VkDebugUtilsMessengerEXT const messenger, VkAllocationCallbacks* allocation_callbacks = nullptr);
+
+// ---- Physical Device ---- //
+class PhysicalDeviceSelector;
+class DeviceBuilder;
+
+struct PhysicalDevice {
+ std::string name;
+ VkPhysicalDevice physical_device = VK_NULL_HANDLE;
+ VkSurfaceKHR surface = VK_NULL_HANDLE;
+
+ // Note that this reflects selected features carried over from required features, not all features the physical device supports.
+ VkPhysicalDeviceFeatures features{};
+ VkPhysicalDeviceProperties properties{};
+ VkPhysicalDeviceMemoryProperties memory_properties{};
+
+ // Has a queue family that supports compute operations but not graphics nor transfer.
+ bool has_dedicated_compute_queue() const;
+ // Has a queue family that supports transfer operations but not graphics nor compute.
+ bool has_dedicated_transfer_queue() const;
+
+ // Has a queue family that supports compute operations but not graphics.
+ bool has_separate_compute_queue() const;
+ // Has a queue family that supports transfer operations but not graphics.
+ bool has_separate_transfer_queue() const;
+
+ // Advanced: Get the VkQueueFamilyProperties of the device if special queue setup is needed
+ std::vector get_queue_families() const;
+
+ // Query the list of extensions which should be enabled
+ std::vector get_extensions() const;
+
+ // Query the list of extensions which the physical device supports
+ std::vector get_available_extensions() const;
+
+ // Returns true if an extension should be enabled on the device
+ bool is_extension_present(const char* extension) const;
+
+ // Returns true if all the features are present
+ template bool are_extension_features_present(T const& features) const {
+ return extended_features_chain.match(static_cast(features.sType), &features);
+ }
+
+ // If the given extension is present, make the extension be enabled on the device.
+ // Returns true if the extension is present. 
+ bool enable_extension_if_present(const char* extension); + + // If all the given extensions are present, make all the extensions be enabled on the device. + // Returns true if all the extensions are present. + bool enable_extensions_if_present(size_t count, const char* const* extensions); + bool enable_extensions_if_present(const std::vector& extensions) { + return enable_extensions_if_present(extensions.size(), extensions.data()); + } + +#if VKB_SPAN_OVERLOADS + bool enable_extensions_if_present(std::span extensions) { + return enable_extensions_if_present(extensions.size(), extensions.data()); + } +#endif + + // If the features from VkPhysicalDeviceFeatures are all present, make all of the features be enable on the device. + // Returns true if all the features are present. + bool enable_features_if_present(const VkPhysicalDeviceFeatures& features_to_enable); + + // If the features from the provided features struct are all present, make all of the features be enable on the + // device. Returns true if all of the features are present. + template bool enable_extension_features_if_present(T const& features_check) { + T scratch_space_struct{}; + scratch_space_struct.sType = features_check.sType; + return enable_features_struct_if_present( + static_cast(features_check.sType), sizeof(T), &features_check, &scratch_space_struct); + } + + // A conversion function which allows this PhysicalDevice to be used + // in places where VkPhysicalDevice would have been used. 
+ operator VkPhysicalDevice() const;
+
+ private:
+ uint32_t instance_version = VKB_VK_API_VERSION_1_0;
+ std::vector extensions_to_enable;
+ std::vector available_extensions;
+ std::vector queue_families;
+ detail::FeaturesChain extended_features_chain;
+
+ bool defer_surface_initialization = false;
+ bool properties2_ext_enabled = false;
+ enum class Suitable { yes, partial, no };
+ Suitable suitable = Suitable::yes;
+ friend class PhysicalDeviceSelector;
+ friend class DeviceBuilder;
+
+ bool enable_features_struct_if_present(VkStructureType sType, size_t struct_size, const void* features_struct, void* query_struct);
+};
+
+enum class PreferredDeviceType { other = 0, integrated = 1, discrete = 2, virtual_gpu = 3, cpu = 4 };
+
+
+// Enumerates the physical devices on the system, and based on the added criteria, returns a physical device or list of
+// physical devices. A device is considered suitable if it meets all the 'required' criteria.
+class PhysicalDeviceSelector {
+ public:
+ // Requires a vkb::Instance to construct, needed to pass instance creation info.
+ explicit PhysicalDeviceSelector(Instance const& instance);
+ // Requires a vkb::Instance to construct, needed to pass instance creation info, optionally specify the surface here
+ explicit PhysicalDeviceSelector(Instance const& instance, VkSurfaceKHR surface);
+
+ // Return the first device which is suitable
+ // use the `selection` parameter to configure if partially
+ Result select() const;
+
+ // Return all devices which are considered suitable - intended for applications which want to let the user pick the physical device
+ Result> select_devices() const;
+
+ // Return the names of all devices which are considered suitable - intended for applications which want to let the user pick the physical device
+ Result> select_device_names() const;
+
+ // Set the surface in which the physical device should render to.
+ // Be sure to set it if swapchain functionality is to be used. 
+ PhysicalDeviceSelector& set_surface(VkSurfaceKHR surface); + + // Set the name of the device to select. + PhysicalDeviceSelector& set_name(std::string const& name); + // Set the desired physical device type to select. Defaults to PreferredDeviceType::discrete. + PhysicalDeviceSelector& prefer_gpu_device_type(PreferredDeviceType type = PreferredDeviceType::discrete); + // Allow selection of a gpu device type that isn't the preferred physical device type. Defaults to true. + PhysicalDeviceSelector& allow_any_gpu_device_type(bool allow_any_type = true); + + // Require that a physical device supports presentation. Defaults to true. + PhysicalDeviceSelector& require_present(bool require = true); + + // Require a queue family that supports compute operations but not graphics nor transfer. + PhysicalDeviceSelector& require_dedicated_compute_queue(); + // Require a queue family that supports transfer operations but not graphics nor compute. + PhysicalDeviceSelector& require_dedicated_transfer_queue(); + + // Require a queue family that supports compute operations but not graphics. + PhysicalDeviceSelector& require_separate_compute_queue(); + // Require a queue family that supports transfer operations but not graphics. + PhysicalDeviceSelector& require_separate_transfer_queue(); + + // Require a memory heap from VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT with `size` memory available. + PhysicalDeviceSelector& required_device_memory_size(VkDeviceSize size); + + // Require a physical device which supports a specific extension. + PhysicalDeviceSelector& add_required_extension(const char* extension); + // Require a physical device which supports a set of extensions. 
+ PhysicalDeviceSelector& add_required_extensions(size_t count, const char* const* extensions); + PhysicalDeviceSelector& add_required_extensions(std::vector const& extensions) { + return add_required_extensions(extensions.size(), extensions.data()); + } + +#if VKB_SPAN_OVERLOADS + // Require a physical device which supports a set of extensions. + PhysicalDeviceSelector& add_required_extensions(std::span extensions) { + return add_required_extensions(extensions.size(), extensions.data()); + } +#endif + + // Require a physical device that supports a (major, minor) version of vulkan. + PhysicalDeviceSelector& set_minimum_version(uint32_t major, uint32_t minor); + + // By default PhysicalDeviceSelector enables the portability subset if available + // This function disables that behavior + PhysicalDeviceSelector& disable_portability_subset(); + // Require a physical device which supports a specific set of general/extension features. + // If this function is used, the user should not put their own VkPhysicalDeviceFeatures2 in + // the pNext chain of VkDeviceCreateInfo. + template PhysicalDeviceSelector& add_required_extension_features(T const& features) { + criteria.extended_features_chain.add_structure(static_cast(features.sType), sizeof(T), &features); + return *this; + } + + // Require a physical device which supports the features in VkPhysicalDeviceFeatures. + PhysicalDeviceSelector& set_required_features(VkPhysicalDeviceFeatures const& features); +#if defined(VKB_VK_API_VERSION_1_2) + // Require a physical device which supports the features in VkPhysicalDeviceVulkan11Features. + // Must have vulkan version 1.2 - This is due to the VkPhysicalDeviceVulkan11Features struct being added in 1.2, not 1.1 + PhysicalDeviceSelector& set_required_features_11(VkPhysicalDeviceVulkan11Features const& features_11); + // Require a physical device which supports the features in VkPhysicalDeviceVulkan12Features. 
+ // Must have vulkan version 1.2
+ PhysicalDeviceSelector& set_required_features_12(VkPhysicalDeviceVulkan12Features const& features_12);
+#endif
+#if defined(VKB_VK_API_VERSION_1_3)
+ // Require a physical device which supports the features in VkPhysicalDeviceVulkan13Features.
+ // Must have vulkan version 1.3
+ PhysicalDeviceSelector& set_required_features_13(VkPhysicalDeviceVulkan13Features const& features_13);
+#endif
+#if defined(VKB_VK_API_VERSION_1_4)
+ // Require a physical device which supports the features in VkPhysicalDeviceVulkan14Features.
+ // Must have vulkan version 1.4
+ PhysicalDeviceSelector& set_required_features_14(VkPhysicalDeviceVulkan14Features const& features_14);
+#endif
+
+ // Used when surface creation happens after physical device selection.
+ // Warning: This disables checking if the physical device supports a given surface.
+ PhysicalDeviceSelector& defer_surface_initialization();
+
+ // Ignore all criteria and choose the first physical device that is available.
+ // Only use when: The first gpu in the list may be set by global user preferences and an application may wish to respect it.
+ PhysicalDeviceSelector& select_first_device_unconditionally(bool unconditionally = true);
+
+ private:
+ struct InstanceInfo {
+ VkInstance instance = VK_NULL_HANDLE;
+ VkSurfaceKHR surface = VK_NULL_HANDLE;
+ uint32_t version = VKB_VK_API_VERSION_1_0;
+ bool headless = false;
+ bool properties2_ext_enabled = false;
+ } instance_info;
+
+ // We copy the extension features stored in the selector criteria under the guise of a
+ // "template" to ensure that after fetching everything is compared 1:1 during a match. 
+ + struct SelectionCriteria { + std::string name; + PreferredDeviceType preferred_type = PreferredDeviceType::discrete; + bool allow_any_type = true; + bool require_present = true; + bool require_dedicated_transfer_queue = false; + bool require_dedicated_compute_queue = false; + bool require_separate_transfer_queue = false; + bool require_separate_compute_queue = false; + VkDeviceSize required_mem_size = 0; + + std::vector required_extensions; + + uint32_t required_version = VKB_VK_API_VERSION_1_0; + + VkPhysicalDeviceFeatures required_features{}; + VkPhysicalDeviceFeatures2 required_features2{}; + + detail::FeaturesChain extended_features_chain; + bool defer_surface_initialization = false; + bool use_first_gpu_unconditionally = false; + bool enable_portability_subset = true; + } criteria; + + PhysicalDevice populate_device_details(VkPhysicalDevice phys_device, detail::FeaturesChain const& src_extended_features_chain) const; + + PhysicalDevice::Suitable is_device_suitable( + PhysicalDevice const& phys_device, std::vector& unsuitability_reasons) const; +}; + +// ---- Queue ---- // +enum class QueueType { present, graphics, compute, transfer }; + +namespace detail { +// Sentinel value, used in implementation only +inline const uint32_t QUEUE_INDEX_MAX_VALUE = UINT32_MAX; +} // namespace detail + +// ---- Device ---- // + +struct Device { + VkDevice device = VK_NULL_HANDLE; + PhysicalDevice physical_device; + VkSurfaceKHR surface = VK_NULL_HANDLE; + std::vector queue_families; + VkAllocationCallbacks* allocation_callbacks = nullptr; + PFN_vkGetDeviceProcAddr fp_vkGetDeviceProcAddr = nullptr; + uint32_t instance_version = VKB_VK_API_VERSION_1_0; + + Result get_queue_index(QueueType type) const; + // Only a compute or transfer queue type is valid. All other queue types do not support a 'dedicated' queue index + Result get_dedicated_queue_index(QueueType type) const; + + Result get_queue(QueueType type) const; + // Only a compute or transfer queue type is valid. 
All other queue types do not support a 'dedicated' queue + Result get_dedicated_queue(QueueType type) const; + + // Return a loaded dispatch table + DispatchTable make_table() const; + + // A conversion function which allows this Device to be used + // in places where VkDevice would have been used. + operator VkDevice() const; + + private: + struct { + PFN_vkGetDeviceQueue fp_vkGetDeviceQueue = nullptr; + PFN_vkDestroyDevice fp_vkDestroyDevice = nullptr; + } internal_table; + friend class DeviceBuilder; + friend void destroy_device(Device const& device); +}; + + +// For advanced device queue setup +struct CustomQueueDescription { + explicit CustomQueueDescription(uint32_t index, std::vector const& priorities) + : index(index), priorities(priorities) {} + + explicit CustomQueueDescription(uint32_t index, std::vector&& priorities) + : index(index), priorities(std::move(priorities)) {} + + explicit CustomQueueDescription(uint32_t index, size_t count, float const* priorities) + : index(index), priorities(priorities, priorities + count) {} + +#if VKB_SPAN_OVERLOADS + explicit CustomQueueDescription(uint32_t index, std::span priorities) + : index(index), priorities(priorities.begin(), priorities.end()) {} +#endif + + uint32_t index; + std::vector priorities; +}; + +void destroy_device(Device const& device); + +class DeviceBuilder { + public: + // Any features and extensions that are requested/required in PhysicalDeviceSelector are automatically enabled. + explicit DeviceBuilder(PhysicalDevice physical_device); + + Result build() const; + + // For Advanced Users: specify the exact list of VkDeviceQueueCreateInfo's needed for the application. + // If a custom queue setup is provided, getting the queues and queue indexes is up to the application. 
+ DeviceBuilder& custom_queue_setup(size_t count, CustomQueueDescription const* queue_descriptions); + DeviceBuilder& custom_queue_setup(std::vector const& queue_descriptions); + DeviceBuilder& custom_queue_setup(std::vector&& queue_descriptions); +#if VKB_SPAN_OVERLOADS + DeviceBuilder& custom_queue_setup(std::span queue_descriptions); +#endif + + // Add a structure to the pNext chain of VkDeviceCreateInfo. + // The structure must be valid when DeviceBuilder::build() is called. + template DeviceBuilder& add_pNext(T* structure) { + info.pNext_chain.push_back(structure); + return *this; + } + + // Provide custom allocation callbacks. + DeviceBuilder& set_allocation_callbacks(VkAllocationCallbacks* callbacks); + + private: + PhysicalDevice physical_device; + struct DeviceInfo { + VkDeviceCreateFlags flags = static_cast(0); + std::vector pNext_chain; + std::vector queue_descriptions; + VkAllocationCallbacks* allocation_callbacks = nullptr; + } info; +}; + +// ---- Swapchain ---- // +struct Swapchain { + VkDevice device = VK_NULL_HANDLE; + VkSwapchainKHR swapchain = VK_NULL_HANDLE; + uint32_t image_count = 0; + VkFormat image_format = VK_FORMAT_UNDEFINED; // The image format actually used when creating the swapchain. + VkColorSpaceKHR color_space = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR; // The color space actually used when creating the swapchain. + VkImageUsageFlags image_usage_flags = 0; + VkExtent2D extent = { 0, 0 }; + // The value of minImageCount actually used when creating the swapchain; note that the presentation engine is always free to create more images than that. + uint32_t requested_min_image_count = 0; + VkPresentModeKHR present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR; // The present mode actually used when creating the swapchain. + uint32_t instance_version = VKB_VK_API_VERSION_1_0; + VkAllocationCallbacks* allocation_callbacks = nullptr; + + // Returns a vector of VkImage handles to the swapchain. 
+ Result> get_images(); + + // Returns a vector of VkImageView's to the VkImage's of the swapchain. + // VkImageViews must be destroyed. The pNext chain must be a nullptr or a valid + // structure. + Result> get_image_views(); + Result> get_image_views(const void* pNext); + void destroy_image_views(size_t count, VkImageView const* image_views); + void destroy_image_views(std::vector const& image_views); +#if VKB_SPAN_OVERLOADS + void destroy_image_views(std::span image_views); +#endif + + // A conversion function which allows this Swapchain to be used + // in places where VkSwapchainKHR would have been used. + operator VkSwapchainKHR() const; + + private: + struct { + PFN_vkGetSwapchainImagesKHR fp_vkGetSwapchainImagesKHR = nullptr; + PFN_vkCreateImageView fp_vkCreateImageView = nullptr; + PFN_vkDestroyImageView fp_vkDestroyImageView = nullptr; + PFN_vkDestroySwapchainKHR fp_vkDestroySwapchainKHR = nullptr; + } internal_table; + friend class SwapchainBuilder; + friend void destroy_swapchain(Swapchain const& swapchain); +}; + +void destroy_swapchain(Swapchain const& swapchain); + +class SwapchainBuilder { + public: + // Construct a SwapchainBuilder with a `vkb::Device` + explicit SwapchainBuilder(Device const& device); + // Construct a SwapchainBuilder with a specific VkSurfaceKHR handle and `vkb::Device` + explicit SwapchainBuilder(Device const& device, VkSurfaceKHR const surface); + // Construct a SwapchainBuilder with Vulkan handles for the physical device, device, and surface + // Optionally can provide the uint32_t indices for the graphics and present queue + // Note: The constructor will query the graphics & present queue if the indices are not provided + explicit SwapchainBuilder(VkPhysicalDevice const physical_device, + VkDevice const device, + VkSurfaceKHR const surface, + uint32_t graphics_queue_index = detail::QUEUE_INDEX_MAX_VALUE, + uint32_t present_queue_index = detail::QUEUE_INDEX_MAX_VALUE); + + Result build() const; + + // Set the oldSwapchain 
member of VkSwapchainCreateInfoKHR. + // For use in rebuilding a swapchain. + SwapchainBuilder& set_old_swapchain(VkSwapchainKHR old_swapchain); + SwapchainBuilder& set_old_swapchain(Swapchain const& swapchain); + + + // Desired size of the swapchain. By default, the swapchain will use the size + // of the window being drawn to. + SwapchainBuilder& set_desired_extent(uint32_t width, uint32_t height); + + // When determining the surface format, make this the first to be used if supported. + SwapchainBuilder& set_desired_format(VkSurfaceFormatKHR format); + // Add this swapchain format to the end of the list of formats selected from. + SwapchainBuilder& add_fallback_format(VkSurfaceFormatKHR format); + // Use the default swapchain formats. This is done if no formats are provided. + // Default surface format is {VK_FORMAT_B8G8R8A8_SRGB, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR} + SwapchainBuilder& use_default_format_selection(); + + // When determining the present mode, make this the first to be used if supported. + SwapchainBuilder& set_desired_present_mode(VkPresentModeKHR present_mode); + // Add this present mode to the end of the list of present modes selected from. + SwapchainBuilder& add_fallback_present_mode(VkPresentModeKHR present_mode); + // Use the default presentation mode. This is done if no present modes are provided. + // Default present modes: VK_PRESENT_MODE_MAILBOX_KHR with fallback VK_PRESENT_MODE_FIFO_KHR + SwapchainBuilder& use_default_present_mode_selection(); + + // Set the bitmask of the image usage for acquired swapchain images. + // If the surface capabilities cannot allow it, building the swapchain will result in the `SwapchainError::required_usage_not_supported` error. + SwapchainBuilder& set_image_usage_flags(VkImageUsageFlags usage_flags); + // Add a image usage to the bitmask for acquired swapchain images. + SwapchainBuilder& add_image_usage_flags(VkImageUsageFlags usage_flags); + // Use the default image usage bitmask values. 
This is the default if no image usages + // are provided. The default is VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT + SwapchainBuilder& use_default_image_usage_flags(); + + // Set the number of views in for multiview/stereo surface + SwapchainBuilder& set_image_array_layer_count(uint32_t array_layer_count); + + // Convenient named constants for passing to set_desired_min_image_count(). + // Note that it is not an `enum class`, so its constants can be passed as an integer value without casting + // In other words, these might as well be `static const int`, but they benefit from being grouped together this way. + enum BufferMode { + SINGLE_BUFFERING = 1, + DOUBLE_BUFFERING = 2, + TRIPLE_BUFFERING = 3, + }; + + // Sets the desired minimum image count for the swapchain. + // Note that the presentation engine is always free to create more images than requested. + // You may pass one of the values specified in the BufferMode enum, or any integer value. + // For instance, if you pass DOUBLE_BUFFERING, the presentation engine is allowed to give you a double buffering setup, triple buffering, or more. This is up to the drivers. + SwapchainBuilder& set_desired_min_image_count(uint32_t min_image_count); + + // Sets a required minimum image count for the swapchain. + // If the surface capabilities cannot allow it, building the swapchain will result in the `SwapchainError::required_min_image_count_too_low` error. + // Otherwise, the same observations from set_desired_min_image_count() apply. + // A value of 0 is specially interpreted as meaning "no requirement", and is the behavior by default. + SwapchainBuilder& set_required_min_image_count(uint32_t required_min_image_count); + + // Set whether the Vulkan implementation is allowed to discard rendering operations that + // affect regions of the surface that are not visible. Default is true. 
+ // Note: Applications should use the default of true if they do not expect to read back the content + // of presentable images before presenting them or after reacquiring them, and if their fragment + // shaders do not have any side effects that require them to run for all pixels in the presentable image. + SwapchainBuilder& set_clipped(bool clipped = true); + + // Set the VkSwapchainCreateFlagBitsKHR. + SwapchainBuilder& set_create_flags(VkSwapchainCreateFlagBitsKHR create_flags); + // Set the transform to be applied, like a 90 degree rotation. Default is no transform. + SwapchainBuilder& set_pre_transform_flags(VkSurfaceTransformFlagBitsKHR pre_transform_flags); + // Set the alpha channel to be used with other windows in on the system. Default is VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR. + SwapchainBuilder& set_composite_alpha_flags(VkCompositeAlphaFlagBitsKHR composite_alpha_flags); + + // Add a structure to the pNext chain of VkSwapchainCreateInfoKHR. + // The structure must be valid when SwapchainBuilder::build() is called. + template SwapchainBuilder& add_pNext(T* structure) { + info.pNext_chain.push_back(structure); + return *this; + } + + // Provide custom allocation callbacks. 
+ SwapchainBuilder& set_allocation_callbacks(VkAllocationCallbacks* callbacks); + + private: + void add_desired_formats(std::vector& formats) const; + void add_desired_present_modes(std::vector& modes) const; + + struct SwapchainInfo { + VkPhysicalDevice physical_device = VK_NULL_HANDLE; + VkDevice device = VK_NULL_HANDLE; + std::vector pNext_chain; + VkSwapchainCreateFlagBitsKHR create_flags = static_cast(0); + VkSurfaceKHR surface = VK_NULL_HANDLE; + std::vector desired_formats; + uint32_t instance_version = VKB_VK_API_VERSION_1_0; + uint32_t desired_width = 256; + uint32_t desired_height = 256; + uint32_t array_layer_count = 1; + uint32_t min_image_count = 0; + uint32_t required_min_image_count = 0; + VkImageUsageFlags image_usage_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + uint32_t graphics_queue_index = 0; + uint32_t present_queue_index = 0; + VkSurfaceTransformFlagBitsKHR pre_transform = static_cast(0); +#if defined(__ANDROID__) + VkCompositeAlphaFlagBitsKHR composite_alpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR; +#else + VkCompositeAlphaFlagBitsKHR composite_alpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; +#endif + std::vector desired_present_modes; + bool clipped = true; + VkSwapchainKHR old_swapchain = VK_NULL_HANDLE; + VkAllocationCallbacks* allocation_callbacks = nullptr; + } info; +}; + +} // namespace vkb + + +namespace std { +template <> struct is_error_code_enum : true_type {}; +template <> struct is_error_code_enum : true_type {}; +template <> struct is_error_code_enum : true_type {}; +template <> struct is_error_code_enum : true_type {}; +template <> struct is_error_code_enum : true_type {}; +} // namespace std diff --git a/extern/vk-bootstrap/src/VkBootstrapDispatch.h b/extern/vk-bootstrap/src/VkBootstrapDispatch.h new file mode 100644 index 0000000000..c16d76eb1a --- /dev/null +++ b/extern/vk-bootstrap/src/VkBootstrapDispatch.h @@ -0,0 +1,8880 @@ +/* + * Copyright © 2021 Cody Goodson (contact@vibimanx.com) + * Copyright © 2022-2025 Charles Giessen 
(charles@lunarg.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the “Software”), to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +// This file is a part of VkBootstrap +// https://github.com/charles-lunarg/vk-bootstrap + + +#pragma once + +#include + +namespace vkb { + +struct InstanceDispatchTable { + InstanceDispatchTable() = default; + InstanceDispatchTable(VkInstance instance, PFN_vkGetInstanceProcAddr procAddr) : instance(instance), populated(true) { +#if (defined(VK_EXT_acquire_drm_display)) + fp_vkAcquireDrmDisplayEXT = reinterpret_cast(procAddr(instance, "vkAcquireDrmDisplayEXT")); +#endif +#if (defined(VK_NV_acquire_winrt_display)) + fp_vkAcquireWinrtDisplayNV = reinterpret_cast(procAddr(instance, "vkAcquireWinrtDisplayNV")); +#endif +#if (defined(VK_EXT_acquire_xlib_display)) + fp_vkAcquireXlibDisplayEXT = reinterpret_cast(procAddr(instance, "vkAcquireXlibDisplayEXT")); +#endif +#if (defined(VK_KHR_android_surface)) + fp_vkCreateAndroidSurfaceKHR = reinterpret_cast(procAddr(instance, "vkCreateAndroidSurfaceKHR")); +#endif +#if (defined(VK_EXT_debug_report)) + fp_vkCreateDebugReportCallbackEXT = reinterpret_cast(procAddr(instance, "vkCreateDebugReportCallbackEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkCreateDebugUtilsMessengerEXT = reinterpret_cast(procAddr(instance, "vkCreateDebugUtilsMessengerEXT")); +#endif +#if (defined(VK_EXT_directfb_surface)) + fp_vkCreateDirectFBSurfaceEXT = reinterpret_cast(procAddr(instance, "vkCreateDirectFBSurfaceEXT")); +#endif +#if (defined(VK_KHR_display)) + fp_vkCreateDisplayModeKHR = reinterpret_cast(procAddr(instance, "vkCreateDisplayModeKHR")); +#endif +#if (defined(VK_KHR_display)) + fp_vkCreateDisplayPlaneSurfaceKHR = reinterpret_cast(procAddr(instance, "vkCreateDisplayPlaneSurfaceKHR")); +#endif +#if (defined(VK_EXT_headless_surface)) + fp_vkCreateHeadlessSurfaceEXT = reinterpret_cast(procAddr(instance, "vkCreateHeadlessSurfaceEXT")); +#endif +#if (defined(VK_MVK_ios_surface)) + fp_vkCreateIOSSurfaceMVK = reinterpret_cast(procAddr(instance, "vkCreateIOSSurfaceMVK")); +#endif +#if (defined(VK_FUCHSIA_imagepipe_surface)) + 
fp_vkCreateImagePipeSurfaceFUCHSIA = reinterpret_cast(procAddr(instance, "vkCreateImagePipeSurfaceFUCHSIA")); +#endif +#if (defined(VK_MVK_macos_surface)) + fp_vkCreateMacOSSurfaceMVK = reinterpret_cast(procAddr(instance, "vkCreateMacOSSurfaceMVK")); +#endif +#if (defined(VK_EXT_metal_surface)) + fp_vkCreateMetalSurfaceEXT = reinterpret_cast(procAddr(instance, "vkCreateMetalSurfaceEXT")); +#endif +#if (defined(VK_QNX_screen_surface)) + fp_vkCreateScreenSurfaceQNX = reinterpret_cast(procAddr(instance, "vkCreateScreenSurfaceQNX")); +#endif +#if (defined(VK_GGP_stream_descriptor_surface)) + fp_vkCreateStreamDescriptorSurfaceGGP = reinterpret_cast(procAddr(instance, "vkCreateStreamDescriptorSurfaceGGP")); +#endif +#if (defined(VK_OHOS_surface)) + fp_vkCreateSurfaceOHOS = reinterpret_cast(procAddr(instance, "vkCreateSurfaceOHOS")); +#endif +#if (defined(VK_NN_vi_surface)) + fp_vkCreateViSurfaceNN = reinterpret_cast(procAddr(instance, "vkCreateViSurfaceNN")); +#endif +#if (defined(VK_KHR_wayland_surface)) + fp_vkCreateWaylandSurfaceKHR = reinterpret_cast(procAddr(instance, "vkCreateWaylandSurfaceKHR")); +#endif +#if (defined(VK_KHR_win32_surface)) + fp_vkCreateWin32SurfaceKHR = reinterpret_cast(procAddr(instance, "vkCreateWin32SurfaceKHR")); +#endif +#if (defined(VK_KHR_xcb_surface)) + fp_vkCreateXcbSurfaceKHR = reinterpret_cast(procAddr(instance, "vkCreateXcbSurfaceKHR")); +#endif +#if (defined(VK_KHR_xlib_surface)) + fp_vkCreateXlibSurfaceKHR = reinterpret_cast(procAddr(instance, "vkCreateXlibSurfaceKHR")); +#endif +#if (defined(VK_EXT_debug_report)) + fp_vkDebugReportMessageEXT = reinterpret_cast(procAddr(instance, "vkDebugReportMessageEXT")); +#endif +#if (defined(VK_EXT_debug_report)) + fp_vkDestroyDebugReportCallbackEXT = reinterpret_cast(procAddr(instance, "vkDestroyDebugReportCallbackEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkDestroyDebugUtilsMessengerEXT = reinterpret_cast(procAddr(instance, "vkDestroyDebugUtilsMessengerEXT")); +#endif + 
fp_vkDestroyInstance = reinterpret_cast(procAddr(instance, "vkDestroyInstance")); +#if (defined(VK_KHR_surface)) + fp_vkDestroySurfaceKHR = reinterpret_cast(procAddr(instance, "vkDestroySurfaceKHR")); +#endif + fp_vkEnumerateDeviceExtensionProperties = reinterpret_cast(procAddr(instance, "vkEnumerateDeviceExtensionProperties")); + fp_vkEnumerateDeviceLayerProperties = reinterpret_cast(procAddr(instance, "vkEnumerateDeviceLayerProperties")); +#if (defined(VK_VERSION_1_1)) + fp_vkEnumeratePhysicalDeviceGroups = reinterpret_cast(procAddr(instance, "vkEnumeratePhysicalDeviceGroups")); +#endif +#if (defined(VK_KHR_device_group_creation)) + fp_vkEnumeratePhysicalDeviceGroupsKHR = reinterpret_cast(procAddr(instance, "vkEnumeratePhysicalDeviceGroupsKHR")); +#endif +#if (defined(VK_KHR_performance_query)) + fp_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = reinterpret_cast(procAddr(instance, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR")); +#endif + fp_vkEnumeratePhysicalDevices = reinterpret_cast(procAddr(instance, "vkEnumeratePhysicalDevices")); +#if (defined(VK_KHR_get_display_properties2)) + fp_vkGetDisplayModeProperties2KHR = reinterpret_cast(procAddr(instance, "vkGetDisplayModeProperties2KHR")); +#endif +#if (defined(VK_KHR_display)) + fp_vkGetDisplayModePropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetDisplayModePropertiesKHR")); +#endif +#if (defined(VK_KHR_get_display_properties2)) + fp_vkGetDisplayPlaneCapabilities2KHR = reinterpret_cast(procAddr(instance, "vkGetDisplayPlaneCapabilities2KHR")); +#endif +#if (defined(VK_KHR_display)) + fp_vkGetDisplayPlaneCapabilitiesKHR = reinterpret_cast(procAddr(instance, "vkGetDisplayPlaneCapabilitiesKHR")); +#endif +#if (defined(VK_KHR_display)) + fp_vkGetDisplayPlaneSupportedDisplaysKHR = reinterpret_cast(procAddr(instance, "vkGetDisplayPlaneSupportedDisplaysKHR")); +#endif +#if (defined(VK_EXT_acquire_drm_display)) + fp_vkGetDrmDisplayEXT = reinterpret_cast(procAddr(instance, 
"vkGetDrmDisplayEXT")); +#endif + fp_vkGetInstanceProcAddr = reinterpret_cast(procAddr(instance, "vkGetInstanceProcAddr")); +#if (defined(VK_EXT_calibrated_timestamps)) + fp_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT")); +#endif +#if (defined(VK_KHR_calibrated_timestamps)) + fp_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceCalibrateableTimeDomainsKHR")); +#endif +#if (defined(VK_NV_cooperative_matrix2)) + fp_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV")); +#endif +#if (defined(VK_KHR_cooperative_matrix)) + fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR")); +#endif +#if (defined(VK_NV_cooperative_matrix)) + fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV")); +#endif +#if (defined(VK_NV_cooperative_vector)) + fp_vkGetPhysicalDeviceCooperativeVectorPropertiesNV = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceCooperativeVectorPropertiesNV")); +#endif +#if (defined(VK_EXT_directfb_surface)) + fp_vkGetPhysicalDeviceDirectFBPresentationSupportEXT = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT")); +#endif +#if (defined(VK_KHR_get_display_properties2)) + fp_vkGetPhysicalDeviceDisplayPlaneProperties2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR")); +#endif +#if (defined(VK_KHR_display)) + fp_vkGetPhysicalDeviceDisplayPlanePropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR")); +#endif +#if (defined(VK_KHR_get_display_properties2)) + fp_vkGetPhysicalDeviceDisplayProperties2KHR = 
reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceDisplayProperties2KHR")); +#endif +#if (defined(VK_KHR_display)) + fp_vkGetPhysicalDeviceDisplayPropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceDisplayPropertiesKHR")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceExternalBufferProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalBufferProperties")); +#endif +#if (defined(VK_KHR_external_memory_capabilities)) + fp_vkGetPhysicalDeviceExternalBufferPropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalBufferPropertiesKHR")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceExternalFenceProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalFenceProperties")); +#endif +#if (defined(VK_KHR_external_fence_capabilities)) + fp_vkGetPhysicalDeviceExternalFencePropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalFencePropertiesKHR")); +#endif +#if (defined(VK_NV_external_memory_capabilities)) + fp_vkGetPhysicalDeviceExternalImageFormatPropertiesNV = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceExternalSemaphoreProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalSemaphoreProperties")); +#endif +#if (defined(VK_KHR_external_semaphore_capabilities)) + fp_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkGetPhysicalDeviceExternalTensorPropertiesARM = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceExternalTensorPropertiesARM")); +#endif + fp_vkGetPhysicalDeviceFeatures = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceFeatures")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceFeatures2 = reinterpret_cast(procAddr(instance, 
"vkGetPhysicalDeviceFeatures2")); +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + fp_vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceFeatures2KHR")); +#endif + fp_vkGetPhysicalDeviceFormatProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceFormatProperties")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceFormatProperties2 = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceFormatProperties2")); +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + fp_vkGetPhysicalDeviceFormatProperties2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceFormatProperties2KHR")); +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + fp_vkGetPhysicalDeviceFragmentShadingRatesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceFragmentShadingRatesKHR")); +#endif + fp_vkGetPhysicalDeviceImageFormatProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceImageFormatProperties")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceImageFormatProperties2 = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceImageFormatProperties2")); +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + fp_vkGetPhysicalDeviceImageFormatProperties2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceImageFormatProperties2KHR")); +#endif + fp_vkGetPhysicalDeviceMemoryProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceMemoryProperties")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceMemoryProperties2 = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceMemoryProperties2")); +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + fp_vkGetPhysicalDeviceMemoryProperties2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceMemoryProperties2KHR")); +#endif +#if (defined(VK_EXT_sample_locations)) + fp_vkGetPhysicalDeviceMultisamplePropertiesEXT = reinterpret_cast(procAddr(instance, 
"vkGetPhysicalDeviceMultisamplePropertiesEXT")); +#endif +#if (defined(VK_NV_optical_flow)) + fp_vkGetPhysicalDeviceOpticalFlowImageFormatsNV = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceOpticalFlowImageFormatsNV")); +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + fp_vkGetPhysicalDevicePresentRectanglesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDevicePresentRectanglesKHR")); +#endif + fp_vkGetPhysicalDeviceProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceProperties")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceProperties2 = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceProperties2")); +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + fp_vkGetPhysicalDeviceProperties2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceProperties2KHR")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM")); +#endif +#if (defined(VK_KHR_performance_query)) + fp_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR")); +#endif + fp_vkGetPhysicalDeviceQueueFamilyProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceQueueFamilyProperties")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceQueueFamilyProperties2 = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceQueueFamilyProperties2")); +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + fp_vkGetPhysicalDeviceQueueFamilyProperties2KHR = reinterpret_cast(procAddr(instance, 
"vkGetPhysicalDeviceQueueFamilyProperties2KHR")); +#endif +#if (defined(VK_QNX_screen_surface)) + fp_vkGetPhysicalDeviceScreenPresentationSupportQNX = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceScreenPresentationSupportQNX")); +#endif + fp_vkGetPhysicalDeviceSparseImageFormatProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSparseImageFormatProperties")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetPhysicalDeviceSparseImageFormatProperties2 = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSparseImageFormatProperties2")); +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + fp_vkGetPhysicalDeviceSparseImageFormatProperties2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR")); +#endif +#if (defined(VK_NV_coverage_reduction_mode)) + fp_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV")); +#endif +#if (defined(VK_EXT_display_surface_counter)) + fp_vkGetPhysicalDeviceSurfaceCapabilities2EXT = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfaceCapabilities2EXT")); +#endif +#if (defined(VK_KHR_get_surface_capabilities2)) + fp_vkGetPhysicalDeviceSurfaceCapabilities2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfaceCapabilities2KHR")); +#endif +#if (defined(VK_KHR_surface)) + fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR")); +#endif +#if (defined(VK_KHR_get_surface_capabilities2)) + fp_vkGetPhysicalDeviceSurfaceFormats2KHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfaceFormats2KHR")); +#endif +#if (defined(VK_KHR_surface)) + fp_vkGetPhysicalDeviceSurfaceFormatsKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR")); +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + 
fp_vkGetPhysicalDeviceSurfacePresentModes2EXT = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfacePresentModes2EXT")); +#endif +#if (defined(VK_KHR_surface)) + fp_vkGetPhysicalDeviceSurfacePresentModesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR")); +#endif +#if (defined(VK_KHR_surface)) + fp_vkGetPhysicalDeviceSurfaceSupportKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceSurfaceSupportKHR")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkGetPhysicalDeviceToolProperties = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceToolProperties")); +#endif +#if (defined(VK_EXT_tooling_info)) + fp_vkGetPhysicalDeviceToolPropertiesEXT = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceToolPropertiesEXT")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkGetPhysicalDeviceVideoCapabilitiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceVideoCapabilitiesKHR")); +#endif +#if (defined(VK_KHR_video_encode_queue)) + fp_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkGetPhysicalDeviceVideoFormatPropertiesKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceVideoFormatPropertiesKHR")); +#endif +#if (defined(VK_KHR_wayland_surface)) + fp_vkGetPhysicalDeviceWaylandPresentationSupportKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR")); +#endif +#if (defined(VK_KHR_win32_surface)) + fp_vkGetPhysicalDeviceWin32PresentationSupportKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR")); +#endif +#if (defined(VK_KHR_xcb_surface)) + fp_vkGetPhysicalDeviceXcbPresentationSupportKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR")); +#endif +#if (defined(VK_KHR_xlib_surface)) + 
fp_vkGetPhysicalDeviceXlibPresentationSupportKHR = reinterpret_cast(procAddr(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR")); +#endif +#if (defined(VK_EXT_acquire_xlib_display)) + fp_vkGetRandROutputDisplayEXT = reinterpret_cast(procAddr(instance, "vkGetRandROutputDisplayEXT")); +#endif +#if (defined(VK_NV_acquire_winrt_display)) + fp_vkGetWinrtDisplayNV = reinterpret_cast(procAddr(instance, "vkGetWinrtDisplayNV")); +#endif +#if (defined(VK_EXT_direct_mode_display)) + fp_vkReleaseDisplayEXT = reinterpret_cast(procAddr(instance, "vkReleaseDisplayEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkSubmitDebugUtilsMessageEXT = reinterpret_cast(procAddr(instance, "vkSubmitDebugUtilsMessageEXT")); +#endif + } +#if (defined(VK_EXT_acquire_drm_display)) + VkResult acquireDrmDisplayEXT(VkPhysicalDevice physicalDevice, int32_t drmFd, VkDisplayKHR display) const noexcept { + return fp_vkAcquireDrmDisplayEXT(physicalDevice, drmFd, display); + } +#endif +#if (defined(VK_NV_acquire_winrt_display)) + VkResult acquireWinrtDisplayNV(VkPhysicalDevice physicalDevice, VkDisplayKHR display) const noexcept { + return fp_vkAcquireWinrtDisplayNV(physicalDevice, display); + } +#endif +#if (defined(VK_EXT_acquire_xlib_display)) + VkResult acquireXlibDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display) const noexcept { + return fp_vkAcquireXlibDisplayEXT(physicalDevice, dpy, display); + } +#endif +#if (defined(VK_KHR_android_surface)) + VkResult createAndroidSurfaceKHR(const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateAndroidSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_EXT_debug_report)) + VkResult createDebugReportCallbackEXT(const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback) const noexcept { + return 
fp_vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback); + } +#endif +#if (defined(VK_EXT_debug_utils)) + VkResult createDebugUtilsMessengerEXT(const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger) const noexcept { + return fp_vkCreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger); + } +#endif +#if (defined(VK_EXT_directfb_surface)) + VkResult createDirectFBSurfaceEXT(const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateDirectFBSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_KHR_display)) + VkResult createDisplayModeKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode) const noexcept { + return fp_vkCreateDisplayModeKHR(physicalDevice, display, pCreateInfo, pAllocator, pMode); + } +#endif +#if (defined(VK_KHR_display)) + VkResult createDisplayPlaneSurfaceKHR(const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateDisplayPlaneSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_EXT_headless_surface)) + VkResult createHeadlessSurfaceEXT(const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateHeadlessSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_MVK_ios_surface)) + VkResult createIOSSurfaceMVK(const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateIOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if 
(defined(VK_FUCHSIA_imagepipe_surface)) + VkResult createImagePipeSurfaceFUCHSIA(const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateImagePipeSurfaceFUCHSIA(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_MVK_macos_surface)) + VkResult createMacOSSurfaceMVK(const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateMacOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_EXT_metal_surface)) + VkResult createMetalSurfaceEXT(const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateMetalSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_QNX_screen_surface)) + VkResult createScreenSurfaceQNX(const VkScreenSurfaceCreateInfoQNX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateScreenSurfaceQNX(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_GGP_stream_descriptor_surface)) + VkResult createStreamDescriptorSurfaceGGP(const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateStreamDescriptorSurfaceGGP(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_OHOS_surface)) + VkResult createSurfaceOHOS(const VkSurfaceCreateInfoOHOS* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateSurfaceOHOS(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_NN_vi_surface)) + VkResult createViSurfaceNN(const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const 
noexcept { + return fp_vkCreateViSurfaceNN(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_KHR_wayland_surface)) + VkResult createWaylandSurfaceKHR(const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_KHR_win32_surface)) + VkResult createWin32SurfaceKHR(const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_KHR_xcb_surface)) + VkResult createXcbSurfaceKHR(const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_KHR_xlib_surface)) + VkResult createXlibSurfaceKHR(const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface) const noexcept { + return fp_vkCreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + } +#endif +#if (defined(VK_EXT_debug_report)) + void debugReportMessageEXT(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage) const noexcept { + fp_vkDebugReportMessageEXT(instance, flags, objectType, object, location, messageCode, pLayerPrefix, pMessage); + } +#endif +#if (defined(VK_EXT_debug_report)) + void destroyDebugReportCallbackEXT(VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDebugReportCallbackEXT(instance, callback, pAllocator); + } +#endif +#if (defined(VK_EXT_debug_utils)) + void destroyDebugUtilsMessengerEXT(VkDebugUtilsMessengerEXT 
messenger, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator); + } +#endif + void destroyInstance(const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyInstance(instance, pAllocator); + } +#if (defined(VK_KHR_surface)) + void destroySurfaceKHR(VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroySurfaceKHR(instance, surface, pAllocator); + } +#endif + VkResult enumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties) const noexcept { + return fp_vkEnumerateDeviceExtensionProperties(physicalDevice, pLayerName, pPropertyCount, pProperties); + } + VkResult enumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties) const noexcept { + return fp_vkEnumerateDeviceLayerProperties(physicalDevice, pPropertyCount, pProperties); + } +#if (defined(VK_VERSION_1_1)) + VkResult enumeratePhysicalDeviceGroups(uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties) const noexcept { + return fp_vkEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); + } +#endif +#if (defined(VK_KHR_device_group_creation)) + VkResult enumeratePhysicalDeviceGroupsKHR(uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR* pPhysicalDeviceGroupProperties) const noexcept { + return fp_vkEnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties); + } +#endif +#if (defined(VK_KHR_performance_query)) + VkResult enumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions) const noexcept { + 
return fp_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(physicalDevice, queueFamilyIndex, pCounterCount, pCounters, pCounterDescriptions); + } +#endif + VkResult enumeratePhysicalDevices(uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices) const noexcept { + return fp_vkEnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices); + } +#if (defined(VK_KHR_get_display_properties2)) + VkResult getDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties) const noexcept { + return fp_vkGetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_KHR_display)) + VkResult getDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties) const noexcept { + return fp_vkGetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_KHR_get_display_properties2)) + VkResult getDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities) const noexcept { + return fp_vkGetDisplayPlaneCapabilities2KHR(physicalDevice, pDisplayPlaneInfo, pCapabilities); + } +#endif +#if (defined(VK_KHR_display)) + VkResult getDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities) const noexcept { + return fp_vkGetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities); + } +#endif +#if (defined(VK_KHR_display)) + VkResult getDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays) const noexcept { + return fp_vkGetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, 
pDisplays); + } +#endif +#if (defined(VK_EXT_acquire_drm_display)) + VkResult getDrmDisplayEXT(VkPhysicalDevice physicalDevice, int32_t drmFd, uint32_t connectorId, VkDisplayKHR* display) const noexcept { + return fp_vkGetDrmDisplayEXT(physicalDevice, drmFd, connectorId, display); + } +#endif + PFN_vkVoidFunction getInstanceProcAddr(const char* pName) const noexcept { + return fp_vkGetInstanceProcAddr(instance, pName); + } +#if (defined(VK_EXT_calibrated_timestamps)) + VkResult getPhysicalDeviceCalibrateableTimeDomainsEXT(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains) const noexcept { + return fp_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(physicalDevice, pTimeDomainCount, pTimeDomains); + } +#endif +#if (defined(VK_KHR_calibrated_timestamps)) + VkResult getPhysicalDeviceCalibrateableTimeDomainsKHR(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainKHR* pTimeDomains) const noexcept { + return fp_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR(physicalDevice, pTimeDomainCount, pTimeDomains); + } +#endif +#if (defined(VK_NV_cooperative_matrix2)) + VkResult getPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixFlexibleDimensionsPropertiesNV* pProperties) const noexcept { + return fp_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_KHR_cooperative_matrix)) + VkResult getPhysicalDeviceCooperativeMatrixPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesKHR* pProperties) const noexcept { + return fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_NV_cooperative_matrix)) + VkResult getPhysicalDeviceCooperativeMatrixPropertiesNV(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, 
VkCooperativeMatrixPropertiesNV* pProperties) const noexcept { + return fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_NV_cooperative_vector)) + VkResult getPhysicalDeviceCooperativeVectorPropertiesNV(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeVectorPropertiesNV* pProperties) const noexcept { + return fp_vkGetPhysicalDeviceCooperativeVectorPropertiesNV(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_EXT_directfb_surface)) + VkBool32 getPhysicalDeviceDirectFBPresentationSupportEXT(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, IDirectFB* dfb) const noexcept { + return fp_vkGetPhysicalDeviceDirectFBPresentationSupportEXT(physicalDevice, queueFamilyIndex, dfb); + } +#endif +#if (defined(VK_KHR_get_display_properties2)) + VkResult getPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties) const noexcept { + return fp_vkGetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_KHR_display)) + VkResult getPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties) const noexcept { + return fp_vkGetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_KHR_get_display_properties2)) + VkResult getPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties) const noexcept { + return fp_vkGetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_KHR_display)) + VkResult getPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties) const noexcept { + 
return fp_vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceExternalBufferProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties) const noexcept { + fp_vkGetPhysicalDeviceExternalBufferProperties(physicalDevice, pExternalBufferInfo, pExternalBufferProperties); + } +#endif +#if (defined(VK_KHR_external_memory_capabilities)) + void getPhysicalDeviceExternalBufferPropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo, VkExternalBufferPropertiesKHR* pExternalBufferProperties) const noexcept { + fp_vkGetPhysicalDeviceExternalBufferPropertiesKHR(physicalDevice, pExternalBufferInfo, pExternalBufferProperties); + } +#endif +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceExternalFenceProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties) const noexcept { + fp_vkGetPhysicalDeviceExternalFenceProperties(physicalDevice, pExternalFenceInfo, pExternalFenceProperties); + } +#endif +#if (defined(VK_KHR_external_fence_capabilities)) + void getPhysicalDeviceExternalFencePropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo, VkExternalFencePropertiesKHR* pExternalFenceProperties) const noexcept { + fp_vkGetPhysicalDeviceExternalFencePropertiesKHR(physicalDevice, pExternalFenceInfo, pExternalFenceProperties); + } +#endif +#if (defined(VK_NV_external_memory_capabilities)) + VkResult getPhysicalDeviceExternalImageFormatPropertiesNV(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* 
pExternalImageFormatProperties) const noexcept { + return fp_vkGetPhysicalDeviceExternalImageFormatPropertiesNV(physicalDevice, format, type, tiling, usage, flags, externalHandleType, pExternalImageFormatProperties); + } +#endif +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceExternalSemaphoreProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties) const noexcept { + fp_vkGetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties); + } +#endif +#if (defined(VK_KHR_external_semaphore_capabilities)) + void getPhysicalDeviceExternalSemaphorePropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo, VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties) const noexcept { + fp_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties); + } +#endif +#if (defined(VK_ARM_tensors)) + void getPhysicalDeviceExternalTensorPropertiesARM(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalTensorInfoARM* pExternalTensorInfo, VkExternalTensorPropertiesARM* pExternalTensorProperties) const noexcept { + fp_vkGetPhysicalDeviceExternalTensorPropertiesARM(physicalDevice, pExternalTensorInfo, pExternalTensorProperties); + } +#endif + void getPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures) const noexcept { + fp_vkGetPhysicalDeviceFeatures(physicalDevice, pFeatures); + } +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures) const noexcept { + fp_vkGetPhysicalDeviceFeatures2(physicalDevice, pFeatures); + } +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + void getPhysicalDeviceFeatures2KHR(VkPhysicalDevice physicalDevice, 
VkPhysicalDeviceFeatures2KHR* pFeatures) const noexcept { + fp_vkGetPhysicalDeviceFeatures2KHR(physicalDevice, pFeatures); + } +#endif + void getPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties) const noexcept { + fp_vkGetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties); + } +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties) const noexcept { + fp_vkGetPhysicalDeviceFormatProperties2(physicalDevice, format, pFormatProperties); + } +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + void getPhysicalDeviceFormatProperties2KHR(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2KHR* pFormatProperties) const noexcept { + fp_vkGetPhysicalDeviceFormatProperties2KHR(physicalDevice, format, pFormatProperties); + } +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + VkResult getPhysicalDeviceFragmentShadingRatesKHR(VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates) const noexcept { + return fp_vkGetPhysicalDeviceFragmentShadingRatesKHR(physicalDevice, pFragmentShadingRateCount, pFragmentShadingRates); + } +#endif + VkResult getPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties) const noexcept { + return fp_vkGetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties); + } +#if (defined(VK_VERSION_1_1)) + VkResult getPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties) const noexcept { + return 
fp_vkGetPhysicalDeviceImageFormatProperties2(physicalDevice, pImageFormatInfo, pImageFormatProperties); + } +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + VkResult getPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2KHR* pImageFormatInfo, VkImageFormatProperties2KHR* pImageFormatProperties) const noexcept { + return fp_vkGetPhysicalDeviceImageFormatProperties2KHR(physicalDevice, pImageFormatInfo, pImageFormatProperties); + } +#endif + void getPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties) const noexcept { + fp_vkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties); + } +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties) const noexcept { + fp_vkGetPhysicalDeviceMemoryProperties2(physicalDevice, pMemoryProperties); + } +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + void getPhysicalDeviceMemoryProperties2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2KHR* pMemoryProperties) const noexcept { + fp_vkGetPhysicalDeviceMemoryProperties2KHR(physicalDevice, pMemoryProperties); + } +#endif +#if (defined(VK_EXT_sample_locations)) + void getPhysicalDeviceMultisamplePropertiesEXT(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties) const noexcept { + fp_vkGetPhysicalDeviceMultisamplePropertiesEXT(physicalDevice, samples, pMultisampleProperties); + } +#endif +#if (defined(VK_NV_optical_flow)) + VkResult getPhysicalDeviceOpticalFlowImageFormatsNV(VkPhysicalDevice physicalDevice, const VkOpticalFlowImageFormatInfoNV* pOpticalFlowImageFormatInfo, uint32_t* pFormatCount, VkOpticalFlowImageFormatPropertiesNV* pImageFormatProperties) const noexcept { + return 
fp_vkGetPhysicalDeviceOpticalFlowImageFormatsNV(physicalDevice, pOpticalFlowImageFormatInfo, pFormatCount, pImageFormatProperties); + } +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + VkResult getPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects) const noexcept { + return fp_vkGetPhysicalDevicePresentRectanglesKHR(physicalDevice, surface, pRectCount, pRects); + } +#endif + void getPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties) const noexcept { + fp_vkGetPhysicalDeviceProperties(physicalDevice, pProperties); + } +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) const noexcept { + fp_vkGetPhysicalDeviceProperties2(physicalDevice, pProperties); + } +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + void getPhysicalDeviceProperties2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2KHR* pProperties) const noexcept { + fp_vkGetPhysicalDeviceProperties2KHR(physicalDevice, pProperties); + } +#endif +#if (defined(VK_ARM_data_graph)) + void getPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceQueueFamilyDataGraphProcessingEngineInfoARM* pQueueFamilyDataGraphProcessingEngineInfo, VkQueueFamilyDataGraphProcessingEnginePropertiesARM* pQueueFamilyDataGraphProcessingEngineProperties) const noexcept { + fp_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM(physicalDevice, pQueueFamilyDataGraphProcessingEngineInfo, pQueueFamilyDataGraphProcessingEngineProperties); + } +#endif +#if (defined(VK_ARM_data_graph)) + VkResult getPhysicalDeviceQueueFamilyDataGraphPropertiesARM(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pQueueFamilyDataGraphPropertyCount, VkQueueFamilyDataGraphPropertiesARM* 
pQueueFamilyDataGraphProperties) const noexcept { + return fp_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM(physicalDevice, queueFamilyIndex, pQueueFamilyDataGraphPropertyCount, pQueueFamilyDataGraphProperties); + } +#endif +#if (defined(VK_KHR_performance_query)) + void getPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses) const noexcept { + fp_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(physicalDevice, pPerformanceQueryCreateInfo, pNumPasses); + } +#endif + void getPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties) const noexcept { + fp_vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties) const noexcept { + fp_vkGetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + void getPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR* pQueueFamilyProperties) const noexcept { + fp_vkGetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } +#endif +#if (defined(VK_QNX_screen_surface)) + VkBool32 getPhysicalDeviceScreenPresentationSupportQNX(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct _screen_window* window) const noexcept { + return fp_vkGetPhysicalDeviceScreenPresentationSupportQNX(physicalDevice, queueFamilyIndex, window); + } +#endif + void 
getPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties) const noexcept { + fp_vkGetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties); + } +#if (defined(VK_VERSION_1_1)) + void getPhysicalDeviceSparseImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties) const noexcept { + fp_vkGetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + void getPhysicalDeviceSparseImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2KHR* pProperties) const noexcept { + fp_vkGetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount, pProperties); + } +#endif +#if (defined(VK_NV_coverage_reduction_mode)) + VkResult getPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations) const noexcept { + return fp_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physicalDevice, pCombinationCount, pCombinations); + } +#endif +#if (defined(VK_EXT_display_surface_counter)) + VkResult getPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities) const noexcept { + return fp_vkGetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities); + } +#endif +#if 
(defined(VK_KHR_get_surface_capabilities2)) + VkResult getPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities) const noexcept { + return fp_vkGetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities); + } +#endif +#if (defined(VK_KHR_surface)) + VkResult getPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) const noexcept { + return fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities); + } +#endif +#if (defined(VK_KHR_get_surface_capabilities2)) + VkResult getPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats) const noexcept { + return fp_vkGetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo, pSurfaceFormatCount, pSurfaceFormats); + } +#endif +#if (defined(VK_KHR_surface)) + VkResult getPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats) const noexcept { + return fp_vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats); + } +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + VkResult getPhysicalDeviceSurfacePresentModes2EXT(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes) const noexcept { + return fp_vkGetPhysicalDeviceSurfacePresentModes2EXT(physicalDevice, pSurfaceInfo, pPresentModeCount, pPresentModes); + } +#endif +#if (defined(VK_KHR_surface)) + VkResult getPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, 
VkPresentModeKHR* pPresentModes) const noexcept { + return fp_vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes); + } +#endif +#if (defined(VK_KHR_surface)) + VkResult getPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported) const noexcept { + return fp_vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported); + } +#endif +#if (defined(VK_VERSION_1_3)) + VkResult getPhysicalDeviceToolProperties(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties) const noexcept { + return fp_vkGetPhysicalDeviceToolProperties(physicalDevice, pToolCount, pToolProperties); + } +#endif +#if (defined(VK_EXT_tooling_info)) + VkResult getPhysicalDeviceToolPropertiesEXT(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolPropertiesEXT* pToolProperties) const noexcept { + return fp_vkGetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties); + } +#endif +#if (defined(VK_KHR_video_queue)) + VkResult getPhysicalDeviceVideoCapabilitiesKHR(VkPhysicalDevice physicalDevice, const VkVideoProfileInfoKHR* pVideoProfile, VkVideoCapabilitiesKHR* pCapabilities) const noexcept { + return fp_vkGetPhysicalDeviceVideoCapabilitiesKHR(physicalDevice, pVideoProfile, pCapabilities); + } +#endif +#if (defined(VK_KHR_video_encode_queue)) + VkResult getPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR* pQualityLevelInfo, VkVideoEncodeQualityLevelPropertiesKHR* pQualityLevelProperties) const noexcept { + return fp_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR(physicalDevice, pQualityLevelInfo, pQualityLevelProperties); + } +#endif +#if (defined(VK_KHR_video_queue)) + VkResult getPhysicalDeviceVideoFormatPropertiesKHR(VkPhysicalDevice physicalDevice, const 
VkPhysicalDeviceVideoFormatInfoKHR* pVideoFormatInfo, uint32_t* pVideoFormatPropertyCount, VkVideoFormatPropertiesKHR* pVideoFormatProperties) const noexcept { + return fp_vkGetPhysicalDeviceVideoFormatPropertiesKHR(physicalDevice, pVideoFormatInfo, pVideoFormatPropertyCount, pVideoFormatProperties); + } +#endif +#if (defined(VK_KHR_wayland_surface)) + VkBool32 getPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display) const noexcept { + return fp_vkGetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display); + } +#endif +#if (defined(VK_KHR_win32_surface)) + VkBool32 getPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) const noexcept { + return fp_vkGetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex); + } +#endif +#if (defined(VK_KHR_xcb_surface)) + VkBool32 getPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id) const noexcept { + return fp_vkGetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection, visual_id); + } +#endif +#if (defined(VK_KHR_xlib_surface)) + VkBool32 getPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID) const noexcept { + return fp_vkGetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID); + } +#endif +#if (defined(VK_EXT_acquire_xlib_display)) + VkResult getRandROutputDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay) const noexcept { + return fp_vkGetRandROutputDisplayEXT(physicalDevice, dpy, rrOutput, pDisplay); + } +#endif +#if (defined(VK_NV_acquire_winrt_display)) + VkResult getWinrtDisplayNV(VkPhysicalDevice physicalDevice, uint32_t deviceRelativeId, 
VkDisplayKHR* pDisplay) const noexcept { + return fp_vkGetWinrtDisplayNV(physicalDevice, deviceRelativeId, pDisplay); + } +#endif +#if (defined(VK_EXT_direct_mode_display)) + VkResult releaseDisplayEXT(VkPhysicalDevice physicalDevice, VkDisplayKHR display) const noexcept { + return fp_vkReleaseDisplayEXT(physicalDevice, display); + } +#endif +#if (defined(VK_EXT_debug_utils)) + void submitDebugUtilsMessageEXT(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData) const noexcept { + fp_vkSubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData); + } +#endif +#if (defined(VK_EXT_acquire_drm_display)) + PFN_vkAcquireDrmDisplayEXT fp_vkAcquireDrmDisplayEXT = nullptr; +#else + void * fp_vkAcquireDrmDisplayEXT{}; +#endif +#if (defined(VK_NV_acquire_winrt_display)) + PFN_vkAcquireWinrtDisplayNV fp_vkAcquireWinrtDisplayNV = nullptr; +#else + void * fp_vkAcquireWinrtDisplayNV{}; +#endif +#if (defined(VK_EXT_acquire_xlib_display)) + PFN_vkAcquireXlibDisplayEXT fp_vkAcquireXlibDisplayEXT = nullptr; +#else + void * fp_vkAcquireXlibDisplayEXT{}; +#endif +#if (defined(VK_KHR_android_surface)) + PFN_vkCreateAndroidSurfaceKHR fp_vkCreateAndroidSurfaceKHR = nullptr; +#else + void * fp_vkCreateAndroidSurfaceKHR{}; +#endif +#if (defined(VK_EXT_debug_report)) + PFN_vkCreateDebugReportCallbackEXT fp_vkCreateDebugReportCallbackEXT = nullptr; +#else + void * fp_vkCreateDebugReportCallbackEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkCreateDebugUtilsMessengerEXT fp_vkCreateDebugUtilsMessengerEXT = nullptr; +#else + void * fp_vkCreateDebugUtilsMessengerEXT{}; +#endif +#if (defined(VK_EXT_directfb_surface)) + PFN_vkCreateDirectFBSurfaceEXT fp_vkCreateDirectFBSurfaceEXT = nullptr; +#else + void * fp_vkCreateDirectFBSurfaceEXT{}; +#endif +#if (defined(VK_KHR_display)) + PFN_vkCreateDisplayModeKHR fp_vkCreateDisplayModeKHR = nullptr; +#else + void * 
fp_vkCreateDisplayModeKHR{}; +#endif +#if (defined(VK_KHR_display)) + PFN_vkCreateDisplayPlaneSurfaceKHR fp_vkCreateDisplayPlaneSurfaceKHR = nullptr; +#else + void * fp_vkCreateDisplayPlaneSurfaceKHR{}; +#endif +#if (defined(VK_EXT_headless_surface)) + PFN_vkCreateHeadlessSurfaceEXT fp_vkCreateHeadlessSurfaceEXT = nullptr; +#else + void * fp_vkCreateHeadlessSurfaceEXT{}; +#endif +#if (defined(VK_MVK_ios_surface)) + PFN_vkCreateIOSSurfaceMVK fp_vkCreateIOSSurfaceMVK = nullptr; +#else + void * fp_vkCreateIOSSurfaceMVK{}; +#endif +#if (defined(VK_FUCHSIA_imagepipe_surface)) + PFN_vkCreateImagePipeSurfaceFUCHSIA fp_vkCreateImagePipeSurfaceFUCHSIA = nullptr; +#else + void * fp_vkCreateImagePipeSurfaceFUCHSIA{}; +#endif +#if (defined(VK_MVK_macos_surface)) + PFN_vkCreateMacOSSurfaceMVK fp_vkCreateMacOSSurfaceMVK = nullptr; +#else + void * fp_vkCreateMacOSSurfaceMVK{}; +#endif +#if (defined(VK_EXT_metal_surface)) + PFN_vkCreateMetalSurfaceEXT fp_vkCreateMetalSurfaceEXT = nullptr; +#else + void * fp_vkCreateMetalSurfaceEXT{}; +#endif +#if (defined(VK_QNX_screen_surface)) + PFN_vkCreateScreenSurfaceQNX fp_vkCreateScreenSurfaceQNX = nullptr; +#else + void * fp_vkCreateScreenSurfaceQNX{}; +#endif +#if (defined(VK_GGP_stream_descriptor_surface)) + PFN_vkCreateStreamDescriptorSurfaceGGP fp_vkCreateStreamDescriptorSurfaceGGP = nullptr; +#else + void * fp_vkCreateStreamDescriptorSurfaceGGP{}; +#endif +#if (defined(VK_OHOS_surface)) + PFN_vkCreateSurfaceOHOS fp_vkCreateSurfaceOHOS = nullptr; +#else + void * fp_vkCreateSurfaceOHOS{}; +#endif +#if (defined(VK_NN_vi_surface)) + PFN_vkCreateViSurfaceNN fp_vkCreateViSurfaceNN = nullptr; +#else + void * fp_vkCreateViSurfaceNN{}; +#endif +#if (defined(VK_KHR_wayland_surface)) + PFN_vkCreateWaylandSurfaceKHR fp_vkCreateWaylandSurfaceKHR = nullptr; +#else + void * fp_vkCreateWaylandSurfaceKHR{}; +#endif +#if (defined(VK_KHR_win32_surface)) + PFN_vkCreateWin32SurfaceKHR fp_vkCreateWin32SurfaceKHR = nullptr; +#else + void * 
fp_vkCreateWin32SurfaceKHR{}; +#endif +#if (defined(VK_KHR_xcb_surface)) + PFN_vkCreateXcbSurfaceKHR fp_vkCreateXcbSurfaceKHR = nullptr; +#else + void * fp_vkCreateXcbSurfaceKHR{}; +#endif +#if (defined(VK_KHR_xlib_surface)) + PFN_vkCreateXlibSurfaceKHR fp_vkCreateXlibSurfaceKHR = nullptr; +#else + void * fp_vkCreateXlibSurfaceKHR{}; +#endif +#if (defined(VK_EXT_debug_report)) + PFN_vkDebugReportMessageEXT fp_vkDebugReportMessageEXT = nullptr; +#else + void * fp_vkDebugReportMessageEXT{}; +#endif +#if (defined(VK_EXT_debug_report)) + PFN_vkDestroyDebugReportCallbackEXT fp_vkDestroyDebugReportCallbackEXT = nullptr; +#else + void * fp_vkDestroyDebugReportCallbackEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkDestroyDebugUtilsMessengerEXT fp_vkDestroyDebugUtilsMessengerEXT = nullptr; +#else + void * fp_vkDestroyDebugUtilsMessengerEXT{}; +#endif + PFN_vkDestroyInstance fp_vkDestroyInstance = nullptr; +#if (defined(VK_KHR_surface)) + PFN_vkDestroySurfaceKHR fp_vkDestroySurfaceKHR = nullptr; +#else + void * fp_vkDestroySurfaceKHR{}; +#endif + PFN_vkEnumerateDeviceExtensionProperties fp_vkEnumerateDeviceExtensionProperties = nullptr; + PFN_vkEnumerateDeviceLayerProperties fp_vkEnumerateDeviceLayerProperties = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkEnumeratePhysicalDeviceGroups fp_vkEnumeratePhysicalDeviceGroups = nullptr; +#else + void * fp_vkEnumeratePhysicalDeviceGroups{}; +#endif +#if (defined(VK_KHR_device_group_creation)) + PFN_vkEnumeratePhysicalDeviceGroupsKHR fp_vkEnumeratePhysicalDeviceGroupsKHR = nullptr; +#else + void * fp_vkEnumeratePhysicalDeviceGroupsKHR{}; +#endif +#if (defined(VK_KHR_performance_query)) + PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR fp_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = nullptr; +#else + void * fp_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR{}; +#endif + PFN_vkEnumeratePhysicalDevices fp_vkEnumeratePhysicalDevices = nullptr; +#if 
(defined(VK_KHR_get_display_properties2)) + PFN_vkGetDisplayModeProperties2KHR fp_vkGetDisplayModeProperties2KHR = nullptr; +#else + void * fp_vkGetDisplayModeProperties2KHR{}; +#endif +#if (defined(VK_KHR_display)) + PFN_vkGetDisplayModePropertiesKHR fp_vkGetDisplayModePropertiesKHR = nullptr; +#else + void * fp_vkGetDisplayModePropertiesKHR{}; +#endif +#if (defined(VK_KHR_get_display_properties2)) + PFN_vkGetDisplayPlaneCapabilities2KHR fp_vkGetDisplayPlaneCapabilities2KHR = nullptr; +#else + void * fp_vkGetDisplayPlaneCapabilities2KHR{}; +#endif +#if (defined(VK_KHR_display)) + PFN_vkGetDisplayPlaneCapabilitiesKHR fp_vkGetDisplayPlaneCapabilitiesKHR = nullptr; +#else + void * fp_vkGetDisplayPlaneCapabilitiesKHR{}; +#endif +#if (defined(VK_KHR_display)) + PFN_vkGetDisplayPlaneSupportedDisplaysKHR fp_vkGetDisplayPlaneSupportedDisplaysKHR = nullptr; +#else + void * fp_vkGetDisplayPlaneSupportedDisplaysKHR{}; +#endif +#if (defined(VK_EXT_acquire_drm_display)) + PFN_vkGetDrmDisplayEXT fp_vkGetDrmDisplayEXT = nullptr; +#else + void * fp_vkGetDrmDisplayEXT{}; +#endif + PFN_vkGetInstanceProcAddr fp_vkGetInstanceProcAddr = nullptr; +#if (defined(VK_EXT_calibrated_timestamps)) + PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT fp_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = nullptr; +#else + void * fp_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT{}; +#endif +#if (defined(VK_KHR_calibrated_timestamps)) + PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR fp_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR{}; +#endif +#if (defined(VK_NV_cooperative_matrix2)) + PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV fp_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV = nullptr; +#else + void * fp_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV{}; +#endif +#if (defined(VK_KHR_cooperative_matrix)) + 
PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR{}; +#endif +#if (defined(VK_NV_cooperative_matrix)) + PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = nullptr; +#else + void * fp_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV{}; +#endif +#if (defined(VK_NV_cooperative_vector)) + PFN_vkGetPhysicalDeviceCooperativeVectorPropertiesNV fp_vkGetPhysicalDeviceCooperativeVectorPropertiesNV = nullptr; +#else + void * fp_vkGetPhysicalDeviceCooperativeVectorPropertiesNV{}; +#endif +#if (defined(VK_EXT_directfb_surface)) + PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT fp_vkGetPhysicalDeviceDirectFBPresentationSupportEXT = nullptr; +#else + void * fp_vkGetPhysicalDeviceDirectFBPresentationSupportEXT{}; +#endif +#if (defined(VK_KHR_get_display_properties2)) + PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR fp_vkGetPhysicalDeviceDisplayPlaneProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceDisplayPlaneProperties2KHR{}; +#endif +#if (defined(VK_KHR_display)) + PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR fp_vkGetPhysicalDeviceDisplayPlanePropertiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceDisplayPlanePropertiesKHR{}; +#endif +#if (defined(VK_KHR_get_display_properties2)) + PFN_vkGetPhysicalDeviceDisplayProperties2KHR fp_vkGetPhysicalDeviceDisplayProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceDisplayProperties2KHR{}; +#endif +#if (defined(VK_KHR_display)) + PFN_vkGetPhysicalDeviceDisplayPropertiesKHR fp_vkGetPhysicalDeviceDisplayPropertiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceDisplayPropertiesKHR{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceExternalBufferProperties fp_vkGetPhysicalDeviceExternalBufferProperties = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalBufferProperties{}; +#endif 
+#if (defined(VK_KHR_external_memory_capabilities)) + PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR fp_vkGetPhysicalDeviceExternalBufferPropertiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalBufferPropertiesKHR{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceExternalFenceProperties fp_vkGetPhysicalDeviceExternalFenceProperties = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalFenceProperties{}; +#endif +#if (defined(VK_KHR_external_fence_capabilities)) + PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR fp_vkGetPhysicalDeviceExternalFencePropertiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalFencePropertiesKHR{}; +#endif +#if (defined(VK_NV_external_memory_capabilities)) + PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV fp_vkGetPhysicalDeviceExternalImageFormatPropertiesNV = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalImageFormatPropertiesNV{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceExternalSemaphoreProperties fp_vkGetPhysicalDeviceExternalSemaphoreProperties = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalSemaphoreProperties{}; +#endif +#if (defined(VK_KHR_external_semaphore_capabilities)) + PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR fp_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkGetPhysicalDeviceExternalTensorPropertiesARM fp_vkGetPhysicalDeviceExternalTensorPropertiesARM = nullptr; +#else + void * fp_vkGetPhysicalDeviceExternalTensorPropertiesARM{}; +#endif + PFN_vkGetPhysicalDeviceFeatures fp_vkGetPhysicalDeviceFeatures = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceFeatures2 fp_vkGetPhysicalDeviceFeatures2 = nullptr; +#else + void * fp_vkGetPhysicalDeviceFeatures2{}; +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + PFN_vkGetPhysicalDeviceFeatures2KHR 
fp_vkGetPhysicalDeviceFeatures2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceFeatures2KHR{}; +#endif + PFN_vkGetPhysicalDeviceFormatProperties fp_vkGetPhysicalDeviceFormatProperties = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceFormatProperties2 fp_vkGetPhysicalDeviceFormatProperties2 = nullptr; +#else + void * fp_vkGetPhysicalDeviceFormatProperties2{}; +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + PFN_vkGetPhysicalDeviceFormatProperties2KHR fp_vkGetPhysicalDeviceFormatProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceFormatProperties2KHR{}; +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR fp_vkGetPhysicalDeviceFragmentShadingRatesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceFragmentShadingRatesKHR{}; +#endif + PFN_vkGetPhysicalDeviceImageFormatProperties fp_vkGetPhysicalDeviceImageFormatProperties = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceImageFormatProperties2 fp_vkGetPhysicalDeviceImageFormatProperties2 = nullptr; +#else + void * fp_vkGetPhysicalDeviceImageFormatProperties2{}; +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + PFN_vkGetPhysicalDeviceImageFormatProperties2KHR fp_vkGetPhysicalDeviceImageFormatProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceImageFormatProperties2KHR{}; +#endif + PFN_vkGetPhysicalDeviceMemoryProperties fp_vkGetPhysicalDeviceMemoryProperties = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceMemoryProperties2 fp_vkGetPhysicalDeviceMemoryProperties2 = nullptr; +#else + void * fp_vkGetPhysicalDeviceMemoryProperties2{}; +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + PFN_vkGetPhysicalDeviceMemoryProperties2KHR fp_vkGetPhysicalDeviceMemoryProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceMemoryProperties2KHR{}; +#endif +#if (defined(VK_EXT_sample_locations)) + PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT 
fp_vkGetPhysicalDeviceMultisamplePropertiesEXT = nullptr; +#else + void * fp_vkGetPhysicalDeviceMultisamplePropertiesEXT{}; +#endif +#if (defined(VK_NV_optical_flow)) + PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV fp_vkGetPhysicalDeviceOpticalFlowImageFormatsNV = nullptr; +#else + void * fp_vkGetPhysicalDeviceOpticalFlowImageFormatsNV{}; +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + PFN_vkGetPhysicalDevicePresentRectanglesKHR fp_vkGetPhysicalDevicePresentRectanglesKHR = nullptr; +#else + void * fp_vkGetPhysicalDevicePresentRectanglesKHR{}; +#endif + PFN_vkGetPhysicalDeviceProperties fp_vkGetPhysicalDeviceProperties = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceProperties2 fp_vkGetPhysicalDeviceProperties2 = nullptr; +#else + void * fp_vkGetPhysicalDeviceProperties2{}; +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + PFN_vkGetPhysicalDeviceProperties2KHR fp_vkGetPhysicalDeviceProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceProperties2KHR{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM fp_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM = nullptr; +#else + void * fp_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM fp_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM = nullptr; +#else + void * fp_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM{}; +#endif +#if (defined(VK_KHR_performance_query)) + PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR fp_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR{}; +#endif + PFN_vkGetPhysicalDeviceQueueFamilyProperties fp_vkGetPhysicalDeviceQueueFamilyProperties = nullptr; +#if (defined(VK_VERSION_1_1)) + 
PFN_vkGetPhysicalDeviceQueueFamilyProperties2 fp_vkGetPhysicalDeviceQueueFamilyProperties2 = nullptr; +#else + void * fp_vkGetPhysicalDeviceQueueFamilyProperties2{}; +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR fp_vkGetPhysicalDeviceQueueFamilyProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceQueueFamilyProperties2KHR{}; +#endif +#if (defined(VK_QNX_screen_surface)) + PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX fp_vkGetPhysicalDeviceScreenPresentationSupportQNX = nullptr; +#else + void * fp_vkGetPhysicalDeviceScreenPresentationSupportQNX{}; +#endif + PFN_vkGetPhysicalDeviceSparseImageFormatProperties fp_vkGetPhysicalDeviceSparseImageFormatProperties = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 fp_vkGetPhysicalDeviceSparseImageFormatProperties2 = nullptr; +#else + void * fp_vkGetPhysicalDeviceSparseImageFormatProperties2{}; +#endif +#if (defined(VK_KHR_get_physical_device_properties2)) + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR fp_vkGetPhysicalDeviceSparseImageFormatProperties2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceSparseImageFormatProperties2KHR{}; +#endif +#if (defined(VK_NV_coverage_reduction_mode)) + PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV fp_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = nullptr; +#else + void * fp_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV{}; +#endif +#if (defined(VK_EXT_display_surface_counter)) + PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT fp_vkGetPhysicalDeviceSurfaceCapabilities2EXT = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfaceCapabilities2EXT{}; +#endif +#if (defined(VK_KHR_get_surface_capabilities2)) + PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR fp_vkGetPhysicalDeviceSurfaceCapabilities2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfaceCapabilities2KHR{}; +#endif +#if 
(defined(VK_KHR_surface)) + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfaceCapabilitiesKHR{}; +#endif +#if (defined(VK_KHR_get_surface_capabilities2)) + PFN_vkGetPhysicalDeviceSurfaceFormats2KHR fp_vkGetPhysicalDeviceSurfaceFormats2KHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfaceFormats2KHR{}; +#endif +#if (defined(VK_KHR_surface)) + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR fp_vkGetPhysicalDeviceSurfaceFormatsKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfaceFormatsKHR{}; +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT fp_vkGetPhysicalDeviceSurfacePresentModes2EXT = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfacePresentModes2EXT{}; +#endif +#if (defined(VK_KHR_surface)) + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR fp_vkGetPhysicalDeviceSurfacePresentModesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfacePresentModesKHR{}; +#endif +#if (defined(VK_KHR_surface)) + PFN_vkGetPhysicalDeviceSurfaceSupportKHR fp_vkGetPhysicalDeviceSurfaceSupportKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceSurfaceSupportKHR{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkGetPhysicalDeviceToolProperties fp_vkGetPhysicalDeviceToolProperties = nullptr; +#else + void * fp_vkGetPhysicalDeviceToolProperties{}; +#endif +#if (defined(VK_EXT_tooling_info)) + PFN_vkGetPhysicalDeviceToolPropertiesEXT fp_vkGetPhysicalDeviceToolPropertiesEXT = nullptr; +#else + void * fp_vkGetPhysicalDeviceToolPropertiesEXT{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR fp_vkGetPhysicalDeviceVideoCapabilitiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceVideoCapabilitiesKHR{}; +#endif +#if (defined(VK_KHR_video_encode_queue)) + PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR fp_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR = nullptr; +#else + 
void * fp_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR fp_vkGetPhysicalDeviceVideoFormatPropertiesKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceVideoFormatPropertiesKHR{}; +#endif +#if (defined(VK_KHR_wayland_surface)) + PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR fp_vkGetPhysicalDeviceWaylandPresentationSupportKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceWaylandPresentationSupportKHR{}; +#endif +#if (defined(VK_KHR_win32_surface)) + PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR fp_vkGetPhysicalDeviceWin32PresentationSupportKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceWin32PresentationSupportKHR{}; +#endif +#if (defined(VK_KHR_xcb_surface)) + PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR fp_vkGetPhysicalDeviceXcbPresentationSupportKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceXcbPresentationSupportKHR{}; +#endif +#if (defined(VK_KHR_xlib_surface)) + PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR fp_vkGetPhysicalDeviceXlibPresentationSupportKHR = nullptr; +#else + void * fp_vkGetPhysicalDeviceXlibPresentationSupportKHR{}; +#endif +#if (defined(VK_EXT_acquire_xlib_display)) + PFN_vkGetRandROutputDisplayEXT fp_vkGetRandROutputDisplayEXT = nullptr; +#else + void * fp_vkGetRandROutputDisplayEXT{}; +#endif +#if (defined(VK_NV_acquire_winrt_display)) + PFN_vkGetWinrtDisplayNV fp_vkGetWinrtDisplayNV = nullptr; +#else + void * fp_vkGetWinrtDisplayNV{}; +#endif +#if (defined(VK_EXT_direct_mode_display)) + PFN_vkReleaseDisplayEXT fp_vkReleaseDisplayEXT = nullptr; +#else + void * fp_vkReleaseDisplayEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkSubmitDebugUtilsMessageEXT fp_vkSubmitDebugUtilsMessageEXT = nullptr; +#else + void * fp_vkSubmitDebugUtilsMessageEXT{}; +#endif + bool is_populated() const { return populated; } + VkInstance instance = VK_NULL_HANDLE; +private: + bool populated = false; +}; + +struct 
DispatchTable { + DispatchTable() = default; + DispatchTable(VkDevice device, PFN_vkGetDeviceProcAddr procAddr) : device(device), populated(true) { +#if (defined(VK_EXT_full_screen_exclusive)) + fp_vkAcquireFullScreenExclusiveModeEXT = reinterpret_cast(procAddr(device, "vkAcquireFullScreenExclusiveModeEXT")); +#endif +#if (defined(VK_OHOS_native_buffer)) + fp_vkAcquireImageOHOS = reinterpret_cast(procAddr(device, "vkAcquireImageOHOS")); +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + fp_vkAcquireNextImage2KHR = reinterpret_cast(procAddr(device, "vkAcquireNextImage2KHR")); +#endif +#if (defined(VK_KHR_swapchain)) + fp_vkAcquireNextImageKHR = reinterpret_cast(procAddr(device, "vkAcquireNextImageKHR")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkAcquirePerformanceConfigurationINTEL = reinterpret_cast(procAddr(device, "vkAcquirePerformanceConfigurationINTEL")); +#endif +#if (defined(VK_KHR_performance_query)) + fp_vkAcquireProfilingLockKHR = reinterpret_cast(procAddr(device, "vkAcquireProfilingLockKHR")); +#endif + fp_vkAllocateCommandBuffers = reinterpret_cast(procAddr(device, "vkAllocateCommandBuffers")); + fp_vkAllocateDescriptorSets = reinterpret_cast(procAddr(device, "vkAllocateDescriptorSets")); + fp_vkAllocateMemory = reinterpret_cast(procAddr(device, "vkAllocateMemory")); +#if (defined(VK_AMD_anti_lag)) + fp_vkAntiLagUpdateAMD = reinterpret_cast(procAddr(device, "vkAntiLagUpdateAMD")); +#endif + fp_vkBeginCommandBuffer = reinterpret_cast(procAddr(device, "vkBeginCommandBuffer")); +#if (defined(VK_NV_ray_tracing)) + fp_vkBindAccelerationStructureMemoryNV = reinterpret_cast(procAddr(device, "vkBindAccelerationStructureMemoryNV")); +#endif + fp_vkBindBufferMemory = reinterpret_cast(procAddr(device, "vkBindBufferMemory")); +#if (defined(VK_VERSION_1_1)) + fp_vkBindBufferMemory2 = reinterpret_cast(procAddr(device, "vkBindBufferMemory2")); +#endif +#if (defined(VK_KHR_bind_memory2)) + fp_vkBindBufferMemory2KHR = 
reinterpret_cast(procAddr(device, "vkBindBufferMemory2KHR")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkBindDataGraphPipelineSessionMemoryARM = reinterpret_cast(procAddr(device, "vkBindDataGraphPipelineSessionMemoryARM")); +#endif + fp_vkBindImageMemory = reinterpret_cast(procAddr(device, "vkBindImageMemory")); +#if (defined(VK_VERSION_1_1)) + fp_vkBindImageMemory2 = reinterpret_cast(procAddr(device, "vkBindImageMemory2")); +#endif +#if (defined(VK_KHR_bind_memory2)) + fp_vkBindImageMemory2KHR = reinterpret_cast(procAddr(device, "vkBindImageMemory2KHR")); +#endif +#if (defined(VK_NV_optical_flow)) + fp_vkBindOpticalFlowSessionImageNV = reinterpret_cast(procAddr(device, "vkBindOpticalFlowSessionImageNV")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkBindTensorMemoryARM = reinterpret_cast(procAddr(device, "vkBindTensorMemoryARM")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkBindVideoSessionMemoryKHR = reinterpret_cast(procAddr(device, "vkBindVideoSessionMemoryKHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkBuildAccelerationStructuresKHR = reinterpret_cast(procAddr(device, "vkBuildAccelerationStructuresKHR")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkBuildMicromapsEXT = reinterpret_cast(procAddr(device, "vkBuildMicromapsEXT")); +#endif +#if (defined(VK_EXT_conditional_rendering)) + fp_vkCmdBeginConditionalRenderingEXT = reinterpret_cast(procAddr(device, "vkCmdBeginConditionalRenderingEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkCmdBeginDebugUtilsLabelEXT = reinterpret_cast(procAddr(device, "vkCmdBeginDebugUtilsLabelEXT")); +#endif +#if (defined(VK_QCOM_tile_shading)) + fp_vkCmdBeginPerTileExecutionQCOM = reinterpret_cast(procAddr(device, "vkCmdBeginPerTileExecutionQCOM")); +#endif + fp_vkCmdBeginQuery = reinterpret_cast(procAddr(device, "vkCmdBeginQuery")); +#if (defined(VK_EXT_transform_feedback)) + fp_vkCmdBeginQueryIndexedEXT = reinterpret_cast(procAddr(device, "vkCmdBeginQueryIndexedEXT")); +#endif + 
fp_vkCmdBeginRenderPass = reinterpret_cast(procAddr(device, "vkCmdBeginRenderPass")); +#if (defined(VK_VERSION_1_2)) + fp_vkCmdBeginRenderPass2 = reinterpret_cast(procAddr(device, "vkCmdBeginRenderPass2")); +#endif +#if (defined(VK_KHR_create_renderpass2)) + fp_vkCmdBeginRenderPass2KHR = reinterpret_cast(procAddr(device, "vkCmdBeginRenderPass2KHR")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdBeginRendering = reinterpret_cast(procAddr(device, "vkCmdBeginRendering")); +#endif +#if (defined(VK_KHR_dynamic_rendering)) + fp_vkCmdBeginRenderingKHR = reinterpret_cast(procAddr(device, "vkCmdBeginRenderingKHR")); +#endif +#if (defined(VK_EXT_transform_feedback)) + fp_vkCmdBeginTransformFeedbackEXT = reinterpret_cast(procAddr(device, "vkCmdBeginTransformFeedbackEXT")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkCmdBeginVideoCodingKHR = reinterpret_cast(procAddr(device, "vkCmdBeginVideoCodingKHR")); +#endif +#if (defined(VK_KHR_maintenance6)) + fp_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT = reinterpret_cast(procAddr(device, "vkCmdBindDescriptorBufferEmbeddedSamplers2EXT")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkCmdBindDescriptorBufferEmbeddedSamplersEXT = reinterpret_cast(procAddr(device, "vkCmdBindDescriptorBufferEmbeddedSamplersEXT")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkCmdBindDescriptorBuffersEXT = reinterpret_cast(procAddr(device, "vkCmdBindDescriptorBuffersEXT")); +#endif + fp_vkCmdBindDescriptorSets = reinterpret_cast(procAddr(device, "vkCmdBindDescriptorSets")); +#if (defined(VK_VERSION_1_4)) + fp_vkCmdBindDescriptorSets2 = reinterpret_cast(procAddr(device, "vkCmdBindDescriptorSets2")); +#endif +#if (defined(VK_KHR_maintenance6)) + fp_vkCmdBindDescriptorSets2KHR = reinterpret_cast(procAddr(device, "vkCmdBindDescriptorSets2KHR")); +#endif + fp_vkCmdBindIndexBuffer = reinterpret_cast(procAddr(device, "vkCmdBindIndexBuffer")); +#if (defined(VK_VERSION_1_4)) + fp_vkCmdBindIndexBuffer2 = 
reinterpret_cast(procAddr(device, "vkCmdBindIndexBuffer2")); +#endif +#if (defined(VK_KHR_maintenance5)) + fp_vkCmdBindIndexBuffer2KHR = reinterpret_cast(procAddr(device, "vkCmdBindIndexBuffer2KHR")); +#endif +#if (defined(VK_HUAWEI_invocation_mask)) + fp_vkCmdBindInvocationMaskHUAWEI = reinterpret_cast(procAddr(device, "vkCmdBindInvocationMaskHUAWEI")); +#endif + fp_vkCmdBindPipeline = reinterpret_cast(procAddr(device, "vkCmdBindPipeline")); +#if (defined(VK_NV_device_generated_commands)) + fp_vkCmdBindPipelineShaderGroupNV = reinterpret_cast(procAddr(device, "vkCmdBindPipelineShaderGroupNV")); +#endif +#if (defined(VK_EXT_shader_object)) + fp_vkCmdBindShadersEXT = reinterpret_cast(procAddr(device, "vkCmdBindShadersEXT")); +#endif +#if (defined(VK_NV_shading_rate_image)) + fp_vkCmdBindShadingRateImageNV = reinterpret_cast(procAddr(device, "vkCmdBindShadingRateImageNV")); +#endif +#if (defined(VK_QCOM_tile_memory_heap)) + fp_vkCmdBindTileMemoryQCOM = reinterpret_cast(procAddr(device, "vkCmdBindTileMemoryQCOM")); +#endif +#if (defined(VK_EXT_transform_feedback)) + fp_vkCmdBindTransformFeedbackBuffersEXT = reinterpret_cast(procAddr(device, "vkCmdBindTransformFeedbackBuffersEXT")); +#endif + fp_vkCmdBindVertexBuffers = reinterpret_cast(procAddr(device, "vkCmdBindVertexBuffers")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdBindVertexBuffers2 = reinterpret_cast(procAddr(device, "vkCmdBindVertexBuffers2")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdBindVertexBuffers2EXT = reinterpret_cast(procAddr(device, "vkCmdBindVertexBuffers2EXT")); +#endif + fp_vkCmdBlitImage = reinterpret_cast(procAddr(device, "vkCmdBlitImage")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdBlitImage2 = reinterpret_cast(procAddr(device, "vkCmdBlitImage2")); +#endif +#if (defined(VK_KHR_copy_commands2)) + fp_vkCmdBlitImage2KHR = reinterpret_cast(procAddr(device, "vkCmdBlitImage2KHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + 
fp_vkCmdBuildAccelerationStructureNV = reinterpret_cast(procAddr(device, "vkCmdBuildAccelerationStructureNV")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCmdBuildAccelerationStructuresIndirectKHR = reinterpret_cast(procAddr(device, "vkCmdBuildAccelerationStructuresIndirectKHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCmdBuildAccelerationStructuresKHR = reinterpret_cast(procAddr(device, "vkCmdBuildAccelerationStructuresKHR")); +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + fp_vkCmdBuildClusterAccelerationStructureIndirectNV = reinterpret_cast(procAddr(device, "vkCmdBuildClusterAccelerationStructureIndirectNV")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCmdBuildMicromapsEXT = reinterpret_cast(procAddr(device, "vkCmdBuildMicromapsEXT")); +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + fp_vkCmdBuildPartitionedAccelerationStructuresNV = reinterpret_cast(procAddr(device, "vkCmdBuildPartitionedAccelerationStructuresNV")); +#endif + fp_vkCmdClearAttachments = reinterpret_cast(procAddr(device, "vkCmdClearAttachments")); + fp_vkCmdClearColorImage = reinterpret_cast(procAddr(device, "vkCmdClearColorImage")); + fp_vkCmdClearDepthStencilImage = reinterpret_cast(procAddr(device, "vkCmdClearDepthStencilImage")); +#if (defined(VK_KHR_video_queue)) + fp_vkCmdControlVideoCodingKHR = reinterpret_cast(procAddr(device, "vkCmdControlVideoCodingKHR")); +#endif +#if (defined(VK_NV_cooperative_vector)) + fp_vkCmdConvertCooperativeVectorMatrixNV = reinterpret_cast(procAddr(device, "vkCmdConvertCooperativeVectorMatrixNV")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCmdCopyAccelerationStructureKHR = reinterpret_cast(procAddr(device, "vkCmdCopyAccelerationStructureKHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkCmdCopyAccelerationStructureNV = reinterpret_cast(procAddr(device, "vkCmdCopyAccelerationStructureNV")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + 
fp_vkCmdCopyAccelerationStructureToMemoryKHR = reinterpret_cast(procAddr(device, "vkCmdCopyAccelerationStructureToMemoryKHR")); +#endif + fp_vkCmdCopyBuffer = reinterpret_cast(procAddr(device, "vkCmdCopyBuffer")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdCopyBuffer2 = reinterpret_cast(procAddr(device, "vkCmdCopyBuffer2")); +#endif +#if (defined(VK_KHR_copy_commands2)) + fp_vkCmdCopyBuffer2KHR = reinterpret_cast(procAddr(device, "vkCmdCopyBuffer2KHR")); +#endif + fp_vkCmdCopyBufferToImage = reinterpret_cast(procAddr(device, "vkCmdCopyBufferToImage")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdCopyBufferToImage2 = reinterpret_cast(procAddr(device, "vkCmdCopyBufferToImage2")); +#endif +#if (defined(VK_KHR_copy_commands2)) + fp_vkCmdCopyBufferToImage2KHR = reinterpret_cast(procAddr(device, "vkCmdCopyBufferToImage2KHR")); +#endif + fp_vkCmdCopyImage = reinterpret_cast(procAddr(device, "vkCmdCopyImage")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdCopyImage2 = reinterpret_cast(procAddr(device, "vkCmdCopyImage2")); +#endif +#if (defined(VK_KHR_copy_commands2)) + fp_vkCmdCopyImage2KHR = reinterpret_cast(procAddr(device, "vkCmdCopyImage2KHR")); +#endif + fp_vkCmdCopyImageToBuffer = reinterpret_cast(procAddr(device, "vkCmdCopyImageToBuffer")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdCopyImageToBuffer2 = reinterpret_cast(procAddr(device, "vkCmdCopyImageToBuffer2")); +#endif +#if (defined(VK_KHR_copy_commands2)) + fp_vkCmdCopyImageToBuffer2KHR = reinterpret_cast(procAddr(device, "vkCmdCopyImageToBuffer2KHR")); +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + fp_vkCmdCopyMemoryIndirectKHR = reinterpret_cast(procAddr(device, "vkCmdCopyMemoryIndirectKHR")); +#endif +#if (defined(VK_NV_copy_memory_indirect)) + fp_vkCmdCopyMemoryIndirectNV = reinterpret_cast(procAddr(device, "vkCmdCopyMemoryIndirectNV")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCmdCopyMemoryToAccelerationStructureKHR = reinterpret_cast(procAddr(device, 
"vkCmdCopyMemoryToAccelerationStructureKHR")); +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + fp_vkCmdCopyMemoryToImageIndirectKHR = reinterpret_cast(procAddr(device, "vkCmdCopyMemoryToImageIndirectKHR")); +#endif +#if (defined(VK_NV_copy_memory_indirect)) + fp_vkCmdCopyMemoryToImageIndirectNV = reinterpret_cast(procAddr(device, "vkCmdCopyMemoryToImageIndirectNV")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCmdCopyMemoryToMicromapEXT = reinterpret_cast(procAddr(device, "vkCmdCopyMemoryToMicromapEXT")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCmdCopyMicromapEXT = reinterpret_cast(procAddr(device, "vkCmdCopyMicromapEXT")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCmdCopyMicromapToMemoryEXT = reinterpret_cast(procAddr(device, "vkCmdCopyMicromapToMemoryEXT")); +#endif + fp_vkCmdCopyQueryPoolResults = reinterpret_cast(procAddr(device, "vkCmdCopyQueryPoolResults")); +#if (defined(VK_ARM_tensors)) + fp_vkCmdCopyTensorARM = reinterpret_cast(procAddr(device, "vkCmdCopyTensorARM")); +#endif +#if (defined(VK_NVX_binary_import)) + fp_vkCmdCuLaunchKernelNVX = reinterpret_cast(procAddr(device, "vkCmdCuLaunchKernelNVX")); +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + fp_vkCmdCudaLaunchKernelNV = reinterpret_cast(procAddr(device, "vkCmdCudaLaunchKernelNV")); +#endif +#if (defined(VK_EXT_debug_marker)) + fp_vkCmdDebugMarkerBeginEXT = reinterpret_cast(procAddr(device, "vkCmdDebugMarkerBeginEXT")); +#endif +#if (defined(VK_EXT_debug_marker)) + fp_vkCmdDebugMarkerEndEXT = reinterpret_cast(procAddr(device, "vkCmdDebugMarkerEndEXT")); +#endif +#if (defined(VK_EXT_debug_marker)) + fp_vkCmdDebugMarkerInsertEXT = reinterpret_cast(procAddr(device, "vkCmdDebugMarkerInsertEXT")); +#endif +#if (defined(VK_KHR_video_decode_queue)) + fp_vkCmdDecodeVideoKHR = reinterpret_cast(procAddr(device, "vkCmdDecodeVideoKHR")); +#endif +#if (defined(VK_EXT_memory_decompression)) + fp_vkCmdDecompressMemoryEXT = reinterpret_cast(procAddr(device, 
"vkCmdDecompressMemoryEXT")); +#endif +#if (defined(VK_EXT_memory_decompression)) + fp_vkCmdDecompressMemoryIndirectCountEXT = reinterpret_cast(procAddr(device, "vkCmdDecompressMemoryIndirectCountEXT")); +#endif +#if (defined(VK_NV_memory_decompression)) + fp_vkCmdDecompressMemoryIndirectCountNV = reinterpret_cast(procAddr(device, "vkCmdDecompressMemoryIndirectCountNV")); +#endif +#if (defined(VK_NV_memory_decompression)) + fp_vkCmdDecompressMemoryNV = reinterpret_cast(procAddr(device, "vkCmdDecompressMemoryNV")); +#endif + fp_vkCmdDispatch = reinterpret_cast(procAddr(device, "vkCmdDispatch")); +#if (defined(VK_VERSION_1_1)) + fp_vkCmdDispatchBase = reinterpret_cast(procAddr(device, "vkCmdDispatchBase")); +#endif +#if (defined(VK_KHR_device_group)) + fp_vkCmdDispatchBaseKHR = reinterpret_cast(procAddr(device, "vkCmdDispatchBaseKHR")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkCmdDispatchDataGraphARM = reinterpret_cast(procAddr(device, "vkCmdDispatchDataGraphARM")); +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + fp_vkCmdDispatchGraphAMDX = reinterpret_cast(procAddr(device, "vkCmdDispatchGraphAMDX")); +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + fp_vkCmdDispatchGraphIndirectAMDX = reinterpret_cast(procAddr(device, "vkCmdDispatchGraphIndirectAMDX")); +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + fp_vkCmdDispatchGraphIndirectCountAMDX = reinterpret_cast(procAddr(device, "vkCmdDispatchGraphIndirectCountAMDX")); +#endif + fp_vkCmdDispatchIndirect = reinterpret_cast(procAddr(device, "vkCmdDispatchIndirect")); +#if (defined(VK_QCOM_tile_shading)) && VK_HEADER_VERSION >= 316 + fp_vkCmdDispatchTileQCOM = reinterpret_cast(procAddr(device, "vkCmdDispatchTileQCOM")); +#endif + fp_vkCmdDraw = reinterpret_cast(procAddr(device, "vkCmdDraw")); +#if (defined(VK_HUAWEI_cluster_culling_shader)) + fp_vkCmdDrawClusterHUAWEI = reinterpret_cast(procAddr(device, "vkCmdDrawClusterHUAWEI")); 
+#endif +#if (defined(VK_HUAWEI_cluster_culling_shader)) + fp_vkCmdDrawClusterIndirectHUAWEI = reinterpret_cast(procAddr(device, "vkCmdDrawClusterIndirectHUAWEI")); +#endif + fp_vkCmdDrawIndexed = reinterpret_cast(procAddr(device, "vkCmdDrawIndexed")); + fp_vkCmdDrawIndexedIndirect = reinterpret_cast(procAddr(device, "vkCmdDrawIndexedIndirect")); +#if (defined(VK_VERSION_1_2)) + fp_vkCmdDrawIndexedIndirectCount = reinterpret_cast(procAddr(device, "vkCmdDrawIndexedIndirectCount")); +#endif +#if (defined(VK_AMD_draw_indirect_count)) + fp_vkCmdDrawIndexedIndirectCountAMD = reinterpret_cast(procAddr(device, "vkCmdDrawIndexedIndirectCountAMD")); +#endif +#if (defined(VK_KHR_draw_indirect_count)) + fp_vkCmdDrawIndexedIndirectCountKHR = reinterpret_cast(procAddr(device, "vkCmdDrawIndexedIndirectCountKHR")); +#endif + fp_vkCmdDrawIndirect = reinterpret_cast(procAddr(device, "vkCmdDrawIndirect")); +#if (defined(VK_EXT_transform_feedback)) + fp_vkCmdDrawIndirectByteCountEXT = reinterpret_cast(procAddr(device, "vkCmdDrawIndirectByteCountEXT")); +#endif +#if (defined(VK_VERSION_1_2)) + fp_vkCmdDrawIndirectCount = reinterpret_cast(procAddr(device, "vkCmdDrawIndirectCount")); +#endif +#if (defined(VK_AMD_draw_indirect_count)) + fp_vkCmdDrawIndirectCountAMD = reinterpret_cast(procAddr(device, "vkCmdDrawIndirectCountAMD")); +#endif +#if (defined(VK_KHR_draw_indirect_count)) + fp_vkCmdDrawIndirectCountKHR = reinterpret_cast(procAddr(device, "vkCmdDrawIndirectCountKHR")); +#endif +#if (defined(VK_EXT_mesh_shader)) + fp_vkCmdDrawMeshTasksEXT = reinterpret_cast(procAddr(device, "vkCmdDrawMeshTasksEXT")); +#endif +#if (defined(VK_EXT_mesh_shader)) + fp_vkCmdDrawMeshTasksIndirectCountEXT = reinterpret_cast(procAddr(device, "vkCmdDrawMeshTasksIndirectCountEXT")); +#endif +#if (defined(VK_NV_mesh_shader)) + fp_vkCmdDrawMeshTasksIndirectCountNV = reinterpret_cast(procAddr(device, "vkCmdDrawMeshTasksIndirectCountNV")); +#endif +#if (defined(VK_EXT_mesh_shader)) + 
fp_vkCmdDrawMeshTasksIndirectEXT = reinterpret_cast(procAddr(device, "vkCmdDrawMeshTasksIndirectEXT")); +#endif +#if (defined(VK_NV_mesh_shader)) + fp_vkCmdDrawMeshTasksIndirectNV = reinterpret_cast(procAddr(device, "vkCmdDrawMeshTasksIndirectNV")); +#endif +#if (defined(VK_NV_mesh_shader)) + fp_vkCmdDrawMeshTasksNV = reinterpret_cast(procAddr(device, "vkCmdDrawMeshTasksNV")); +#endif +#if (defined(VK_EXT_multi_draw)) + fp_vkCmdDrawMultiEXT = reinterpret_cast(procAddr(device, "vkCmdDrawMultiEXT")); +#endif +#if (defined(VK_EXT_multi_draw)) + fp_vkCmdDrawMultiIndexedEXT = reinterpret_cast(procAddr(device, "vkCmdDrawMultiIndexedEXT")); +#endif +#if (defined(VK_KHR_video_encode_queue)) + fp_vkCmdEncodeVideoKHR = reinterpret_cast(procAddr(device, "vkCmdEncodeVideoKHR")); +#endif +#if (defined(VK_EXT_conditional_rendering)) + fp_vkCmdEndConditionalRenderingEXT = reinterpret_cast(procAddr(device, "vkCmdEndConditionalRenderingEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkCmdEndDebugUtilsLabelEXT = reinterpret_cast(procAddr(device, "vkCmdEndDebugUtilsLabelEXT")); +#endif +#if (defined(VK_QCOM_tile_shading)) + fp_vkCmdEndPerTileExecutionQCOM = reinterpret_cast(procAddr(device, "vkCmdEndPerTileExecutionQCOM")); +#endif + fp_vkCmdEndQuery = reinterpret_cast(procAddr(device, "vkCmdEndQuery")); +#if (defined(VK_EXT_transform_feedback)) + fp_vkCmdEndQueryIndexedEXT = reinterpret_cast(procAddr(device, "vkCmdEndQueryIndexedEXT")); +#endif + fp_vkCmdEndRenderPass = reinterpret_cast(procAddr(device, "vkCmdEndRenderPass")); +#if (defined(VK_VERSION_1_2)) + fp_vkCmdEndRenderPass2 = reinterpret_cast(procAddr(device, "vkCmdEndRenderPass2")); +#endif +#if (defined(VK_KHR_create_renderpass2)) + fp_vkCmdEndRenderPass2KHR = reinterpret_cast(procAddr(device, "vkCmdEndRenderPass2KHR")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdEndRendering = reinterpret_cast(procAddr(device, "vkCmdEndRendering")); +#endif +#if (defined(VK_EXT_fragment_density_map_offset)) + 
fp_vkCmdEndRendering2EXT = reinterpret_cast(procAddr(device, "vkCmdEndRendering2EXT")); +#endif +#if (defined(VK_KHR_maintenance10)) + fp_vkCmdEndRendering2KHR = reinterpret_cast(procAddr(device, "vkCmdEndRendering2KHR")); +#endif +#if (defined(VK_KHR_dynamic_rendering)) + fp_vkCmdEndRenderingKHR = reinterpret_cast(procAddr(device, "vkCmdEndRenderingKHR")); +#endif +#if (defined(VK_EXT_transform_feedback)) + fp_vkCmdEndTransformFeedbackEXT = reinterpret_cast(procAddr(device, "vkCmdEndTransformFeedbackEXT")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkCmdEndVideoCodingKHR = reinterpret_cast(procAddr(device, "vkCmdEndVideoCodingKHR")); +#endif + fp_vkCmdExecuteCommands = reinterpret_cast(procAddr(device, "vkCmdExecuteCommands")); +#if (defined(VK_EXT_device_generated_commands)) + fp_vkCmdExecuteGeneratedCommandsEXT = reinterpret_cast(procAddr(device, "vkCmdExecuteGeneratedCommandsEXT")); +#endif +#if (defined(VK_NV_device_generated_commands)) + fp_vkCmdExecuteGeneratedCommandsNV = reinterpret_cast(procAddr(device, "vkCmdExecuteGeneratedCommandsNV")); +#endif + fp_vkCmdFillBuffer = reinterpret_cast(procAddr(device, "vkCmdFillBuffer")); +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + fp_vkCmdInitializeGraphScratchMemoryAMDX = reinterpret_cast(procAddr(device, "vkCmdInitializeGraphScratchMemoryAMDX")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkCmdInsertDebugUtilsLabelEXT = reinterpret_cast(procAddr(device, "vkCmdInsertDebugUtilsLabelEXT")); +#endif + fp_vkCmdNextSubpass = reinterpret_cast(procAddr(device, "vkCmdNextSubpass")); +#if (defined(VK_VERSION_1_2)) + fp_vkCmdNextSubpass2 = reinterpret_cast(procAddr(device, "vkCmdNextSubpass2")); +#endif +#if (defined(VK_KHR_create_renderpass2)) + fp_vkCmdNextSubpass2KHR = reinterpret_cast(procAddr(device, "vkCmdNextSubpass2KHR")); +#endif +#if (defined(VK_NV_optical_flow)) + fp_vkCmdOpticalFlowExecuteNV = reinterpret_cast(procAddr(device, "vkCmdOpticalFlowExecuteNV")); +#endif + 
fp_vkCmdPipelineBarrier = reinterpret_cast(procAddr(device, "vkCmdPipelineBarrier")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdPipelineBarrier2 = reinterpret_cast(procAddr(device, "vkCmdPipelineBarrier2")); +#endif +#if (defined(VK_KHR_synchronization2)) + fp_vkCmdPipelineBarrier2KHR = reinterpret_cast(procAddr(device, "vkCmdPipelineBarrier2KHR")); +#endif +#if (defined(VK_EXT_device_generated_commands)) + fp_vkCmdPreprocessGeneratedCommandsEXT = reinterpret_cast(procAddr(device, "vkCmdPreprocessGeneratedCommandsEXT")); +#endif +#if (defined(VK_NV_device_generated_commands)) + fp_vkCmdPreprocessGeneratedCommandsNV = reinterpret_cast(procAddr(device, "vkCmdPreprocessGeneratedCommandsNV")); +#endif + fp_vkCmdPushConstants = reinterpret_cast(procAddr(device, "vkCmdPushConstants")); +#if (defined(VK_VERSION_1_4)) + fp_vkCmdPushConstants2 = reinterpret_cast(procAddr(device, "vkCmdPushConstants2")); +#endif +#if (defined(VK_KHR_maintenance6)) + fp_vkCmdPushConstants2KHR = reinterpret_cast(procAddr(device, "vkCmdPushConstants2KHR")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCmdPushDescriptorSet = reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSet")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCmdPushDescriptorSet2 = reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSet2")); +#endif +#if (defined(VK_KHR_maintenance6)) + fp_vkCmdPushDescriptorSet2KHR = reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSet2KHR")); +#endif +#if (defined(VK_KHR_push_descriptor)) + fp_vkCmdPushDescriptorSetKHR = reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSetKHR")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCmdPushDescriptorSetWithTemplate = reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSetWithTemplate")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCmdPushDescriptorSetWithTemplate2 = reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSetWithTemplate2")); +#endif +#if (defined(VK_KHR_maintenance6)) + fp_vkCmdPushDescriptorSetWithTemplate2KHR = 
reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSetWithTemplate2KHR")); +#endif +#if (defined(VK_KHR_push_descriptor) || defined(VK_KHR_descriptor_update_template)) + fp_vkCmdPushDescriptorSetWithTemplateKHR = reinterpret_cast(procAddr(device, "vkCmdPushDescriptorSetWithTemplateKHR")); +#endif + fp_vkCmdResetEvent = reinterpret_cast(procAddr(device, "vkCmdResetEvent")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdResetEvent2 = reinterpret_cast(procAddr(device, "vkCmdResetEvent2")); +#endif +#if (defined(VK_KHR_synchronization2)) + fp_vkCmdResetEvent2KHR = reinterpret_cast(procAddr(device, "vkCmdResetEvent2KHR")); +#endif + fp_vkCmdResetQueryPool = reinterpret_cast(procAddr(device, "vkCmdResetQueryPool")); + fp_vkCmdResolveImage = reinterpret_cast(procAddr(device, "vkCmdResolveImage")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdResolveImage2 = reinterpret_cast(procAddr(device, "vkCmdResolveImage2")); +#endif +#if (defined(VK_KHR_copy_commands2)) + fp_vkCmdResolveImage2KHR = reinterpret_cast(procAddr(device, "vkCmdResolveImage2KHR")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetAlphaToCoverageEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetAlphaToCoverageEnableEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetAlphaToOneEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetAlphaToOneEnableEXT")); +#endif +#if (defined(VK_EXT_attachment_feedback_loop_dynamic_state)) + fp_vkCmdSetAttachmentFeedbackLoopEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetAttachmentFeedbackLoopEnableEXT")); +#endif + fp_vkCmdSetBlendConstants = reinterpret_cast(procAddr(device, "vkCmdSetBlendConstants")); +#if (defined(VK_NV_device_diagnostic_checkpoints)) + fp_vkCmdSetCheckpointNV = reinterpret_cast(procAddr(device, "vkCmdSetCheckpointNV")); +#endif +#if (defined(VK_NV_shading_rate_image)) + fp_vkCmdSetCoarseSampleOrderNV = reinterpret_cast(procAddr(device, 
"vkCmdSetCoarseSampleOrderNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetColorBlendAdvancedEXT = reinterpret_cast(procAddr(device, "vkCmdSetColorBlendAdvancedEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetColorBlendEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetColorBlendEnableEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetColorBlendEquationEXT = reinterpret_cast(procAddr(device, "vkCmdSetColorBlendEquationEXT")); +#endif +#if (defined(VK_EXT_color_write_enable)) + fp_vkCmdSetColorWriteEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetColorWriteEnableEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetColorWriteMaskEXT = reinterpret_cast(procAddr(device, "vkCmdSetColorWriteMaskEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetConservativeRasterizationModeEXT = reinterpret_cast(procAddr(device, "vkCmdSetConservativeRasterizationModeEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetCoverageModulationModeNV = reinterpret_cast(procAddr(device, "vkCmdSetCoverageModulationModeNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetCoverageModulationTableEnableNV = reinterpret_cast(procAddr(device, "vkCmdSetCoverageModulationTableEnableNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetCoverageModulationTableNV = reinterpret_cast(procAddr(device, "vkCmdSetCoverageModulationTableNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetCoverageReductionModeNV = reinterpret_cast(procAddr(device, "vkCmdSetCoverageReductionModeNV")); +#endif +#if 
(defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetCoverageToColorEnableNV = reinterpret_cast(procAddr(device, "vkCmdSetCoverageToColorEnableNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetCoverageToColorLocationNV = reinterpret_cast(procAddr(device, "vkCmdSetCoverageToColorLocationNV")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetCullMode = reinterpret_cast(procAddr(device, "vkCmdSetCullMode")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetCullModeEXT = reinterpret_cast(procAddr(device, "vkCmdSetCullModeEXT")); +#endif + fp_vkCmdSetDepthBias = reinterpret_cast(procAddr(device, "vkCmdSetDepthBias")); +#if (defined(VK_EXT_depth_bias_control)) + fp_vkCmdSetDepthBias2EXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthBias2EXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetDepthBiasEnable = reinterpret_cast(procAddr(device, "vkCmdSetDepthBiasEnable")); +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthBiasEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthBiasEnableEXT")); +#endif + fp_vkCmdSetDepthBounds = reinterpret_cast(procAddr(device, "vkCmdSetDepthBounds")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetDepthBoundsTestEnable = reinterpret_cast(procAddr(device, "vkCmdSetDepthBoundsTestEnable")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthBoundsTestEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthBoundsTestEnableEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthClampEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthClampEnableEXT")); +#endif +#if (defined(VK_EXT_shader_object) || defined(VK_EXT_depth_clamp_control)) + fp_vkCmdSetDepthClampRangeEXT = reinterpret_cast(procAddr(device, 
"vkCmdSetDepthClampRangeEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthClipEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthClipEnableEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthClipNegativeOneToOneEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthClipNegativeOneToOneEXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetDepthCompareOp = reinterpret_cast(procAddr(device, "vkCmdSetDepthCompareOp")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthCompareOpEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthCompareOpEXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetDepthTestEnable = reinterpret_cast(procAddr(device, "vkCmdSetDepthTestEnable")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthTestEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthTestEnableEXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetDepthWriteEnable = reinterpret_cast(procAddr(device, "vkCmdSetDepthWriteEnable")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetDepthWriteEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetDepthWriteEnableEXT")); +#endif +#if (defined(VK_KHR_maintenance6)) + fp_vkCmdSetDescriptorBufferOffsets2EXT = reinterpret_cast(procAddr(device, "vkCmdSetDescriptorBufferOffsets2EXT")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkCmdSetDescriptorBufferOffsetsEXT = reinterpret_cast(procAddr(device, "vkCmdSetDescriptorBufferOffsetsEXT")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkCmdSetDeviceMask = reinterpret_cast(procAddr(device, "vkCmdSetDeviceMask")); +#endif +#if (defined(VK_KHR_device_group)) + fp_vkCmdSetDeviceMaskKHR = reinterpret_cast(procAddr(device, "vkCmdSetDeviceMaskKHR")); +#endif +#if 
(defined(VK_EXT_discard_rectangles)) + fp_vkCmdSetDiscardRectangleEXT = reinterpret_cast(procAddr(device, "vkCmdSetDiscardRectangleEXT")); +#endif +#if (defined(VK_EXT_discard_rectangles)) && VK_HEADER_VERSION >= 241 + fp_vkCmdSetDiscardRectangleEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetDiscardRectangleEnableEXT")); +#endif +#if (defined(VK_EXT_discard_rectangles)) && VK_HEADER_VERSION >= 241 + fp_vkCmdSetDiscardRectangleModeEXT = reinterpret_cast(procAddr(device, "vkCmdSetDiscardRectangleModeEXT")); +#endif + fp_vkCmdSetEvent = reinterpret_cast(procAddr(device, "vkCmdSetEvent")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetEvent2 = reinterpret_cast(procAddr(device, "vkCmdSetEvent2")); +#endif +#if (defined(VK_KHR_synchronization2)) + fp_vkCmdSetEvent2KHR = reinterpret_cast(procAddr(device, "vkCmdSetEvent2KHR")); +#endif +#if (defined(VK_NV_scissor_exclusive)) && VK_HEADER_VERSION >= 241 + fp_vkCmdSetExclusiveScissorEnableNV = reinterpret_cast(procAddr(device, "vkCmdSetExclusiveScissorEnableNV")); +#endif +#if (defined(VK_NV_scissor_exclusive)) + fp_vkCmdSetExclusiveScissorNV = reinterpret_cast(procAddr(device, "vkCmdSetExclusiveScissorNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetExtraPrimitiveOverestimationSizeEXT = reinterpret_cast(procAddr(device, "vkCmdSetExtraPrimitiveOverestimationSizeEXT")); +#endif +#if (defined(VK_NV_fragment_shading_rate_enums)) + fp_vkCmdSetFragmentShadingRateEnumNV = reinterpret_cast(procAddr(device, "vkCmdSetFragmentShadingRateEnumNV")); +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + fp_vkCmdSetFragmentShadingRateKHR = reinterpret_cast(procAddr(device, "vkCmdSetFragmentShadingRateKHR")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetFrontFace = reinterpret_cast(procAddr(device, "vkCmdSetFrontFace")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetFrontFaceEXT = reinterpret_cast(procAddr(device, 
"vkCmdSetFrontFaceEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetLineRasterizationModeEXT = reinterpret_cast(procAddr(device, "vkCmdSetLineRasterizationModeEXT")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCmdSetLineStipple = reinterpret_cast(procAddr(device, "vkCmdSetLineStipple")); +#endif +#if (defined(VK_EXT_line_rasterization)) + fp_vkCmdSetLineStippleEXT = reinterpret_cast(procAddr(device, "vkCmdSetLineStippleEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetLineStippleEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetLineStippleEnableEXT")); +#endif +#if (defined(VK_KHR_line_rasterization)) + fp_vkCmdSetLineStippleKHR = reinterpret_cast(procAddr(device, "vkCmdSetLineStippleKHR")); +#endif + fp_vkCmdSetLineWidth = reinterpret_cast(procAddr(device, "vkCmdSetLineWidth")); +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + fp_vkCmdSetLogicOpEXT = reinterpret_cast(procAddr(device, "vkCmdSetLogicOpEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetLogicOpEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetLogicOpEnableEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + fp_vkCmdSetPatchControlPointsEXT = reinterpret_cast(procAddr(device, "vkCmdSetPatchControlPointsEXT")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkCmdSetPerformanceMarkerINTEL = reinterpret_cast(procAddr(device, "vkCmdSetPerformanceMarkerINTEL")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkCmdSetPerformanceOverrideINTEL = reinterpret_cast(procAddr(device, "vkCmdSetPerformanceOverrideINTEL")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkCmdSetPerformanceStreamMarkerINTEL = reinterpret_cast(procAddr(device, "vkCmdSetPerformanceStreamMarkerINTEL")); +#endif +#if 
(defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetPolygonModeEXT = reinterpret_cast(procAddr(device, "vkCmdSetPolygonModeEXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetPrimitiveRestartEnable = reinterpret_cast(procAddr(device, "vkCmdSetPrimitiveRestartEnable")); +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + fp_vkCmdSetPrimitiveRestartEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetPrimitiveRestartEnableEXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetPrimitiveTopology = reinterpret_cast(procAddr(device, "vkCmdSetPrimitiveTopology")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetPrimitiveTopologyEXT = reinterpret_cast(procAddr(device, "vkCmdSetPrimitiveTopologyEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetProvokingVertexModeEXT = reinterpret_cast(procAddr(device, "vkCmdSetProvokingVertexModeEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetRasterizationSamplesEXT = reinterpret_cast(procAddr(device, "vkCmdSetRasterizationSamplesEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetRasterizationStreamEXT = reinterpret_cast(procAddr(device, "vkCmdSetRasterizationStreamEXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetRasterizerDiscardEnable = reinterpret_cast(procAddr(device, "vkCmdSetRasterizerDiscardEnable")); +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + fp_vkCmdSetRasterizerDiscardEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetRasterizerDiscardEnableEXT")); +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + fp_vkCmdSetRayTracingPipelineStackSizeKHR = reinterpret_cast(procAddr(device, "vkCmdSetRayTracingPipelineStackSizeKHR")); +#endif +#if 
(defined(VK_VERSION_1_4)) + fp_vkCmdSetRenderingAttachmentLocations = reinterpret_cast(procAddr(device, "vkCmdSetRenderingAttachmentLocations")); +#endif +#if (defined(VK_KHR_dynamic_rendering_local_read)) + fp_vkCmdSetRenderingAttachmentLocationsKHR = reinterpret_cast(procAddr(device, "vkCmdSetRenderingAttachmentLocationsKHR")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCmdSetRenderingInputAttachmentIndices = reinterpret_cast(procAddr(device, "vkCmdSetRenderingInputAttachmentIndices")); +#endif +#if (defined(VK_KHR_dynamic_rendering_local_read)) + fp_vkCmdSetRenderingInputAttachmentIndicesKHR = reinterpret_cast(procAddr(device, "vkCmdSetRenderingInputAttachmentIndicesKHR")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetRepresentativeFragmentTestEnableNV = reinterpret_cast(procAddr(device, "vkCmdSetRepresentativeFragmentTestEnableNV")); +#endif +#if (defined(VK_EXT_sample_locations)) + fp_vkCmdSetSampleLocationsEXT = reinterpret_cast(procAddr(device, "vkCmdSetSampleLocationsEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetSampleLocationsEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetSampleLocationsEnableEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetSampleMaskEXT = reinterpret_cast(procAddr(device, "vkCmdSetSampleMaskEXT")); +#endif + fp_vkCmdSetScissor = reinterpret_cast(procAddr(device, "vkCmdSetScissor")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetScissorWithCount = reinterpret_cast(procAddr(device, "vkCmdSetScissorWithCount")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetScissorWithCountEXT = reinterpret_cast(procAddr(device, "vkCmdSetScissorWithCountEXT")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetShadingRateImageEnableNV = reinterpret_cast(procAddr(device, 
"vkCmdSetShadingRateImageEnableNV")); +#endif + fp_vkCmdSetStencilCompareMask = reinterpret_cast(procAddr(device, "vkCmdSetStencilCompareMask")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetStencilOp = reinterpret_cast(procAddr(device, "vkCmdSetStencilOp")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetStencilOpEXT = reinterpret_cast(procAddr(device, "vkCmdSetStencilOpEXT")); +#endif + fp_vkCmdSetStencilReference = reinterpret_cast(procAddr(device, "vkCmdSetStencilReference")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetStencilTestEnable = reinterpret_cast(procAddr(device, "vkCmdSetStencilTestEnable")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetStencilTestEnableEXT = reinterpret_cast(procAddr(device, "vkCmdSetStencilTestEnableEXT")); +#endif + fp_vkCmdSetStencilWriteMask = reinterpret_cast(procAddr(device, "vkCmdSetStencilWriteMask")); +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetTessellationDomainOriginEXT = reinterpret_cast(procAddr(device, "vkCmdSetTessellationDomainOriginEXT")); +#endif +#if (defined(VK_EXT_vertex_input_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetVertexInputEXT = reinterpret_cast(procAddr(device, "vkCmdSetVertexInputEXT")); +#endif + fp_vkCmdSetViewport = reinterpret_cast(procAddr(device, "vkCmdSetViewport")); +#if (defined(VK_NV_shading_rate_image)) + fp_vkCmdSetViewportShadingRatePaletteNV = reinterpret_cast(procAddr(device, "vkCmdSetViewportShadingRatePaletteNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetViewportSwizzleNV = reinterpret_cast(procAddr(device, "vkCmdSetViewportSwizzleNV")); +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + fp_vkCmdSetViewportWScalingEnableNV = reinterpret_cast(procAddr(device, "vkCmdSetViewportWScalingEnableNV")); +#endif +#if 
(defined(VK_NV_clip_space_w_scaling)) + fp_vkCmdSetViewportWScalingNV = reinterpret_cast(procAddr(device, "vkCmdSetViewportWScalingNV")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkCmdSetViewportWithCount = reinterpret_cast(procAddr(device, "vkCmdSetViewportWithCount")); +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + fp_vkCmdSetViewportWithCountEXT = reinterpret_cast(procAddr(device, "vkCmdSetViewportWithCountEXT")); +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + fp_vkCmdSubpassShadingHUAWEI = reinterpret_cast(procAddr(device, "vkCmdSubpassShadingHUAWEI")); +#endif +#if (defined(VK_KHR_ray_tracing_maintenance1)) + fp_vkCmdTraceRaysIndirect2KHR = reinterpret_cast(procAddr(device, "vkCmdTraceRaysIndirect2KHR")); +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + fp_vkCmdTraceRaysIndirectKHR = reinterpret_cast(procAddr(device, "vkCmdTraceRaysIndirectKHR")); +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + fp_vkCmdTraceRaysKHR = reinterpret_cast(procAddr(device, "vkCmdTraceRaysKHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkCmdTraceRaysNV = reinterpret_cast(procAddr(device, "vkCmdTraceRaysNV")); +#endif + fp_vkCmdUpdateBuffer = reinterpret_cast(procAddr(device, "vkCmdUpdateBuffer")); +#if (defined(VK_NV_device_generated_commands_compute)) + fp_vkCmdUpdatePipelineIndirectBufferNV = reinterpret_cast(procAddr(device, "vkCmdUpdatePipelineIndirectBufferNV")); +#endif + fp_vkCmdWaitEvents = reinterpret_cast(procAddr(device, "vkCmdWaitEvents")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdWaitEvents2 = reinterpret_cast(procAddr(device, "vkCmdWaitEvents2")); +#endif +#if (defined(VK_KHR_synchronization2)) + fp_vkCmdWaitEvents2KHR = reinterpret_cast(procAddr(device, "vkCmdWaitEvents2KHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCmdWriteAccelerationStructuresPropertiesKHR = reinterpret_cast(procAddr(device, "vkCmdWriteAccelerationStructuresPropertiesKHR")); +#endif +#if 
(defined(VK_NV_ray_tracing)) + fp_vkCmdWriteAccelerationStructuresPropertiesNV = reinterpret_cast(procAddr(device, "vkCmdWriteAccelerationStructuresPropertiesNV")); +#endif +#if (defined(VK_AMD_buffer_marker)) + fp_vkCmdWriteBufferMarker2AMD = reinterpret_cast(procAddr(device, "vkCmdWriteBufferMarker2AMD")); +#endif +#if (defined(VK_AMD_buffer_marker)) + fp_vkCmdWriteBufferMarkerAMD = reinterpret_cast(procAddr(device, "vkCmdWriteBufferMarkerAMD")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCmdWriteMicromapsPropertiesEXT = reinterpret_cast(procAddr(device, "vkCmdWriteMicromapsPropertiesEXT")); +#endif + fp_vkCmdWriteTimestamp = reinterpret_cast(procAddr(device, "vkCmdWriteTimestamp")); +#if (defined(VK_VERSION_1_3)) + fp_vkCmdWriteTimestamp2 = reinterpret_cast(procAddr(device, "vkCmdWriteTimestamp2")); +#endif +#if (defined(VK_KHR_synchronization2)) + fp_vkCmdWriteTimestamp2KHR = reinterpret_cast(procAddr(device, "vkCmdWriteTimestamp2KHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkCompileDeferredNV = reinterpret_cast(procAddr(device, "vkCompileDeferredNV")); +#endif +#if (defined(VK_NV_cooperative_vector)) + fp_vkConvertCooperativeVectorMatrixNV = reinterpret_cast(procAddr(device, "vkConvertCooperativeVectorMatrixNV")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCopyAccelerationStructureKHR = reinterpret_cast(procAddr(device, "vkCopyAccelerationStructureKHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCopyAccelerationStructureToMemoryKHR = reinterpret_cast(procAddr(device, "vkCopyAccelerationStructureToMemoryKHR")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCopyImageToImage = reinterpret_cast(procAddr(device, "vkCopyImageToImage")); +#endif +#if (defined(VK_EXT_host_image_copy)) + fp_vkCopyImageToImageEXT = reinterpret_cast(procAddr(device, "vkCopyImageToImageEXT")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCopyImageToMemory = reinterpret_cast(procAddr(device, "vkCopyImageToMemory")); +#endif +#if 
(defined(VK_EXT_host_image_copy)) + fp_vkCopyImageToMemoryEXT = reinterpret_cast(procAddr(device, "vkCopyImageToMemoryEXT")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCopyMemoryToAccelerationStructureKHR = reinterpret_cast(procAddr(device, "vkCopyMemoryToAccelerationStructureKHR")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkCopyMemoryToImage = reinterpret_cast(procAddr(device, "vkCopyMemoryToImage")); +#endif +#if (defined(VK_EXT_host_image_copy)) + fp_vkCopyMemoryToImageEXT = reinterpret_cast(procAddr(device, "vkCopyMemoryToImageEXT")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCopyMemoryToMicromapEXT = reinterpret_cast(procAddr(device, "vkCopyMemoryToMicromapEXT")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCopyMicromapEXT = reinterpret_cast(procAddr(device, "vkCopyMicromapEXT")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCopyMicromapToMemoryEXT = reinterpret_cast(procAddr(device, "vkCopyMicromapToMemoryEXT")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkCreateAccelerationStructureKHR = reinterpret_cast(procAddr(device, "vkCreateAccelerationStructureKHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkCreateAccelerationStructureNV = reinterpret_cast(procAddr(device, "vkCreateAccelerationStructureNV")); +#endif + fp_vkCreateBuffer = reinterpret_cast(procAddr(device, "vkCreateBuffer")); +#if (defined(VK_FUCHSIA_buffer_collection)) + fp_vkCreateBufferCollectionFUCHSIA = reinterpret_cast(procAddr(device, "vkCreateBufferCollectionFUCHSIA")); +#endif + fp_vkCreateBufferView = reinterpret_cast(procAddr(device, "vkCreateBufferView")); + fp_vkCreateCommandPool = reinterpret_cast(procAddr(device, "vkCreateCommandPool")); + fp_vkCreateComputePipelines = reinterpret_cast(procAddr(device, "vkCreateComputePipelines")); +#if (defined(VK_NVX_binary_import)) + fp_vkCreateCuFunctionNVX = reinterpret_cast(procAddr(device, "vkCreateCuFunctionNVX")); +#endif +#if (defined(VK_NVX_binary_import)) + 
fp_vkCreateCuModuleNVX = reinterpret_cast(procAddr(device, "vkCreateCuModuleNVX")); +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + fp_vkCreateCudaFunctionNV = reinterpret_cast(procAddr(device, "vkCreateCudaFunctionNV")); +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + fp_vkCreateCudaModuleNV = reinterpret_cast(procAddr(device, "vkCreateCudaModuleNV")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkCreateDataGraphPipelineSessionARM = reinterpret_cast(procAddr(device, "vkCreateDataGraphPipelineSessionARM")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkCreateDataGraphPipelinesARM = reinterpret_cast(procAddr(device, "vkCreateDataGraphPipelinesARM")); +#endif +#if (defined(VK_KHR_deferred_host_operations)) + fp_vkCreateDeferredOperationKHR = reinterpret_cast(procAddr(device, "vkCreateDeferredOperationKHR")); +#endif + fp_vkCreateDescriptorPool = reinterpret_cast(procAddr(device, "vkCreateDescriptorPool")); + fp_vkCreateDescriptorSetLayout = reinterpret_cast(procAddr(device, "vkCreateDescriptorSetLayout")); +#if (defined(VK_VERSION_1_1)) + fp_vkCreateDescriptorUpdateTemplate = reinterpret_cast(procAddr(device, "vkCreateDescriptorUpdateTemplate")); +#endif +#if (defined(VK_KHR_descriptor_update_template)) + fp_vkCreateDescriptorUpdateTemplateKHR = reinterpret_cast(procAddr(device, "vkCreateDescriptorUpdateTemplateKHR")); +#endif + fp_vkCreateEvent = reinterpret_cast(procAddr(device, "vkCreateEvent")); +#if (defined(VK_AMDX_shader_enqueue)) + fp_vkCreateExecutionGraphPipelinesAMDX = reinterpret_cast(procAddr(device, "vkCreateExecutionGraphPipelinesAMDX")); +#endif +#if (defined(VK_NV_external_compute_queue)) + fp_vkCreateExternalComputeQueueNV = reinterpret_cast(procAddr(device, "vkCreateExternalComputeQueueNV")); +#endif + fp_vkCreateFence = reinterpret_cast(procAddr(device, "vkCreateFence")); + fp_vkCreateFramebuffer = reinterpret_cast(procAddr(device, "vkCreateFramebuffer")); + fp_vkCreateGraphicsPipelines = reinterpret_cast(procAddr(device, 
"vkCreateGraphicsPipelines")); + fp_vkCreateImage = reinterpret_cast(procAddr(device, "vkCreateImage")); + fp_vkCreateImageView = reinterpret_cast(procAddr(device, "vkCreateImageView")); +#if (defined(VK_EXT_device_generated_commands)) + fp_vkCreateIndirectCommandsLayoutEXT = reinterpret_cast(procAddr(device, "vkCreateIndirectCommandsLayoutEXT")); +#endif +#if (defined(VK_NV_device_generated_commands)) + fp_vkCreateIndirectCommandsLayoutNV = reinterpret_cast(procAddr(device, "vkCreateIndirectCommandsLayoutNV")); +#endif +#if (defined(VK_EXT_device_generated_commands)) + fp_vkCreateIndirectExecutionSetEXT = reinterpret_cast(procAddr(device, "vkCreateIndirectExecutionSetEXT")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkCreateMicromapEXT = reinterpret_cast(procAddr(device, "vkCreateMicromapEXT")); +#endif +#if (defined(VK_NV_optical_flow)) + fp_vkCreateOpticalFlowSessionNV = reinterpret_cast(procAddr(device, "vkCreateOpticalFlowSessionNV")); +#endif +#if (defined(VK_KHR_pipeline_binary)) + fp_vkCreatePipelineBinariesKHR = reinterpret_cast(procAddr(device, "vkCreatePipelineBinariesKHR")); +#endif + fp_vkCreatePipelineCache = reinterpret_cast(procAddr(device, "vkCreatePipelineCache")); + fp_vkCreatePipelineLayout = reinterpret_cast(procAddr(device, "vkCreatePipelineLayout")); +#if (defined(VK_VERSION_1_3)) + fp_vkCreatePrivateDataSlot = reinterpret_cast(procAddr(device, "vkCreatePrivateDataSlot")); +#endif +#if (defined(VK_EXT_private_data)) + fp_vkCreatePrivateDataSlotEXT = reinterpret_cast(procAddr(device, "vkCreatePrivateDataSlotEXT")); +#endif + fp_vkCreateQueryPool = reinterpret_cast(procAddr(device, "vkCreateQueryPool")); +#if (defined(VK_KHR_ray_tracing_pipeline)) + fp_vkCreateRayTracingPipelinesKHR = reinterpret_cast(procAddr(device, "vkCreateRayTracingPipelinesKHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkCreateRayTracingPipelinesNV = reinterpret_cast(procAddr(device, "vkCreateRayTracingPipelinesNV")); +#endif + fp_vkCreateRenderPass = 
reinterpret_cast(procAddr(device, "vkCreateRenderPass")); +#if (defined(VK_VERSION_1_2)) + fp_vkCreateRenderPass2 = reinterpret_cast(procAddr(device, "vkCreateRenderPass2")); +#endif +#if (defined(VK_KHR_create_renderpass2)) + fp_vkCreateRenderPass2KHR = reinterpret_cast(procAddr(device, "vkCreateRenderPass2KHR")); +#endif + fp_vkCreateSampler = reinterpret_cast(procAddr(device, "vkCreateSampler")); +#if (defined(VK_VERSION_1_1)) + fp_vkCreateSamplerYcbcrConversion = reinterpret_cast(procAddr(device, "vkCreateSamplerYcbcrConversion")); +#endif +#if (defined(VK_KHR_sampler_ycbcr_conversion)) + fp_vkCreateSamplerYcbcrConversionKHR = reinterpret_cast(procAddr(device, "vkCreateSamplerYcbcrConversionKHR")); +#endif + fp_vkCreateSemaphore = reinterpret_cast(procAddr(device, "vkCreateSemaphore")); + fp_vkCreateShaderModule = reinterpret_cast(procAddr(device, "vkCreateShaderModule")); +#if (defined(VK_EXT_shader_object)) + fp_vkCreateShadersEXT = reinterpret_cast(procAddr(device, "vkCreateShadersEXT")); +#endif +#if (defined(VK_KHR_display_swapchain)) + fp_vkCreateSharedSwapchainsKHR = reinterpret_cast(procAddr(device, "vkCreateSharedSwapchainsKHR")); +#endif +#if (defined(VK_KHR_swapchain)) + fp_vkCreateSwapchainKHR = reinterpret_cast(procAddr(device, "vkCreateSwapchainKHR")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkCreateTensorARM = reinterpret_cast(procAddr(device, "vkCreateTensorARM")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkCreateTensorViewARM = reinterpret_cast(procAddr(device, "vkCreateTensorViewARM")); +#endif +#if (defined(VK_EXT_validation_cache)) + fp_vkCreateValidationCacheEXT = reinterpret_cast(procAddr(device, "vkCreateValidationCacheEXT")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkCreateVideoSessionKHR = reinterpret_cast(procAddr(device, "vkCreateVideoSessionKHR")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkCreateVideoSessionParametersKHR = reinterpret_cast(procAddr(device, "vkCreateVideoSessionParametersKHR")); +#endif +#if 
(defined(VK_EXT_debug_marker)) + fp_vkDebugMarkerSetObjectNameEXT = reinterpret_cast(procAddr(device, "vkDebugMarkerSetObjectNameEXT")); +#endif +#if (defined(VK_EXT_debug_marker)) + fp_vkDebugMarkerSetObjectTagEXT = reinterpret_cast(procAddr(device, "vkDebugMarkerSetObjectTagEXT")); +#endif +#if (defined(VK_KHR_deferred_host_operations)) + fp_vkDeferredOperationJoinKHR = reinterpret_cast(procAddr(device, "vkDeferredOperationJoinKHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkDestroyAccelerationStructureKHR = reinterpret_cast(procAddr(device, "vkDestroyAccelerationStructureKHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkDestroyAccelerationStructureNV = reinterpret_cast(procAddr(device, "vkDestroyAccelerationStructureNV")); +#endif + fp_vkDestroyBuffer = reinterpret_cast(procAddr(device, "vkDestroyBuffer")); +#if (defined(VK_FUCHSIA_buffer_collection)) + fp_vkDestroyBufferCollectionFUCHSIA = reinterpret_cast(procAddr(device, "vkDestroyBufferCollectionFUCHSIA")); +#endif + fp_vkDestroyBufferView = reinterpret_cast(procAddr(device, "vkDestroyBufferView")); + fp_vkDestroyCommandPool = reinterpret_cast(procAddr(device, "vkDestroyCommandPool")); +#if (defined(VK_NVX_binary_import)) + fp_vkDestroyCuFunctionNVX = reinterpret_cast(procAddr(device, "vkDestroyCuFunctionNVX")); +#endif +#if (defined(VK_NVX_binary_import)) + fp_vkDestroyCuModuleNVX = reinterpret_cast(procAddr(device, "vkDestroyCuModuleNVX")); +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + fp_vkDestroyCudaFunctionNV = reinterpret_cast(procAddr(device, "vkDestroyCudaFunctionNV")); +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + fp_vkDestroyCudaModuleNV = reinterpret_cast(procAddr(device, "vkDestroyCudaModuleNV")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkDestroyDataGraphPipelineSessionARM = reinterpret_cast(procAddr(device, "vkDestroyDataGraphPipelineSessionARM")); +#endif +#if (defined(VK_KHR_deferred_host_operations)) + fp_vkDestroyDeferredOperationKHR = 
reinterpret_cast(procAddr(device, "vkDestroyDeferredOperationKHR")); +#endif + fp_vkDestroyDescriptorPool = reinterpret_cast(procAddr(device, "vkDestroyDescriptorPool")); + fp_vkDestroyDescriptorSetLayout = reinterpret_cast(procAddr(device, "vkDestroyDescriptorSetLayout")); +#if (defined(VK_VERSION_1_1)) + fp_vkDestroyDescriptorUpdateTemplate = reinterpret_cast(procAddr(device, "vkDestroyDescriptorUpdateTemplate")); +#endif +#if (defined(VK_KHR_descriptor_update_template)) + fp_vkDestroyDescriptorUpdateTemplateKHR = reinterpret_cast(procAddr(device, "vkDestroyDescriptorUpdateTemplateKHR")); +#endif + fp_vkDestroyEvent = reinterpret_cast(procAddr(device, "vkDestroyEvent")); +#if (defined(VK_NV_external_compute_queue)) + fp_vkDestroyExternalComputeQueueNV = reinterpret_cast(procAddr(device, "vkDestroyExternalComputeQueueNV")); +#endif + fp_vkDestroyFence = reinterpret_cast(procAddr(device, "vkDestroyFence")); + fp_vkDestroyFramebuffer = reinterpret_cast(procAddr(device, "vkDestroyFramebuffer")); + fp_vkDestroyImage = reinterpret_cast(procAddr(device, "vkDestroyImage")); + fp_vkDestroyImageView = reinterpret_cast(procAddr(device, "vkDestroyImageView")); +#if (defined(VK_EXT_device_generated_commands)) + fp_vkDestroyIndirectCommandsLayoutEXT = reinterpret_cast(procAddr(device, "vkDestroyIndirectCommandsLayoutEXT")); +#endif +#if (defined(VK_NV_device_generated_commands)) + fp_vkDestroyIndirectCommandsLayoutNV = reinterpret_cast(procAddr(device, "vkDestroyIndirectCommandsLayoutNV")); +#endif +#if (defined(VK_EXT_device_generated_commands)) + fp_vkDestroyIndirectExecutionSetEXT = reinterpret_cast(procAddr(device, "vkDestroyIndirectExecutionSetEXT")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkDestroyMicromapEXT = reinterpret_cast(procAddr(device, "vkDestroyMicromapEXT")); +#endif +#if (defined(VK_NV_optical_flow)) + fp_vkDestroyOpticalFlowSessionNV = reinterpret_cast(procAddr(device, "vkDestroyOpticalFlowSessionNV")); +#endif + fp_vkDestroyPipeline = 
reinterpret_cast(procAddr(device, "vkDestroyPipeline")); +#if (defined(VK_KHR_pipeline_binary)) + fp_vkDestroyPipelineBinaryKHR = reinterpret_cast(procAddr(device, "vkDestroyPipelineBinaryKHR")); +#endif + fp_vkDestroyPipelineCache = reinterpret_cast(procAddr(device, "vkDestroyPipelineCache")); + fp_vkDestroyPipelineLayout = reinterpret_cast(procAddr(device, "vkDestroyPipelineLayout")); +#if (defined(VK_VERSION_1_3)) + fp_vkDestroyPrivateDataSlot = reinterpret_cast(procAddr(device, "vkDestroyPrivateDataSlot")); +#endif +#if (defined(VK_EXT_private_data)) + fp_vkDestroyPrivateDataSlotEXT = reinterpret_cast(procAddr(device, "vkDestroyPrivateDataSlotEXT")); +#endif + fp_vkDestroyQueryPool = reinterpret_cast(procAddr(device, "vkDestroyQueryPool")); + fp_vkDestroyRenderPass = reinterpret_cast(procAddr(device, "vkDestroyRenderPass")); + fp_vkDestroySampler = reinterpret_cast(procAddr(device, "vkDestroySampler")); +#if (defined(VK_VERSION_1_1)) + fp_vkDestroySamplerYcbcrConversion = reinterpret_cast(procAddr(device, "vkDestroySamplerYcbcrConversion")); +#endif +#if (defined(VK_KHR_sampler_ycbcr_conversion)) + fp_vkDestroySamplerYcbcrConversionKHR = reinterpret_cast(procAddr(device, "vkDestroySamplerYcbcrConversionKHR")); +#endif + fp_vkDestroySemaphore = reinterpret_cast(procAddr(device, "vkDestroySemaphore")); +#if (defined(VK_EXT_shader_object)) + fp_vkDestroyShaderEXT = reinterpret_cast(procAddr(device, "vkDestroyShaderEXT")); +#endif + fp_vkDestroyShaderModule = reinterpret_cast(procAddr(device, "vkDestroyShaderModule")); +#if (defined(VK_KHR_swapchain)) + fp_vkDestroySwapchainKHR = reinterpret_cast(procAddr(device, "vkDestroySwapchainKHR")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkDestroyTensorARM = reinterpret_cast(procAddr(device, "vkDestroyTensorARM")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkDestroyTensorViewARM = reinterpret_cast(procAddr(device, "vkDestroyTensorViewARM")); +#endif +#if (defined(VK_EXT_validation_cache)) + 
fp_vkDestroyValidationCacheEXT = reinterpret_cast(procAddr(device, "vkDestroyValidationCacheEXT")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkDestroyVideoSessionKHR = reinterpret_cast(procAddr(device, "vkDestroyVideoSessionKHR")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkDestroyVideoSessionParametersKHR = reinterpret_cast(procAddr(device, "vkDestroyVideoSessionParametersKHR")); +#endif + fp_vkDeviceWaitIdle = reinterpret_cast(procAddr(device, "vkDeviceWaitIdle")); +#if (defined(VK_EXT_display_control)) + fp_vkDisplayPowerControlEXT = reinterpret_cast(procAddr(device, "vkDisplayPowerControlEXT")); +#endif + fp_vkEndCommandBuffer = reinterpret_cast(procAddr(device, "vkEndCommandBuffer")); +#if (defined(VK_EXT_metal_objects)) + fp_vkExportMetalObjectsEXT = reinterpret_cast(procAddr(device, "vkExportMetalObjectsEXT")); +#endif + fp_vkFlushMappedMemoryRanges = reinterpret_cast(procAddr(device, "vkFlushMappedMemoryRanges")); + fp_vkFreeCommandBuffers = reinterpret_cast(procAddr(device, "vkFreeCommandBuffers")); + fp_vkFreeDescriptorSets = reinterpret_cast(procAddr(device, "vkFreeDescriptorSets")); + fp_vkFreeMemory = reinterpret_cast(procAddr(device, "vkFreeMemory")); +#if (defined(VK_KHR_acceleration_structure)) + fp_vkGetAccelerationStructureBuildSizesKHR = reinterpret_cast(procAddr(device, "vkGetAccelerationStructureBuildSizesKHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkGetAccelerationStructureDeviceAddressKHR = reinterpret_cast(procAddr(device, "vkGetAccelerationStructureDeviceAddressKHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkGetAccelerationStructureHandleNV = reinterpret_cast(procAddr(device, "vkGetAccelerationStructureHandleNV")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkGetAccelerationStructureMemoryRequirementsNV = reinterpret_cast(procAddr(device, "vkGetAccelerationStructureMemoryRequirementsNV")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + 
fp_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT = reinterpret_cast(procAddr(device, "vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT")); +#endif +#if (defined(VK_ANDROID_external_memory_android_hardware_buffer)) + fp_vkGetAndroidHardwareBufferPropertiesANDROID = reinterpret_cast(procAddr(device, "vkGetAndroidHardwareBufferPropertiesANDROID")); +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + fp_vkGetBufferCollectionPropertiesFUCHSIA = reinterpret_cast(procAddr(device, "vkGetBufferCollectionPropertiesFUCHSIA")); +#endif +#if (defined(VK_VERSION_1_2)) + fp_vkGetBufferDeviceAddress = reinterpret_cast(procAddr(device, "vkGetBufferDeviceAddress")); +#endif +#if (defined(VK_EXT_buffer_device_address)) + fp_vkGetBufferDeviceAddressEXT = reinterpret_cast(procAddr(device, "vkGetBufferDeviceAddressEXT")); +#endif +#if (defined(VK_KHR_buffer_device_address)) + fp_vkGetBufferDeviceAddressKHR = reinterpret_cast(procAddr(device, "vkGetBufferDeviceAddressKHR")); +#endif + fp_vkGetBufferMemoryRequirements = reinterpret_cast(procAddr(device, "vkGetBufferMemoryRequirements")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetBufferMemoryRequirements2 = reinterpret_cast(procAddr(device, "vkGetBufferMemoryRequirements2")); +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + fp_vkGetBufferMemoryRequirements2KHR = reinterpret_cast(procAddr(device, "vkGetBufferMemoryRequirements2KHR")); +#endif +#if (defined(VK_VERSION_1_2)) + fp_vkGetBufferOpaqueCaptureAddress = reinterpret_cast(procAddr(device, "vkGetBufferOpaqueCaptureAddress")); +#endif +#if (defined(VK_KHR_buffer_device_address)) + fp_vkGetBufferOpaqueCaptureAddressKHR = reinterpret_cast(procAddr(device, "vkGetBufferOpaqueCaptureAddressKHR")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkGetBufferOpaqueCaptureDescriptorDataEXT = reinterpret_cast(procAddr(device, "vkGetBufferOpaqueCaptureDescriptorDataEXT")); +#endif +#if (defined(VK_EXT_calibrated_timestamps)) + fp_vkGetCalibratedTimestampsEXT = 
reinterpret_cast(procAddr(device, "vkGetCalibratedTimestampsEXT")); +#endif +#if (defined(VK_KHR_calibrated_timestamps)) + fp_vkGetCalibratedTimestampsKHR = reinterpret_cast(procAddr(device, "vkGetCalibratedTimestampsKHR")); +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + fp_vkGetClusterAccelerationStructureBuildSizesNV = reinterpret_cast(procAddr(device, "vkGetClusterAccelerationStructureBuildSizesNV")); +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + fp_vkGetCudaModuleCacheNV = reinterpret_cast(procAddr(device, "vkGetCudaModuleCacheNV")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkGetDataGraphPipelineAvailablePropertiesARM = reinterpret_cast(procAddr(device, "vkGetDataGraphPipelineAvailablePropertiesARM")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkGetDataGraphPipelinePropertiesARM = reinterpret_cast(procAddr(device, "vkGetDataGraphPipelinePropertiesARM")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkGetDataGraphPipelineSessionBindPointRequirementsARM = reinterpret_cast(procAddr(device, "vkGetDataGraphPipelineSessionBindPointRequirementsARM")); +#endif +#if (defined(VK_ARM_data_graph)) + fp_vkGetDataGraphPipelineSessionMemoryRequirementsARM = reinterpret_cast(procAddr(device, "vkGetDataGraphPipelineSessionMemoryRequirementsARM")); +#endif +#if (defined(VK_KHR_deferred_host_operations)) + fp_vkGetDeferredOperationMaxConcurrencyKHR = reinterpret_cast(procAddr(device, "vkGetDeferredOperationMaxConcurrencyKHR")); +#endif +#if (defined(VK_KHR_deferred_host_operations)) + fp_vkGetDeferredOperationResultKHR = reinterpret_cast(procAddr(device, "vkGetDeferredOperationResultKHR")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkGetDescriptorEXT = reinterpret_cast(procAddr(device, "vkGetDescriptorEXT")); +#endif +#if (defined(VK_VALVE_descriptor_set_host_mapping)) + fp_vkGetDescriptorSetHostMappingVALVE = reinterpret_cast(procAddr(device, "vkGetDescriptorSetHostMappingVALVE")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + 
fp_vkGetDescriptorSetLayoutBindingOffsetEXT = reinterpret_cast(procAddr(device, "vkGetDescriptorSetLayoutBindingOffsetEXT")); +#endif +#if (defined(VK_VALVE_descriptor_set_host_mapping)) + fp_vkGetDescriptorSetLayoutHostMappingInfoVALVE = reinterpret_cast(procAddr(device, "vkGetDescriptorSetLayoutHostMappingInfoVALVE")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkGetDescriptorSetLayoutSizeEXT = reinterpret_cast(procAddr(device, "vkGetDescriptorSetLayoutSizeEXT")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkGetDescriptorSetLayoutSupport = reinterpret_cast(procAddr(device, "vkGetDescriptorSetLayoutSupport")); +#endif +#if (defined(VK_KHR_maintenance3)) + fp_vkGetDescriptorSetLayoutSupportKHR = reinterpret_cast(procAddr(device, "vkGetDescriptorSetLayoutSupportKHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + fp_vkGetDeviceAccelerationStructureCompatibilityKHR = reinterpret_cast(procAddr(device, "vkGetDeviceAccelerationStructureCompatibilityKHR")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkGetDeviceBufferMemoryRequirements = reinterpret_cast(procAddr(device, "vkGetDeviceBufferMemoryRequirements")); +#endif +#if (defined(VK_KHR_maintenance4)) + fp_vkGetDeviceBufferMemoryRequirementsKHR = reinterpret_cast(procAddr(device, "vkGetDeviceBufferMemoryRequirementsKHR")); +#endif +#if (defined(VK_EXT_device_fault)) + fp_vkGetDeviceFaultInfoEXT = reinterpret_cast(procAddr(device, "vkGetDeviceFaultInfoEXT")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkGetDeviceGroupPeerMemoryFeatures = reinterpret_cast(procAddr(device, "vkGetDeviceGroupPeerMemoryFeatures")); +#endif +#if (defined(VK_KHR_device_group)) + fp_vkGetDeviceGroupPeerMemoryFeaturesKHR = reinterpret_cast(procAddr(device, "vkGetDeviceGroupPeerMemoryFeaturesKHR")); +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + fp_vkGetDeviceGroupPresentCapabilitiesKHR = reinterpret_cast(procAddr(device, "vkGetDeviceGroupPresentCapabilitiesKHR")); +#endif +#if 
(defined(VK_EXT_full_screen_exclusive)) + fp_vkGetDeviceGroupSurfacePresentModes2EXT = reinterpret_cast(procAddr(device, "vkGetDeviceGroupSurfacePresentModes2EXT")); +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + fp_vkGetDeviceGroupSurfacePresentModesKHR = reinterpret_cast(procAddr(device, "vkGetDeviceGroupSurfacePresentModesKHR")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkGetDeviceImageMemoryRequirements = reinterpret_cast(procAddr(device, "vkGetDeviceImageMemoryRequirements")); +#endif +#if (defined(VK_KHR_maintenance4)) + fp_vkGetDeviceImageMemoryRequirementsKHR = reinterpret_cast(procAddr(device, "vkGetDeviceImageMemoryRequirementsKHR")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkGetDeviceImageSparseMemoryRequirements = reinterpret_cast(procAddr(device, "vkGetDeviceImageSparseMemoryRequirements")); +#endif +#if (defined(VK_KHR_maintenance4)) + fp_vkGetDeviceImageSparseMemoryRequirementsKHR = reinterpret_cast(procAddr(device, "vkGetDeviceImageSparseMemoryRequirementsKHR")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkGetDeviceImageSubresourceLayout = reinterpret_cast(procAddr(device, "vkGetDeviceImageSubresourceLayout")); +#endif +#if (defined(VK_KHR_maintenance5)) + fp_vkGetDeviceImageSubresourceLayoutKHR = reinterpret_cast(procAddr(device, "vkGetDeviceImageSubresourceLayoutKHR")); +#endif + fp_vkGetDeviceMemoryCommitment = reinterpret_cast(procAddr(device, "vkGetDeviceMemoryCommitment")); +#if (defined(VK_VERSION_1_2)) + fp_vkGetDeviceMemoryOpaqueCaptureAddress = reinterpret_cast(procAddr(device, "vkGetDeviceMemoryOpaqueCaptureAddress")); +#endif +#if (defined(VK_KHR_buffer_device_address)) + fp_vkGetDeviceMemoryOpaqueCaptureAddressKHR = reinterpret_cast(procAddr(device, "vkGetDeviceMemoryOpaqueCaptureAddressKHR")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkGetDeviceMicromapCompatibilityEXT = reinterpret_cast(procAddr(device, "vkGetDeviceMicromapCompatibilityEXT")); +#endif + fp_vkGetDeviceQueue = 
reinterpret_cast(procAddr(device, "vkGetDeviceQueue")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetDeviceQueue2 = reinterpret_cast(procAddr(device, "vkGetDeviceQueue2")); +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + fp_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI = reinterpret_cast(procAddr(device, "vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkGetDeviceTensorMemoryRequirementsARM = reinterpret_cast(procAddr(device, "vkGetDeviceTensorMemoryRequirementsARM")); +#endif +#if (defined(VK_QCOM_tile_properties)) + fp_vkGetDynamicRenderingTilePropertiesQCOM = reinterpret_cast(procAddr(device, "vkGetDynamicRenderingTilePropertiesQCOM")); +#endif +#if (defined(VK_KHR_video_encode_queue)) + fp_vkGetEncodedVideoSessionParametersKHR = reinterpret_cast(procAddr(device, "vkGetEncodedVideoSessionParametersKHR")); +#endif + fp_vkGetEventStatus = reinterpret_cast(procAddr(device, "vkGetEventStatus")); +#if (defined(VK_AMDX_shader_enqueue)) + fp_vkGetExecutionGraphPipelineNodeIndexAMDX = reinterpret_cast(procAddr(device, "vkGetExecutionGraphPipelineNodeIndexAMDX")); +#endif +#if (defined(VK_AMDX_shader_enqueue)) + fp_vkGetExecutionGraphPipelineScratchSizeAMDX = reinterpret_cast(procAddr(device, "vkGetExecutionGraphPipelineScratchSizeAMDX")); +#endif +#if (defined(VK_NV_external_compute_queue)) + fp_vkGetExternalComputeQueueDataNV = reinterpret_cast(procAddr(device, "vkGetExternalComputeQueueDataNV")); +#endif +#if (defined(VK_KHR_external_fence_fd)) + fp_vkGetFenceFdKHR = reinterpret_cast(procAddr(device, "vkGetFenceFdKHR")); +#endif + fp_vkGetFenceStatus = reinterpret_cast(procAddr(device, "vkGetFenceStatus")); +#if (defined(VK_KHR_external_fence_win32)) + fp_vkGetFenceWin32HandleKHR = reinterpret_cast(procAddr(device, "vkGetFenceWin32HandleKHR")); +#endif +#if (defined(VK_QCOM_tile_properties)) + fp_vkGetFramebufferTilePropertiesQCOM = reinterpret_cast(procAddr(device, "vkGetFramebufferTilePropertiesQCOM")); +#endif +#if 
(defined(VK_EXT_device_generated_commands)) + fp_vkGetGeneratedCommandsMemoryRequirementsEXT = reinterpret_cast(procAddr(device, "vkGetGeneratedCommandsMemoryRequirementsEXT")); +#endif +#if (defined(VK_NV_device_generated_commands)) + fp_vkGetGeneratedCommandsMemoryRequirementsNV = reinterpret_cast(procAddr(device, "vkGetGeneratedCommandsMemoryRequirementsNV")); +#endif +#if (defined(VK_EXT_image_drm_format_modifier)) + fp_vkGetImageDrmFormatModifierPropertiesEXT = reinterpret_cast(procAddr(device, "vkGetImageDrmFormatModifierPropertiesEXT")); +#endif + fp_vkGetImageMemoryRequirements = reinterpret_cast(procAddr(device, "vkGetImageMemoryRequirements")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetImageMemoryRequirements2 = reinterpret_cast(procAddr(device, "vkGetImageMemoryRequirements2")); +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + fp_vkGetImageMemoryRequirements2KHR = reinterpret_cast(procAddr(device, "vkGetImageMemoryRequirements2KHR")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkGetImageOpaqueCaptureDescriptorDataEXT = reinterpret_cast(procAddr(device, "vkGetImageOpaqueCaptureDescriptorDataEXT")); +#endif + fp_vkGetImageSparseMemoryRequirements = reinterpret_cast(procAddr(device, "vkGetImageSparseMemoryRequirements")); +#if (defined(VK_VERSION_1_1)) + fp_vkGetImageSparseMemoryRequirements2 = reinterpret_cast(procAddr(device, "vkGetImageSparseMemoryRequirements2")); +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + fp_vkGetImageSparseMemoryRequirements2KHR = reinterpret_cast(procAddr(device, "vkGetImageSparseMemoryRequirements2KHR")); +#endif + fp_vkGetImageSubresourceLayout = reinterpret_cast(procAddr(device, "vkGetImageSubresourceLayout")); +#if (defined(VK_VERSION_1_4)) + fp_vkGetImageSubresourceLayout2 = reinterpret_cast(procAddr(device, "vkGetImageSubresourceLayout2")); +#endif +#if (defined(VK_EXT_host_image_copy) || defined(VK_EXT_image_compression_control)) + fp_vkGetImageSubresourceLayout2EXT = 
reinterpret_cast(procAddr(device, "vkGetImageSubresourceLayout2EXT")); +#endif +#if (defined(VK_KHR_maintenance5)) + fp_vkGetImageSubresourceLayout2KHR = reinterpret_cast(procAddr(device, "vkGetImageSubresourceLayout2KHR")); +#endif +#if (defined(VK_NVX_image_view_handle)) + fp_vkGetImageViewAddressNVX = reinterpret_cast(procAddr(device, "vkGetImageViewAddressNVX")); +#endif +#if (defined(VK_NVX_image_view_handle)) + fp_vkGetImageViewHandle64NVX = reinterpret_cast(procAddr(device, "vkGetImageViewHandle64NVX")); +#endif +#if (defined(VK_NVX_image_view_handle)) + fp_vkGetImageViewHandleNVX = reinterpret_cast(procAddr(device, "vkGetImageViewHandleNVX")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkGetImageViewOpaqueCaptureDescriptorDataEXT = reinterpret_cast(procAddr(device, "vkGetImageViewOpaqueCaptureDescriptorDataEXT")); +#endif +#if (defined(VK_NV_low_latency2)) && VK_HEADER_VERSION >= 271 + fp_vkGetLatencyTimingsNV = reinterpret_cast(procAddr(device, "vkGetLatencyTimingsNV")); +#endif +#if (defined(VK_ANDROID_external_memory_android_hardware_buffer)) + fp_vkGetMemoryAndroidHardwareBufferANDROID = reinterpret_cast(procAddr(device, "vkGetMemoryAndroidHardwareBufferANDROID")); +#endif +#if (defined(VK_KHR_external_memory_fd)) + fp_vkGetMemoryFdKHR = reinterpret_cast(procAddr(device, "vkGetMemoryFdKHR")); +#endif +#if (defined(VK_KHR_external_memory_fd)) + fp_vkGetMemoryFdPropertiesKHR = reinterpret_cast(procAddr(device, "vkGetMemoryFdPropertiesKHR")); +#endif +#if (defined(VK_EXT_external_memory_host)) + fp_vkGetMemoryHostPointerPropertiesEXT = reinterpret_cast(procAddr(device, "vkGetMemoryHostPointerPropertiesEXT")); +#endif +#if (defined(VK_EXT_external_memory_metal)) + fp_vkGetMemoryMetalHandleEXT = reinterpret_cast(procAddr(device, "vkGetMemoryMetalHandleEXT")); +#endif +#if (defined(VK_EXT_external_memory_metal)) + fp_vkGetMemoryMetalHandlePropertiesEXT = reinterpret_cast(procAddr(device, "vkGetMemoryMetalHandlePropertiesEXT")); +#endif +#if 
(defined(VK_NV_external_memory_rdma)) + fp_vkGetMemoryRemoteAddressNV = reinterpret_cast(procAddr(device, "vkGetMemoryRemoteAddressNV")); +#endif +#if (defined(VK_KHR_external_memory_win32)) + fp_vkGetMemoryWin32HandleKHR = reinterpret_cast(procAddr(device, "vkGetMemoryWin32HandleKHR")); +#endif +#if (defined(VK_NV_external_memory_win32)) + fp_vkGetMemoryWin32HandleNV = reinterpret_cast(procAddr(device, "vkGetMemoryWin32HandleNV")); +#endif +#if (defined(VK_KHR_external_memory_win32)) + fp_vkGetMemoryWin32HandlePropertiesKHR = reinterpret_cast(procAddr(device, "vkGetMemoryWin32HandlePropertiesKHR")); +#endif +#if (defined(VK_FUCHSIA_external_memory)) + fp_vkGetMemoryZirconHandleFUCHSIA = reinterpret_cast(procAddr(device, "vkGetMemoryZirconHandleFUCHSIA")); +#endif +#if (defined(VK_FUCHSIA_external_memory)) + fp_vkGetMemoryZirconHandlePropertiesFUCHSIA = reinterpret_cast(procAddr(device, "vkGetMemoryZirconHandlePropertiesFUCHSIA")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkGetMicromapBuildSizesEXT = reinterpret_cast(procAddr(device, "vkGetMicromapBuildSizesEXT")); +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + fp_vkGetPartitionedAccelerationStructuresBuildSizesNV = reinterpret_cast(procAddr(device, "vkGetPartitionedAccelerationStructuresBuildSizesNV")); +#endif +#if (defined(VK_GOOGLE_display_timing)) + fp_vkGetPastPresentationTimingGOOGLE = reinterpret_cast(procAddr(device, "vkGetPastPresentationTimingGOOGLE")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkGetPerformanceParameterINTEL = reinterpret_cast(procAddr(device, "vkGetPerformanceParameterINTEL")); +#endif +#if (defined(VK_KHR_pipeline_binary)) + fp_vkGetPipelineBinaryDataKHR = reinterpret_cast(procAddr(device, "vkGetPipelineBinaryDataKHR")); +#endif + fp_vkGetPipelineCacheData = reinterpret_cast(procAddr(device, "vkGetPipelineCacheData")); +#if (defined(VK_KHR_pipeline_executable_properties)) + fp_vkGetPipelineExecutableInternalRepresentationsKHR = 
reinterpret_cast(procAddr(device, "vkGetPipelineExecutableInternalRepresentationsKHR")); +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + fp_vkGetPipelineExecutablePropertiesKHR = reinterpret_cast(procAddr(device, "vkGetPipelineExecutablePropertiesKHR")); +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + fp_vkGetPipelineExecutableStatisticsKHR = reinterpret_cast(procAddr(device, "vkGetPipelineExecutableStatisticsKHR")); +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + fp_vkGetPipelineIndirectDeviceAddressNV = reinterpret_cast(procAddr(device, "vkGetPipelineIndirectDeviceAddressNV")); +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + fp_vkGetPipelineIndirectMemoryRequirementsNV = reinterpret_cast(procAddr(device, "vkGetPipelineIndirectMemoryRequirementsNV")); +#endif +#if (defined(VK_KHR_pipeline_binary)) + fp_vkGetPipelineKeyKHR = reinterpret_cast(procAddr(device, "vkGetPipelineKeyKHR")); +#endif +#if (defined(VK_EXT_pipeline_properties)) + fp_vkGetPipelinePropertiesEXT = reinterpret_cast(procAddr(device, "vkGetPipelinePropertiesEXT")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkGetPrivateData = reinterpret_cast(procAddr(device, "vkGetPrivateData")); +#endif +#if (defined(VK_EXT_private_data)) + fp_vkGetPrivateDataEXT = reinterpret_cast(procAddr(device, "vkGetPrivateDataEXT")); +#endif + fp_vkGetQueryPoolResults = reinterpret_cast(procAddr(device, "vkGetQueryPoolResults")); +#if (defined(VK_NV_device_diagnostic_checkpoints)) + fp_vkGetQueueCheckpointData2NV = reinterpret_cast(procAddr(device, "vkGetQueueCheckpointData2NV")); +#endif +#if (defined(VK_NV_device_diagnostic_checkpoints)) + fp_vkGetQueueCheckpointDataNV = reinterpret_cast(procAddr(device, "vkGetQueueCheckpointDataNV")); +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + fp_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = reinterpret_cast(procAddr(device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR")); +#endif +#if 
(defined(VK_KHR_ray_tracing_pipeline)) + fp_vkGetRayTracingShaderGroupHandlesKHR = reinterpret_cast(procAddr(device, "vkGetRayTracingShaderGroupHandlesKHR")); +#endif +#if (defined(VK_NV_ray_tracing)) + fp_vkGetRayTracingShaderGroupHandlesNV = reinterpret_cast(procAddr(device, "vkGetRayTracingShaderGroupHandlesNV")); +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + fp_vkGetRayTracingShaderGroupStackSizeKHR = reinterpret_cast(procAddr(device, "vkGetRayTracingShaderGroupStackSizeKHR")); +#endif +#if (defined(VK_GOOGLE_display_timing)) + fp_vkGetRefreshCycleDurationGOOGLE = reinterpret_cast(procAddr(device, "vkGetRefreshCycleDurationGOOGLE")); +#endif + fp_vkGetRenderAreaGranularity = reinterpret_cast(procAddr(device, "vkGetRenderAreaGranularity")); +#if (defined(VK_VERSION_1_4)) + fp_vkGetRenderingAreaGranularity = reinterpret_cast(procAddr(device, "vkGetRenderingAreaGranularity")); +#endif +#if (defined(VK_KHR_maintenance5)) + fp_vkGetRenderingAreaGranularityKHR = reinterpret_cast(procAddr(device, "vkGetRenderingAreaGranularityKHR")); +#endif +#if (defined(VK_EXT_descriptor_buffer)) + fp_vkGetSamplerOpaqueCaptureDescriptorDataEXT = reinterpret_cast(procAddr(device, "vkGetSamplerOpaqueCaptureDescriptorDataEXT")); +#endif +#if (defined(VK_QNX_external_memory_screen_buffer)) + fp_vkGetScreenBufferPropertiesQNX = reinterpret_cast(procAddr(device, "vkGetScreenBufferPropertiesQNX")); +#endif +#if (defined(VK_VERSION_1_2)) + fp_vkGetSemaphoreCounterValue = reinterpret_cast(procAddr(device, "vkGetSemaphoreCounterValue")); +#endif +#if (defined(VK_KHR_timeline_semaphore)) + fp_vkGetSemaphoreCounterValueKHR = reinterpret_cast(procAddr(device, "vkGetSemaphoreCounterValueKHR")); +#endif +#if (defined(VK_KHR_external_semaphore_fd)) + fp_vkGetSemaphoreFdKHR = reinterpret_cast(procAddr(device, "vkGetSemaphoreFdKHR")); +#endif +#if (defined(VK_KHR_external_semaphore_win32)) + fp_vkGetSemaphoreWin32HandleKHR = reinterpret_cast(procAddr(device, "vkGetSemaphoreWin32HandleKHR")); 
+#endif +#if (defined(VK_FUCHSIA_external_semaphore)) + fp_vkGetSemaphoreZirconHandleFUCHSIA = reinterpret_cast(procAddr(device, "vkGetSemaphoreZirconHandleFUCHSIA")); +#endif +#if (defined(VK_EXT_shader_object)) + fp_vkGetShaderBinaryDataEXT = reinterpret_cast(procAddr(device, "vkGetShaderBinaryDataEXT")); +#endif +#if (defined(VK_AMD_shader_info)) + fp_vkGetShaderInfoAMD = reinterpret_cast(procAddr(device, "vkGetShaderInfoAMD")); +#endif +#if (defined(VK_EXT_shader_module_identifier)) + fp_vkGetShaderModuleCreateInfoIdentifierEXT = reinterpret_cast(procAddr(device, "vkGetShaderModuleCreateInfoIdentifierEXT")); +#endif +#if (defined(VK_EXT_shader_module_identifier)) + fp_vkGetShaderModuleIdentifierEXT = reinterpret_cast(procAddr(device, "vkGetShaderModuleIdentifierEXT")); +#endif +#if (defined(VK_EXT_display_control)) + fp_vkGetSwapchainCounterEXT = reinterpret_cast(procAddr(device, "vkGetSwapchainCounterEXT")); +#endif +#if (defined(VK_OHOS_native_buffer)) + fp_vkGetSwapchainGrallocUsageOHOS = reinterpret_cast(procAddr(device, "vkGetSwapchainGrallocUsageOHOS")); +#endif +#if (defined(VK_KHR_swapchain)) + fp_vkGetSwapchainImagesKHR = reinterpret_cast(procAddr(device, "vkGetSwapchainImagesKHR")); +#endif +#if (defined(VK_KHR_shared_presentable_image)) + fp_vkGetSwapchainStatusKHR = reinterpret_cast(procAddr(device, "vkGetSwapchainStatusKHR")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkGetTensorMemoryRequirementsARM = reinterpret_cast(procAddr(device, "vkGetTensorMemoryRequirementsARM")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkGetTensorOpaqueCaptureDescriptorDataARM = reinterpret_cast(procAddr(device, "vkGetTensorOpaqueCaptureDescriptorDataARM")); +#endif +#if (defined(VK_ARM_tensors)) + fp_vkGetTensorViewOpaqueCaptureDescriptorDataARM = reinterpret_cast(procAddr(device, "vkGetTensorViewOpaqueCaptureDescriptorDataARM")); +#endif +#if (defined(VK_EXT_validation_cache)) + fp_vkGetValidationCacheDataEXT = reinterpret_cast(procAddr(device, 
"vkGetValidationCacheDataEXT")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkGetVideoSessionMemoryRequirementsKHR = reinterpret_cast(procAddr(device, "vkGetVideoSessionMemoryRequirementsKHR")); +#endif +#if (defined(VK_KHR_external_fence_fd)) + fp_vkImportFenceFdKHR = reinterpret_cast(procAddr(device, "vkImportFenceFdKHR")); +#endif +#if (defined(VK_KHR_external_fence_win32)) + fp_vkImportFenceWin32HandleKHR = reinterpret_cast(procAddr(device, "vkImportFenceWin32HandleKHR")); +#endif +#if (defined(VK_KHR_external_semaphore_fd)) + fp_vkImportSemaphoreFdKHR = reinterpret_cast(procAddr(device, "vkImportSemaphoreFdKHR")); +#endif +#if (defined(VK_KHR_external_semaphore_win32)) + fp_vkImportSemaphoreWin32HandleKHR = reinterpret_cast(procAddr(device, "vkImportSemaphoreWin32HandleKHR")); +#endif +#if (defined(VK_FUCHSIA_external_semaphore)) + fp_vkImportSemaphoreZirconHandleFUCHSIA = reinterpret_cast(procAddr(device, "vkImportSemaphoreZirconHandleFUCHSIA")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkInitializePerformanceApiINTEL = reinterpret_cast(procAddr(device, "vkInitializePerformanceApiINTEL")); +#endif + fp_vkInvalidateMappedMemoryRanges = reinterpret_cast(procAddr(device, "vkInvalidateMappedMemoryRanges")); +#if (defined(VK_NV_low_latency2)) + fp_vkLatencySleepNV = reinterpret_cast(procAddr(device, "vkLatencySleepNV")); +#endif + fp_vkMapMemory = reinterpret_cast(procAddr(device, "vkMapMemory")); +#if (defined(VK_VERSION_1_4)) + fp_vkMapMemory2 = reinterpret_cast(procAddr(device, "vkMapMemory2")); +#endif +#if (defined(VK_KHR_map_memory2)) + fp_vkMapMemory2KHR = reinterpret_cast(procAddr(device, "vkMapMemory2KHR")); +#endif + fp_vkMergePipelineCaches = reinterpret_cast(procAddr(device, "vkMergePipelineCaches")); +#if (defined(VK_EXT_validation_cache)) + fp_vkMergeValidationCachesEXT = reinterpret_cast(procAddr(device, "vkMergeValidationCachesEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkQueueBeginDebugUtilsLabelEXT = 
reinterpret_cast(procAddr(device, "vkQueueBeginDebugUtilsLabelEXT")); +#endif + fp_vkQueueBindSparse = reinterpret_cast(procAddr(device, "vkQueueBindSparse")); +#if (defined(VK_EXT_debug_utils)) + fp_vkQueueEndDebugUtilsLabelEXT = reinterpret_cast(procAddr(device, "vkQueueEndDebugUtilsLabelEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkQueueInsertDebugUtilsLabelEXT = reinterpret_cast(procAddr(device, "vkQueueInsertDebugUtilsLabelEXT")); +#endif +#if (defined(VK_NV_low_latency2)) + fp_vkQueueNotifyOutOfBandNV = reinterpret_cast(procAddr(device, "vkQueueNotifyOutOfBandNV")); +#endif +#if (defined(VK_KHR_swapchain)) + fp_vkQueuePresentKHR = reinterpret_cast(procAddr(device, "vkQueuePresentKHR")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkQueueSetPerformanceConfigurationINTEL = reinterpret_cast(procAddr(device, "vkQueueSetPerformanceConfigurationINTEL")); +#endif +#if (defined(VK_OHOS_native_buffer)) + fp_vkQueueSignalReleaseImageOHOS = reinterpret_cast(procAddr(device, "vkQueueSignalReleaseImageOHOS")); +#endif + fp_vkQueueSubmit = reinterpret_cast(procAddr(device, "vkQueueSubmit")); +#if (defined(VK_VERSION_1_3)) + fp_vkQueueSubmit2 = reinterpret_cast(procAddr(device, "vkQueueSubmit2")); +#endif +#if (defined(VK_KHR_synchronization2)) + fp_vkQueueSubmit2KHR = reinterpret_cast(procAddr(device, "vkQueueSubmit2KHR")); +#endif + fp_vkQueueWaitIdle = reinterpret_cast(procAddr(device, "vkQueueWaitIdle")); +#if (defined(VK_EXT_display_control)) + fp_vkRegisterDeviceEventEXT = reinterpret_cast(procAddr(device, "vkRegisterDeviceEventEXT")); +#endif +#if (defined(VK_EXT_display_control)) + fp_vkRegisterDisplayEventEXT = reinterpret_cast(procAddr(device, "vkRegisterDisplayEventEXT")); +#endif +#if (defined(VK_KHR_pipeline_binary)) + fp_vkReleaseCapturedPipelineDataKHR = reinterpret_cast(procAddr(device, "vkReleaseCapturedPipelineDataKHR")); +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + fp_vkReleaseFullScreenExclusiveModeEXT = 
reinterpret_cast(procAddr(device, "vkReleaseFullScreenExclusiveModeEXT")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkReleasePerformanceConfigurationINTEL = reinterpret_cast(procAddr(device, "vkReleasePerformanceConfigurationINTEL")); +#endif +#if (defined(VK_KHR_performance_query)) + fp_vkReleaseProfilingLockKHR = reinterpret_cast(procAddr(device, "vkReleaseProfilingLockKHR")); +#endif +#if (defined(VK_EXT_swapchain_maintenance1)) + fp_vkReleaseSwapchainImagesEXT = reinterpret_cast(procAddr(device, "vkReleaseSwapchainImagesEXT")); +#endif +#if (defined(VK_KHR_swapchain_maintenance1)) + fp_vkReleaseSwapchainImagesKHR = reinterpret_cast(procAddr(device, "vkReleaseSwapchainImagesKHR")); +#endif + fp_vkResetCommandBuffer = reinterpret_cast(procAddr(device, "vkResetCommandBuffer")); + fp_vkResetCommandPool = reinterpret_cast(procAddr(device, "vkResetCommandPool")); + fp_vkResetDescriptorPool = reinterpret_cast(procAddr(device, "vkResetDescriptorPool")); + fp_vkResetEvent = reinterpret_cast(procAddr(device, "vkResetEvent")); + fp_vkResetFences = reinterpret_cast(procAddr(device, "vkResetFences")); +#if (defined(VK_VERSION_1_2)) + fp_vkResetQueryPool = reinterpret_cast(procAddr(device, "vkResetQueryPool")); +#endif +#if (defined(VK_EXT_host_query_reset)) + fp_vkResetQueryPoolEXT = reinterpret_cast(procAddr(device, "vkResetQueryPoolEXT")); +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + fp_vkSetBufferCollectionBufferConstraintsFUCHSIA = reinterpret_cast(procAddr(device, "vkSetBufferCollectionBufferConstraintsFUCHSIA")); +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + fp_vkSetBufferCollectionImageConstraintsFUCHSIA = reinterpret_cast(procAddr(device, "vkSetBufferCollectionImageConstraintsFUCHSIA")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkSetDebugUtilsObjectNameEXT = reinterpret_cast(procAddr(device, "vkSetDebugUtilsObjectNameEXT")); +#endif +#if (defined(VK_EXT_debug_utils)) + fp_vkSetDebugUtilsObjectTagEXT = 
reinterpret_cast(procAddr(device, "vkSetDebugUtilsObjectTagEXT")); +#endif +#if (defined(VK_EXT_pageable_device_local_memory)) + fp_vkSetDeviceMemoryPriorityEXT = reinterpret_cast(procAddr(device, "vkSetDeviceMemoryPriorityEXT")); +#endif + fp_vkSetEvent = reinterpret_cast(procAddr(device, "vkSetEvent")); +#if (defined(VK_EXT_hdr_metadata)) + fp_vkSetHdrMetadataEXT = reinterpret_cast(procAddr(device, "vkSetHdrMetadataEXT")); +#endif +#if (defined(VK_NV_low_latency2)) + fp_vkSetLatencyMarkerNV = reinterpret_cast(procAddr(device, "vkSetLatencyMarkerNV")); +#endif +#if (defined(VK_NV_low_latency2)) + fp_vkSetLatencySleepModeNV = reinterpret_cast(procAddr(device, "vkSetLatencySleepModeNV")); +#endif +#if (defined(VK_AMD_display_native_hdr)) + fp_vkSetLocalDimmingAMD = reinterpret_cast(procAddr(device, "vkSetLocalDimmingAMD")); +#endif +#if (defined(VK_VERSION_1_3)) + fp_vkSetPrivateData = reinterpret_cast(procAddr(device, "vkSetPrivateData")); +#endif +#if (defined(VK_EXT_private_data)) + fp_vkSetPrivateDataEXT = reinterpret_cast(procAddr(device, "vkSetPrivateDataEXT")); +#endif +#if (defined(VK_VERSION_1_2)) + fp_vkSignalSemaphore = reinterpret_cast(procAddr(device, "vkSignalSemaphore")); +#endif +#if (defined(VK_KHR_timeline_semaphore)) + fp_vkSignalSemaphoreKHR = reinterpret_cast(procAddr(device, "vkSignalSemaphoreKHR")); +#endif +#if (defined(VK_VERSION_1_4)) + fp_vkTransitionImageLayout = reinterpret_cast(procAddr(device, "vkTransitionImageLayout")); +#endif +#if (defined(VK_EXT_host_image_copy)) + fp_vkTransitionImageLayoutEXT = reinterpret_cast(procAddr(device, "vkTransitionImageLayoutEXT")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkTrimCommandPool = reinterpret_cast(procAddr(device, "vkTrimCommandPool")); +#endif +#if (defined(VK_KHR_maintenance1)) + fp_vkTrimCommandPoolKHR = reinterpret_cast(procAddr(device, "vkTrimCommandPoolKHR")); +#endif +#if (defined(VK_INTEL_performance_query)) + fp_vkUninitializePerformanceApiINTEL = 
reinterpret_cast(procAddr(device, "vkUninitializePerformanceApiINTEL")); +#endif + fp_vkUnmapMemory = reinterpret_cast(procAddr(device, "vkUnmapMemory")); +#if (defined(VK_VERSION_1_4)) + fp_vkUnmapMemory2 = reinterpret_cast(procAddr(device, "vkUnmapMemory2")); +#endif +#if (defined(VK_KHR_map_memory2)) + fp_vkUnmapMemory2KHR = reinterpret_cast(procAddr(device, "vkUnmapMemory2KHR")); +#endif +#if (defined(VK_VERSION_1_1)) + fp_vkUpdateDescriptorSetWithTemplate = reinterpret_cast(procAddr(device, "vkUpdateDescriptorSetWithTemplate")); +#endif +#if (defined(VK_KHR_descriptor_update_template)) + fp_vkUpdateDescriptorSetWithTemplateKHR = reinterpret_cast(procAddr(device, "vkUpdateDescriptorSetWithTemplateKHR")); +#endif + fp_vkUpdateDescriptorSets = reinterpret_cast(procAddr(device, "vkUpdateDescriptorSets")); +#if (defined(VK_EXT_device_generated_commands)) + fp_vkUpdateIndirectExecutionSetPipelineEXT = reinterpret_cast(procAddr(device, "vkUpdateIndirectExecutionSetPipelineEXT")); +#endif +#if (defined(VK_EXT_device_generated_commands)) + fp_vkUpdateIndirectExecutionSetShaderEXT = reinterpret_cast(procAddr(device, "vkUpdateIndirectExecutionSetShaderEXT")); +#endif +#if (defined(VK_KHR_video_queue)) + fp_vkUpdateVideoSessionParametersKHR = reinterpret_cast(procAddr(device, "vkUpdateVideoSessionParametersKHR")); +#endif + fp_vkWaitForFences = reinterpret_cast(procAddr(device, "vkWaitForFences")); +#if (defined(VK_KHR_present_wait2)) + fp_vkWaitForPresent2KHR = reinterpret_cast(procAddr(device, "vkWaitForPresent2KHR")); +#endif +#if (defined(VK_KHR_present_wait)) + fp_vkWaitForPresentKHR = reinterpret_cast(procAddr(device, "vkWaitForPresentKHR")); +#endif +#if (defined(VK_VERSION_1_2)) + fp_vkWaitSemaphores = reinterpret_cast(procAddr(device, "vkWaitSemaphores")); +#endif +#if (defined(VK_KHR_timeline_semaphore)) + fp_vkWaitSemaphoresKHR = reinterpret_cast(procAddr(device, "vkWaitSemaphoresKHR")); +#endif +#if (defined(VK_KHR_acceleration_structure)) + 
fp_vkWriteAccelerationStructuresPropertiesKHR = reinterpret_cast(procAddr(device, "vkWriteAccelerationStructuresPropertiesKHR")); +#endif +#if (defined(VK_EXT_opacity_micromap)) + fp_vkWriteMicromapsPropertiesEXT = reinterpret_cast(procAddr(device, "vkWriteMicromapsPropertiesEXT")); +#endif + } +#if (defined(VK_EXT_full_screen_exclusive)) + VkResult acquireFullScreenExclusiveModeEXT(VkSwapchainKHR swapchain) const noexcept { + return fp_vkAcquireFullScreenExclusiveModeEXT(device, swapchain); + } +#endif +#if (defined(VK_OHOS_native_buffer)) + VkResult acquireImageOHOS(VkImage image, int32_t nativeFenceFd, VkSemaphore semaphore, VkFence fence) const noexcept { + return fp_vkAcquireImageOHOS(device, image, nativeFenceFd, semaphore, fence); + } +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + VkResult acquireNextImage2KHR(const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex) const noexcept { + return fp_vkAcquireNextImage2KHR(device, pAcquireInfo, pImageIndex); + } +#endif +#if (defined(VK_KHR_swapchain)) + VkResult acquireNextImageKHR(VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex) const noexcept { + return fp_vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult acquirePerformanceConfigurationINTEL(const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration) const noexcept { + return fp_vkAcquirePerformanceConfigurationINTEL(device, pAcquireInfo, pConfiguration); + } +#endif +#if (defined(VK_KHR_performance_query)) + VkResult acquireProfilingLockKHR(const VkAcquireProfilingLockInfoKHR* pInfo) const noexcept { + return fp_vkAcquireProfilingLockKHR(device, pInfo); + } +#endif + VkResult allocateCommandBuffers(const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) const noexcept { + return 
fp_vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers); + } + VkResult allocateDescriptorSets(const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets) const noexcept { + return fp_vkAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); + } + VkResult allocateMemory(const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory) const noexcept { + return fp_vkAllocateMemory(device, pAllocateInfo, pAllocator, pMemory); + } +#if (defined(VK_AMD_anti_lag)) + void antiLagUpdateAMD(const VkAntiLagDataAMD* pData) const noexcept { + fp_vkAntiLagUpdateAMD(device, pData); + } +#endif + VkResult beginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo) const noexcept { + return fp_vkBeginCommandBuffer(commandBuffer, pBeginInfo); + } +#if (defined(VK_NV_ray_tracing)) + VkResult bindAccelerationStructureMemoryNV(uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) const noexcept { + return fp_vkBindAccelerationStructureMemoryNV(device, bindInfoCount, pBindInfos); + } +#endif + VkResult bindBufferMemory(VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) const noexcept { + return fp_vkBindBufferMemory(device, buffer, memory, memoryOffset); + } +#if (defined(VK_VERSION_1_1)) + VkResult bindBufferMemory2(uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos) const noexcept { + return fp_vkBindBufferMemory2(device, bindInfoCount, pBindInfos); + } +#endif +#if (defined(VK_KHR_bind_memory2)) + VkResult bindBufferMemory2KHR(uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos) const noexcept { + return fp_vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos); + } +#endif +#if (defined(VK_ARM_data_graph)) + VkResult bindDataGraphPipelineSessionMemoryARM(uint32_t bindInfoCount, const VkBindDataGraphPipelineSessionMemoryInfoARM* pBindInfos) const noexcept { + return 
fp_vkBindDataGraphPipelineSessionMemoryARM(device, bindInfoCount, pBindInfos); + } +#endif + VkResult bindImageMemory(VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset) const noexcept { + return fp_vkBindImageMemory(device, image, memory, memoryOffset); + } +#if (defined(VK_VERSION_1_1)) + VkResult bindImageMemory2(uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos) const noexcept { + return fp_vkBindImageMemory2(device, bindInfoCount, pBindInfos); + } +#endif +#if (defined(VK_KHR_bind_memory2)) + VkResult bindImageMemory2KHR(uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos) const noexcept { + return fp_vkBindImageMemory2KHR(device, bindInfoCount, pBindInfos); + } +#endif +#if (defined(VK_NV_optical_flow)) + VkResult bindOpticalFlowSessionImageNV(VkOpticalFlowSessionNV session, VkOpticalFlowSessionBindingPointNV bindingPoint, VkImageView view, VkImageLayout layout) const noexcept { + return fp_vkBindOpticalFlowSessionImageNV(device, session, bindingPoint, view, layout); + } +#endif +#if (defined(VK_ARM_tensors)) + VkResult bindTensorMemoryARM(uint32_t bindInfoCount, const VkBindTensorMemoryInfoARM* pBindInfos) const noexcept { + return fp_vkBindTensorMemoryARM(device, bindInfoCount, pBindInfos); + } +#endif +#if (defined(VK_KHR_video_queue)) + VkResult bindVideoSessionMemoryKHR(VkVideoSessionKHR videoSession, uint32_t bindSessionMemoryInfoCount, const VkBindVideoSessionMemoryInfoKHR* pBindSessionMemoryInfos) const noexcept { + return fp_vkBindVideoSessionMemoryKHR(device, videoSession, bindSessionMemoryInfoCount, pBindSessionMemoryInfos); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + VkResult buildAccelerationStructuresKHR(VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) const noexcept { + return fp_vkBuildAccelerationStructuresKHR(device, deferredOperation, 
infoCount, pInfos, ppBuildRangeInfos); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + VkResult buildMicromapsEXT(VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkMicromapBuildInfoEXT* pInfos) const noexcept { + return fp_vkBuildMicromapsEXT(device, deferredOperation, infoCount, pInfos); + } +#endif +#if (defined(VK_EXT_conditional_rendering)) + void cmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin) const noexcept { + fp_vkCmdBeginConditionalRenderingEXT(commandBuffer, pConditionalRenderingBegin); + } +#endif +#if (defined(VK_EXT_debug_utils)) + void cmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo) const noexcept { + fp_vkCmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo); + } +#endif +#if (defined(VK_QCOM_tile_shading)) + void cmdBeginPerTileExecutionQCOM(VkCommandBuffer commandBuffer, const VkPerTileBeginInfoQCOM* pPerTileBeginInfo) const noexcept { + fp_vkCmdBeginPerTileExecutionQCOM(commandBuffer, pPerTileBeginInfo); + } +#endif + void cmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags) const noexcept { + fp_vkCmdBeginQuery(commandBuffer, queryPool, query, flags); + } +#if (defined(VK_EXT_transform_feedback)) + void cmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) const noexcept { + fp_vkCmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, index); + } +#endif + void cmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents) const noexcept { + fp_vkCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); + } +#if (defined(VK_VERSION_1_2)) + void cmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* 
pSubpassBeginInfo) const noexcept { + fp_vkCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); + } +#endif +#if (defined(VK_KHR_create_renderpass2)) + void cmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfoKHR* pSubpassBeginInfo) const noexcept { + fp_vkCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdBeginRendering(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo) const noexcept { + fp_vkCmdBeginRendering(commandBuffer, pRenderingInfo); + } +#endif +#if (defined(VK_KHR_dynamic_rendering)) + void cmdBeginRenderingKHR(VkCommandBuffer commandBuffer, const VkRenderingInfoKHR* pRenderingInfo) const noexcept { + fp_vkCmdBeginRenderingKHR(commandBuffer, pRenderingInfo); + } +#endif +#if (defined(VK_EXT_transform_feedback)) + void cmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets) const noexcept { + fp_vkCmdBeginTransformFeedbackEXT(commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets); + } +#endif +#if (defined(VK_KHR_video_queue)) + void cmdBeginVideoCodingKHR(VkCommandBuffer commandBuffer, const VkVideoBeginCodingInfoKHR* pBeginInfo) const noexcept { + fp_vkCmdBeginVideoCodingKHR(commandBuffer, pBeginInfo); + } +#endif +#if (defined(VK_KHR_maintenance6)) + void cmdBindDescriptorBufferEmbeddedSamplers2EXT(VkCommandBuffer commandBuffer, const VkBindDescriptorBufferEmbeddedSamplersInfoEXT* pBindDescriptorBufferEmbeddedSamplersInfo) const noexcept { + fp_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT(commandBuffer, pBindDescriptorBufferEmbeddedSamplersInfo); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + void cmdBindDescriptorBufferEmbeddedSamplersEXT(VkCommandBuffer commandBuffer, 
VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set) const noexcept { + fp_vkCmdBindDescriptorBufferEmbeddedSamplersEXT(commandBuffer, pipelineBindPoint, layout, set); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + void cmdBindDescriptorBuffersEXT(VkCommandBuffer commandBuffer, uint32_t bufferCount, const VkDescriptorBufferBindingInfoEXT* pBindingInfos) const noexcept { + fp_vkCmdBindDescriptorBuffersEXT(commandBuffer, bufferCount, pBindingInfos); + } +#endif + void cmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets) const noexcept { + fp_vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets); + } +#if (defined(VK_VERSION_1_4)) + void cmdBindDescriptorSets2(VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo) const noexcept { + fp_vkCmdBindDescriptorSets2(commandBuffer, pBindDescriptorSetsInfo); + } +#endif +#if (defined(VK_KHR_maintenance6)) + void cmdBindDescriptorSets2KHR(VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfoKHR* pBindDescriptorSetsInfo) const noexcept { + fp_vkCmdBindDescriptorSets2KHR(commandBuffer, pBindDescriptorSetsInfo); + } +#endif + void cmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const noexcept { + fp_vkCmdBindIndexBuffer(commandBuffer, buffer, offset, indexType); + } +#if (defined(VK_VERSION_1_4)) + void cmdBindIndexBuffer2(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size, VkIndexType indexType) const noexcept { + fp_vkCmdBindIndexBuffer2(commandBuffer, buffer, offset, size, indexType); + } +#endif +#if (defined(VK_KHR_maintenance5)) + void 
cmdBindIndexBuffer2KHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size, VkIndexType indexType) const noexcept { + fp_vkCmdBindIndexBuffer2KHR(commandBuffer, buffer, offset, size, indexType); + } +#endif +#if (defined(VK_HUAWEI_invocation_mask)) + void cmdBindInvocationMaskHUAWEI(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) const noexcept { + fp_vkCmdBindInvocationMaskHUAWEI(commandBuffer, imageView, imageLayout); + } +#endif + void cmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const noexcept { + fp_vkCmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline); + } +#if (defined(VK_NV_device_generated_commands)) + void cmdBindPipelineShaderGroupNV(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex) const noexcept { + fp_vkCmdBindPipelineShaderGroupNV(commandBuffer, pipelineBindPoint, pipeline, groupIndex); + } +#endif +#if (defined(VK_EXT_shader_object)) + void cmdBindShadersEXT(VkCommandBuffer commandBuffer, uint32_t stageCount, const VkShaderStageFlagBits* pStages, const VkShaderEXT* pShaders) const noexcept { + fp_vkCmdBindShadersEXT(commandBuffer, stageCount, pStages, pShaders); + } +#endif +#if (defined(VK_NV_shading_rate_image)) + void cmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) const noexcept { + fp_vkCmdBindShadingRateImageNV(commandBuffer, imageView, imageLayout); + } +#endif +#if (defined(VK_QCOM_tile_memory_heap)) + void cmdBindTileMemoryQCOM(VkCommandBuffer commandBuffer, const VkTileMemoryBindInfoQCOM* pTileMemoryBindInfo) const noexcept { + fp_vkCmdBindTileMemoryQCOM(commandBuffer, pTileMemoryBindInfo); + } +#endif +#if (defined(VK_EXT_transform_feedback)) + void cmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, 
const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes) const noexcept { + fp_vkCmdBindTransformFeedbackBuffersEXT(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes); + } +#endif + void cmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets) const noexcept { + fp_vkCmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets); + } +#if (defined(VK_VERSION_1_3)) + void cmdBindVertexBuffers2(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides) const noexcept { + fp_vkCmdBindVertexBuffers2(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes, pStrides); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides) const noexcept { + fp_vkCmdBindVertexBuffers2EXT(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes, pStrides); + } +#endif + void cmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter) const noexcept { + fp_vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter); + } +#if (defined(VK_VERSION_1_3)) + void cmdBlitImage2(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo) const noexcept { + fp_vkCmdBlitImage2(commandBuffer, pBlitImageInfo); + } +#endif +#if (defined(VK_KHR_copy_commands2)) + void cmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR* 
pBlitImageInfo) const noexcept { + fp_vkCmdBlitImage2KHR(commandBuffer, pBlitImageInfo); + } +#endif +#if (defined(VK_NV_ray_tracing)) + void cmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) const noexcept { + fp_vkCmdBuildAccelerationStructureNV(commandBuffer, pInfo, instanceData, instanceOffset, update, dst, src, scratch, scratchOffset); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void cmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts) const noexcept { + fp_vkCmdBuildAccelerationStructuresIndirectKHR(commandBuffer, infoCount, pInfos, pIndirectDeviceAddresses, pIndirectStrides, ppMaxPrimitiveCounts); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void cmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) const noexcept { + fp_vkCmdBuildAccelerationStructuresKHR(commandBuffer, infoCount, pInfos, ppBuildRangeInfos); + } +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + void cmdBuildClusterAccelerationStructureIndirectNV(VkCommandBuffer commandBuffer, const VkClusterAccelerationStructureCommandsInfoNV* pCommandInfos) const noexcept { + fp_vkCmdBuildClusterAccelerationStructureIndirectNV(commandBuffer, pCommandInfos); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void cmdBuildMicromapsEXT(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkMicromapBuildInfoEXT* pInfos) const noexcept { + 
fp_vkCmdBuildMicromapsEXT(commandBuffer, infoCount, pInfos); + } +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + void cmdBuildPartitionedAccelerationStructuresNV(VkCommandBuffer commandBuffer, const VkBuildPartitionedAccelerationStructureInfoNV* pBuildInfo) const noexcept { + fp_vkCmdBuildPartitionedAccelerationStructuresNV(commandBuffer, pBuildInfo); + } +#endif + void cmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects) const noexcept { + fp_vkCmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects); + } + void cmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges) const noexcept { + fp_vkCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges); + } + void cmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges) const noexcept { + fp_vkCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges); + } +#if (defined(VK_KHR_video_queue)) + void cmdControlVideoCodingKHR(VkCommandBuffer commandBuffer, const VkVideoCodingControlInfoKHR* pCodingControlInfo) const noexcept { + fp_vkCmdControlVideoCodingKHR(commandBuffer, pCodingControlInfo); + } +#endif +#if (defined(VK_NV_cooperative_vector)) + void cmdConvertCooperativeVectorMatrixNV(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkConvertCooperativeVectorMatrixInfoNV* pInfos) const noexcept { + fp_vkCmdConvertCooperativeVectorMatrixNV(commandBuffer, infoCount, pInfos); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void cmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const 
VkCopyAccelerationStructureInfoKHR* pInfo) const noexcept { + fp_vkCmdCopyAccelerationStructureKHR(commandBuffer, pInfo); + } +#endif +#if (defined(VK_NV_ray_tracing)) + void cmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode) const noexcept { + fp_vkCmdCopyAccelerationStructureNV(commandBuffer, dst, src, mode); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void cmdCopyAccelerationStructureToMemoryKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) const noexcept { + fp_vkCmdCopyAccelerationStructureToMemoryKHR(commandBuffer, pInfo); + } +#endif + void cmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions) const noexcept { + fp_vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions); + } +#if (defined(VK_VERSION_1_3)) + void cmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo) const noexcept { + fp_vkCmdCopyBuffer2(commandBuffer, pCopyBufferInfo); + } +#endif +#if (defined(VK_KHR_copy_commands2)) + void cmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR* pCopyBufferInfo) const noexcept { + fp_vkCmdCopyBuffer2KHR(commandBuffer, pCopyBufferInfo); + } +#endif + void cmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions) const noexcept { + fp_vkCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions); + } +#if (defined(VK_VERSION_1_3)) + void cmdCopyBufferToImage2(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo) const noexcept { + fp_vkCmdCopyBufferToImage2(commandBuffer, pCopyBufferToImageInfo); + } +#endif +#if (defined(VK_KHR_copy_commands2)) + 
void cmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo) const noexcept { + fp_vkCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo); + } +#endif + void cmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions) const noexcept { + fp_vkCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); + } +#if (defined(VK_VERSION_1_3)) + void cmdCopyImage2(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo) const noexcept { + fp_vkCmdCopyImage2(commandBuffer, pCopyImageInfo); + } +#endif +#if (defined(VK_KHR_copy_commands2)) + void cmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR* pCopyImageInfo) const noexcept { + fp_vkCmdCopyImage2KHR(commandBuffer, pCopyImageInfo); + } +#endif + void cmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions) const noexcept { + fp_vkCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions); + } +#if (defined(VK_VERSION_1_3)) + void cmdCopyImageToBuffer2(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo) const noexcept { + fp_vkCmdCopyImageToBuffer2(commandBuffer, pCopyImageToBufferInfo); + } +#endif +#if (defined(VK_KHR_copy_commands2)) + void cmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo) const noexcept { + fp_vkCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo); + } +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + void cmdCopyMemoryIndirectKHR(VkCommandBuffer commandBuffer, const VkCopyMemoryIndirectInfoKHR* pCopyMemoryIndirectInfo) const noexcept { + 
fp_vkCmdCopyMemoryIndirectKHR(commandBuffer, pCopyMemoryIndirectInfo); + } +#endif +#if (defined(VK_NV_copy_memory_indirect)) + void cmdCopyMemoryIndirectNV(VkCommandBuffer commandBuffer, VkDeviceAddress copyBufferAddress, uint32_t copyCount, uint32_t stride) const noexcept { + fp_vkCmdCopyMemoryIndirectNV(commandBuffer, copyBufferAddress, copyCount, stride); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void cmdCopyMemoryToAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) const noexcept { + fp_vkCmdCopyMemoryToAccelerationStructureKHR(commandBuffer, pInfo); + } +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + void cmdCopyMemoryToImageIndirectKHR(VkCommandBuffer commandBuffer, const VkCopyMemoryToImageIndirectInfoKHR* pCopyMemoryToImageIndirectInfo) const noexcept { + fp_vkCmdCopyMemoryToImageIndirectKHR(commandBuffer, pCopyMemoryToImageIndirectInfo); + } +#endif +#if (defined(VK_NV_copy_memory_indirect)) + void cmdCopyMemoryToImageIndirectNV(VkCommandBuffer commandBuffer, VkDeviceAddress copyBufferAddress, uint32_t copyCount, uint32_t stride, VkImage dstImage, VkImageLayout dstImageLayout, const VkImageSubresourceLayers* pImageSubresources) const noexcept { + fp_vkCmdCopyMemoryToImageIndirectNV(commandBuffer, copyBufferAddress, copyCount, stride, dstImage, dstImageLayout, pImageSubresources); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void cmdCopyMemoryToMicromapEXT(VkCommandBuffer commandBuffer, const VkCopyMemoryToMicromapInfoEXT* pInfo) const noexcept { + fp_vkCmdCopyMemoryToMicromapEXT(commandBuffer, pInfo); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void cmdCopyMicromapEXT(VkCommandBuffer commandBuffer, const VkCopyMicromapInfoEXT* pInfo) const noexcept { + fp_vkCmdCopyMicromapEXT(commandBuffer, pInfo); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void cmdCopyMicromapToMemoryEXT(VkCommandBuffer commandBuffer, const VkCopyMicromapToMemoryInfoEXT* 
pInfo) const noexcept { + fp_vkCmdCopyMicromapToMemoryEXT(commandBuffer, pInfo); + } +#endif + void cmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) const noexcept { + fp_vkCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags); + } +#if (defined(VK_ARM_tensors)) + void cmdCopyTensorARM(VkCommandBuffer commandBuffer, const VkCopyTensorInfoARM* pCopyTensorInfo) const noexcept { + fp_vkCmdCopyTensorARM(commandBuffer, pCopyTensorInfo); + } +#endif +#if (defined(VK_NVX_binary_import)) + void cmdCuLaunchKernelNVX(VkCommandBuffer commandBuffer, const VkCuLaunchInfoNVX* pLaunchInfo) const noexcept { + fp_vkCmdCuLaunchKernelNVX(commandBuffer, pLaunchInfo); + } +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + void cmdCudaLaunchKernelNV(VkCommandBuffer commandBuffer, const VkCudaLaunchInfoNV* pLaunchInfo) const noexcept { + fp_vkCmdCudaLaunchKernelNV(commandBuffer, pLaunchInfo); + } +#endif +#if (defined(VK_EXT_debug_marker)) + void cmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) const noexcept { + fp_vkCmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo); + } +#endif +#if (defined(VK_EXT_debug_marker)) + void cmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const noexcept { + fp_vkCmdDebugMarkerEndEXT(commandBuffer); + } +#endif +#if (defined(VK_EXT_debug_marker)) + void cmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) const noexcept { + fp_vkCmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo); + } +#endif +#if (defined(VK_KHR_video_decode_queue)) + void cmdDecodeVideoKHR(VkCommandBuffer commandBuffer, const VkVideoDecodeInfoKHR* pDecodeInfo) const noexcept { + fp_vkCmdDecodeVideoKHR(commandBuffer, pDecodeInfo); + } +#endif +#if 
(defined(VK_EXT_memory_decompression)) + void cmdDecompressMemoryEXT(VkCommandBuffer commandBuffer, const VkDecompressMemoryInfoEXT* pDecompressMemoryInfoEXT) const noexcept { + fp_vkCmdDecompressMemoryEXT(commandBuffer, pDecompressMemoryInfoEXT); + } +#endif +#if (defined(VK_EXT_memory_decompression)) + void cmdDecompressMemoryIndirectCountEXT(VkCommandBuffer commandBuffer, VkMemoryDecompressionMethodFlagsEXT decompressionMethod, VkDeviceAddress indirectCommandsAddress, VkDeviceAddress indirectCommandsCountAddress, uint32_t maxDecompressionCount, uint32_t stride) const noexcept { + fp_vkCmdDecompressMemoryIndirectCountEXT(commandBuffer, decompressionMethod, indirectCommandsAddress, indirectCommandsCountAddress, maxDecompressionCount, stride); + } +#endif +#if (defined(VK_NV_memory_decompression)) + void cmdDecompressMemoryIndirectCountNV(VkCommandBuffer commandBuffer, VkDeviceAddress indirectCommandsAddress, VkDeviceAddress indirectCommandsCountAddress, uint32_t stride) const noexcept { + fp_vkCmdDecompressMemoryIndirectCountNV(commandBuffer, indirectCommandsAddress, indirectCommandsCountAddress, stride); + } +#endif +#if (defined(VK_NV_memory_decompression)) + void cmdDecompressMemoryNV(VkCommandBuffer commandBuffer, uint32_t decompressRegionCount, const VkDecompressMemoryRegionNV* pDecompressMemoryRegions) const noexcept { + fp_vkCmdDecompressMemoryNV(commandBuffer, decompressRegionCount, pDecompressMemoryRegions); + } +#endif + void cmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const noexcept { + fp_vkCmdDispatch(commandBuffer, groupCountX, groupCountY, groupCountZ); + } +#if (defined(VK_VERSION_1_1)) + void cmdDispatchBase(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const noexcept { + fp_vkCmdDispatchBase(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, 
groupCountZ); + } +#endif +#if (defined(VK_KHR_device_group)) + void cmdDispatchBaseKHR(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const noexcept { + fp_vkCmdDispatchBaseKHR(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ); + } +#endif +#if (defined(VK_ARM_data_graph)) + void cmdDispatchDataGraphARM(VkCommandBuffer commandBuffer, VkDataGraphPipelineSessionARM session, const VkDataGraphPipelineDispatchInfoARM* pInfo) const noexcept { + fp_vkCmdDispatchDataGraphARM(commandBuffer, session, pInfo); + } +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + void cmdDispatchGraphAMDX(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, VkDeviceSize scratchSize, const VkDispatchGraphCountInfoAMDX* pCountInfo) const noexcept { + fp_vkCmdDispatchGraphAMDX(commandBuffer, scratch, scratchSize, pCountInfo); + } +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + void cmdDispatchGraphIndirectAMDX(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, VkDeviceSize scratchSize, const VkDispatchGraphCountInfoAMDX* pCountInfo) const noexcept { + fp_vkCmdDispatchGraphIndirectAMDX(commandBuffer, scratch, scratchSize, pCountInfo); + } +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + void cmdDispatchGraphIndirectCountAMDX(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, VkDeviceSize scratchSize, VkDeviceAddress countInfo) const noexcept { + fp_vkCmdDispatchGraphIndirectCountAMDX(commandBuffer, scratch, scratchSize, countInfo); + } +#endif + void cmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const noexcept { + fp_vkCmdDispatchIndirect(commandBuffer, buffer, offset); + } +#if (defined(VK_QCOM_tile_shading)) && VK_HEADER_VERSION >= 316 + void cmdDispatchTileQCOM(VkCommandBuffer commandBuffer, const 
VkDispatchTileInfoQCOM* pDispatchTileInfo) const noexcept { + fp_vkCmdDispatchTileQCOM(commandBuffer, pDispatchTileInfo); + } +#endif + void cmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) const noexcept { + fp_vkCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance); + } +#if (defined(VK_HUAWEI_cluster_culling_shader)) + void cmdDrawClusterHUAWEI(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const noexcept { + fp_vkCmdDrawClusterHUAWEI(commandBuffer, groupCountX, groupCountY, groupCountZ); + } +#endif +#if (defined(VK_HUAWEI_cluster_culling_shader)) + void cmdDrawClusterIndirectHUAWEI(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const noexcept { + fp_vkCmdDrawClusterIndirectHUAWEI(commandBuffer, buffer, offset); + } +#endif + void cmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const noexcept { + fp_vkCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); + } + void cmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride); + } +#if (defined(VK_VERSION_1_2)) + void cmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } +#endif +#if (defined(VK_AMD_draw_indirect_count)) + void cmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, 
VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } +#endif +#if (defined(VK_KHR_draw_indirect_count)) + void cmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } +#endif + void cmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride); + } +#if (defined(VK_EXT_transform_feedback)) + void cmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride) const noexcept { + fp_vkCmdDrawIndirectByteCountEXT(commandBuffer, instanceCount, firstInstance, counterBuffer, counterBufferOffset, counterOffset, vertexStride); + } +#endif +#if (defined(VK_VERSION_1_2)) + void cmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } +#endif +#if (defined(VK_AMD_draw_indirect_count)) + void cmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, 
maxDrawCount, stride); + } +#endif +#if (defined(VK_KHR_draw_indirect_count)) + void cmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } +#endif +#if (defined(VK_EXT_mesh_shader)) + void cmdDrawMeshTasksEXT(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const noexcept { + fp_vkCmdDrawMeshTasksEXT(commandBuffer, groupCountX, groupCountY, groupCountZ); + } +#endif +#if (defined(VK_EXT_mesh_shader)) + void cmdDrawMeshTasksIndirectCountEXT(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawMeshTasksIndirectCountEXT(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } +#endif +#if (defined(VK_NV_mesh_shader)) + void cmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawMeshTasksIndirectCountNV(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); + } +#endif +#if (defined(VK_EXT_mesh_shader)) + void cmdDrawMeshTasksIndirectEXT(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const noexcept { + fp_vkCmdDrawMeshTasksIndirectEXT(commandBuffer, buffer, offset, drawCount, stride); + } +#endif +#if (defined(VK_NV_mesh_shader)) + void cmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const noexcept { + 
fp_vkCmdDrawMeshTasksIndirectNV(commandBuffer, buffer, offset, drawCount, stride); + } +#endif +#if (defined(VK_NV_mesh_shader)) + void cmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) const noexcept { + fp_vkCmdDrawMeshTasksNV(commandBuffer, taskCount, firstTask); + } +#endif +#if (defined(VK_EXT_multi_draw)) + void cmdDrawMultiEXT(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawInfoEXT* pVertexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride) const noexcept { + fp_vkCmdDrawMultiEXT(commandBuffer, drawCount, pVertexInfo, instanceCount, firstInstance, stride); + } +#endif +#if (defined(VK_EXT_multi_draw)) + void cmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawIndexedInfoEXT* pIndexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride, const int32_t* pVertexOffset) const noexcept { + fp_vkCmdDrawMultiIndexedEXT(commandBuffer, drawCount, pIndexInfo, instanceCount, firstInstance, stride, pVertexOffset); + } +#endif +#if (defined(VK_KHR_video_encode_queue)) + void cmdEncodeVideoKHR(VkCommandBuffer commandBuffer, const VkVideoEncodeInfoKHR* pEncodeInfo) const noexcept { + fp_vkCmdEncodeVideoKHR(commandBuffer, pEncodeInfo); + } +#endif +#if (defined(VK_EXT_conditional_rendering)) + void cmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer) const noexcept { + fp_vkCmdEndConditionalRenderingEXT(commandBuffer); + } +#endif +#if (defined(VK_EXT_debug_utils)) + void cmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) const noexcept { + fp_vkCmdEndDebugUtilsLabelEXT(commandBuffer); + } +#endif +#if (defined(VK_QCOM_tile_shading)) + void cmdEndPerTileExecutionQCOM(VkCommandBuffer commandBuffer, const VkPerTileEndInfoQCOM* pPerTileEndInfo) const noexcept { + fp_vkCmdEndPerTileExecutionQCOM(commandBuffer, pPerTileEndInfo); + } +#endif + void cmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query) const 
noexcept { + fp_vkCmdEndQuery(commandBuffer, queryPool, query); + } +#if (defined(VK_EXT_transform_feedback)) + void cmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) const noexcept { + fp_vkCmdEndQueryIndexedEXT(commandBuffer, queryPool, query, index); + } +#endif + void cmdEndRenderPass(VkCommandBuffer commandBuffer) const noexcept { + fp_vkCmdEndRenderPass(commandBuffer); + } +#if (defined(VK_VERSION_1_2)) + void cmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo) const noexcept { + fp_vkCmdEndRenderPass2(commandBuffer, pSubpassEndInfo); + } +#endif +#if (defined(VK_KHR_create_renderpass2)) + void cmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo) const noexcept { + fp_vkCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdEndRendering(VkCommandBuffer commandBuffer) const noexcept { + fp_vkCmdEndRendering(commandBuffer); + } +#endif +#if (defined(VK_EXT_fragment_density_map_offset)) + void cmdEndRendering2EXT(VkCommandBuffer commandBuffer, const VkRenderingEndInfoEXT* pRenderingEndInfo) const noexcept { + fp_vkCmdEndRendering2EXT(commandBuffer, pRenderingEndInfo); + } +#endif +#if (defined(VK_KHR_maintenance10)) + void cmdEndRendering2KHR(VkCommandBuffer commandBuffer, const VkRenderingEndInfoKHR* pRenderingEndInfo) const noexcept { + fp_vkCmdEndRendering2KHR(commandBuffer, pRenderingEndInfo); + } +#endif +#if (defined(VK_KHR_dynamic_rendering)) + void cmdEndRenderingKHR(VkCommandBuffer commandBuffer) const noexcept { + fp_vkCmdEndRenderingKHR(commandBuffer); + } +#endif +#if (defined(VK_EXT_transform_feedback)) + void cmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets) const noexcept { + 
fp_vkCmdEndTransformFeedbackEXT(commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets); + } +#endif +#if (defined(VK_KHR_video_queue)) + void cmdEndVideoCodingKHR(VkCommandBuffer commandBuffer, const VkVideoEndCodingInfoKHR* pEndCodingInfo) const noexcept { + fp_vkCmdEndVideoCodingKHR(commandBuffer, pEndCodingInfo); + } +#endif + void cmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers) const noexcept { + fp_vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers); + } +#if (defined(VK_EXT_device_generated_commands)) + void cmdExecuteGeneratedCommandsEXT(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoEXT* pGeneratedCommandsInfo) const noexcept { + fp_vkCmdExecuteGeneratedCommandsEXT(commandBuffer, isPreprocessed, pGeneratedCommandsInfo); + } +#endif +#if (defined(VK_NV_device_generated_commands)) + void cmdExecuteGeneratedCommandsNV(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo) const noexcept { + fp_vkCmdExecuteGeneratedCommandsNV(commandBuffer, isPreprocessed, pGeneratedCommandsInfo); + } +#endif + void cmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) const noexcept { + fp_vkCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data); + } +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + void cmdInitializeGraphScratchMemoryAMDX(VkCommandBuffer commandBuffer, VkPipeline executionGraph, VkDeviceAddress scratch, VkDeviceSize scratchSize) const noexcept { + fp_vkCmdInitializeGraphScratchMemoryAMDX(commandBuffer, executionGraph, scratch, scratchSize); + } +#endif +#if (defined(VK_EXT_debug_utils)) + void cmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo) const noexcept { + 
fp_vkCmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo); + } +#endif + void cmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const noexcept { + fp_vkCmdNextSubpass(commandBuffer, contents); + } +#if (defined(VK_VERSION_1_2)) + void cmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo) const noexcept { + fp_vkCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); + } +#endif +#if (defined(VK_KHR_create_renderpass2)) + void cmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo, const VkSubpassEndInfoKHR* pSubpassEndInfo) const noexcept { + fp_vkCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); + } +#endif +#if (defined(VK_NV_optical_flow)) + void cmdOpticalFlowExecuteNV(VkCommandBuffer commandBuffer, VkOpticalFlowSessionNV session, const VkOpticalFlowExecuteInfoNV* pExecuteInfo) const noexcept { + fp_vkCmdOpticalFlowExecuteNV(commandBuffer, session, pExecuteInfo); + } +#endif + void cmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) const noexcept { + fp_vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); + } +#if (defined(VK_VERSION_1_3)) + void cmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo) const noexcept { + fp_vkCmdPipelineBarrier2(commandBuffer, pDependencyInfo); + } +#endif +#if (defined(VK_KHR_synchronization2)) + void 
cmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR* pDependencyInfo) const noexcept { + fp_vkCmdPipelineBarrier2KHR(commandBuffer, pDependencyInfo); + } +#endif +#if (defined(VK_EXT_device_generated_commands)) + void cmdPreprocessGeneratedCommandsEXT(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoEXT* pGeneratedCommandsInfo, VkCommandBuffer stateCommandBuffer) const noexcept { + fp_vkCmdPreprocessGeneratedCommandsEXT(commandBuffer, pGeneratedCommandsInfo, stateCommandBuffer); + } +#endif +#if (defined(VK_NV_device_generated_commands)) + void cmdPreprocessGeneratedCommandsNV(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo) const noexcept { + fp_vkCmdPreprocessGeneratedCommandsNV(commandBuffer, pGeneratedCommandsInfo); + } +#endif + void cmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues) const noexcept { + fp_vkCmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues); + } +#if (defined(VK_VERSION_1_4)) + void cmdPushConstants2(VkCommandBuffer commandBuffer, const VkPushConstantsInfo* pPushConstantsInfo) const noexcept { + fp_vkCmdPushConstants2(commandBuffer, pPushConstantsInfo); + } +#endif +#if (defined(VK_KHR_maintenance6)) + void cmdPushConstants2KHR(VkCommandBuffer commandBuffer, const VkPushConstantsInfoKHR* pPushConstantsInfo) const noexcept { + fp_vkCmdPushConstants2KHR(commandBuffer, pPushConstantsInfo); + } +#endif +#if (defined(VK_VERSION_1_4)) + void cmdPushDescriptorSet(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites) const noexcept { + fp_vkCmdPushDescriptorSet(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites); + } +#endif +#if (defined(VK_VERSION_1_4)) + void 
cmdPushDescriptorSet2(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfo* pPushDescriptorSetInfo) const noexcept { + fp_vkCmdPushDescriptorSet2(commandBuffer, pPushDescriptorSetInfo); + } +#endif +#if (defined(VK_KHR_maintenance6)) + void cmdPushDescriptorSet2KHR(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfoKHR* pPushDescriptorSetInfo) const noexcept { + fp_vkCmdPushDescriptorSet2KHR(commandBuffer, pPushDescriptorSetInfo); + } +#endif +#if (defined(VK_KHR_push_descriptor)) + void cmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites) const noexcept { + fp_vkCmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites); + } +#endif +#if (defined(VK_VERSION_1_4)) + void cmdPushDescriptorSetWithTemplate(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData) const noexcept { + fp_vkCmdPushDescriptorSetWithTemplate(commandBuffer, descriptorUpdateTemplate, layout, set, pData); + } +#endif +#if (defined(VK_VERSION_1_4)) + void cmdPushDescriptorSetWithTemplate2(VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo) const noexcept { + fp_vkCmdPushDescriptorSetWithTemplate2(commandBuffer, pPushDescriptorSetWithTemplateInfo); + } +#endif +#if (defined(VK_KHR_maintenance6)) + void cmdPushDescriptorSetWithTemplate2KHR(VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfoKHR* pPushDescriptorSetWithTemplateInfo) const noexcept { + fp_vkCmdPushDescriptorSetWithTemplate2KHR(commandBuffer, pPushDescriptorSetWithTemplateInfo); + } +#endif +#if (defined(VK_KHR_push_descriptor) || defined(VK_KHR_descriptor_update_template)) + void cmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, 
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData) const noexcept { + fp_vkCmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData); + } +#endif + void cmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const noexcept { + fp_vkCmdResetEvent(commandBuffer, event, stageMask); + } +#if (defined(VK_VERSION_1_3)) + void cmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask) const noexcept { + fp_vkCmdResetEvent2(commandBuffer, event, stageMask); + } +#endif +#if (defined(VK_KHR_synchronization2)) + void cmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask) const noexcept { + fp_vkCmdResetEvent2KHR(commandBuffer, event, stageMask); + } +#endif + void cmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const noexcept { + fp_vkCmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount); + } + void cmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions) const noexcept { + fp_vkCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); + } +#if (defined(VK_VERSION_1_3)) + void cmdResolveImage2(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo) const noexcept { + fp_vkCmdResolveImage2(commandBuffer, pResolveImageInfo); + } +#endif +#if (defined(VK_KHR_copy_commands2)) + void cmdResolveImage2KHR(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo) const noexcept { + fp_vkCmdResolveImage2KHR(commandBuffer, pResolveImageInfo); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void 
cmdSetAlphaToCoverageEnableEXT(VkCommandBuffer commandBuffer, VkBool32 alphaToCoverageEnable) const noexcept { + fp_vkCmdSetAlphaToCoverageEnableEXT(commandBuffer, alphaToCoverageEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetAlphaToOneEnableEXT(VkCommandBuffer commandBuffer, VkBool32 alphaToOneEnable) const noexcept { + fp_vkCmdSetAlphaToOneEnableEXT(commandBuffer, alphaToOneEnable); + } +#endif +#if (defined(VK_EXT_attachment_feedback_loop_dynamic_state)) + void cmdSetAttachmentFeedbackLoopEnableEXT(VkCommandBuffer commandBuffer, VkImageAspectFlags aspectMask) const noexcept { + fp_vkCmdSetAttachmentFeedbackLoopEnableEXT(commandBuffer, aspectMask); + } +#endif + void cmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const noexcept { + fp_vkCmdSetBlendConstants(commandBuffer, blendConstants); + } +#if (defined(VK_NV_device_diagnostic_checkpoints)) + void cmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void* pCheckpointMarker) const noexcept { + fp_vkCmdSetCheckpointNV(commandBuffer, pCheckpointMarker); + } +#endif +#if (defined(VK_NV_shading_rate_image)) + void cmdSetCoarseSampleOrderNV(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders) const noexcept { + fp_vkCmdSetCoarseSampleOrderNV(commandBuffer, sampleOrderType, customSampleOrderCount, pCustomSampleOrders); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetColorBlendAdvancedEXT(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkColorBlendAdvancedEXT* pColorBlendAdvanced) const noexcept { + fp_vkCmdSetColorBlendAdvancedEXT(commandBuffer, firstAttachment, attachmentCount, pColorBlendAdvanced); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void 
cmdSetColorBlendEnableEXT(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkBool32* pColorBlendEnables) const noexcept { + fp_vkCmdSetColorBlendEnableEXT(commandBuffer, firstAttachment, attachmentCount, pColorBlendEnables); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetColorBlendEquationEXT(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkColorBlendEquationEXT* pColorBlendEquations) const noexcept { + fp_vkCmdSetColorBlendEquationEXT(commandBuffer, firstAttachment, attachmentCount, pColorBlendEquations); + } +#endif +#if (defined(VK_EXT_color_write_enable)) + void cmdSetColorWriteEnableEXT(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkBool32* pColorWriteEnables) const noexcept { + fp_vkCmdSetColorWriteEnableEXT(commandBuffer, attachmentCount, pColorWriteEnables); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetColorWriteMaskEXT(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkColorComponentFlags* pColorWriteMasks) const noexcept { + fp_vkCmdSetColorWriteMaskEXT(commandBuffer, firstAttachment, attachmentCount, pColorWriteMasks); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetConservativeRasterizationModeEXT(VkCommandBuffer commandBuffer, VkConservativeRasterizationModeEXT conservativeRasterizationMode) const noexcept { + fp_vkCmdSetConservativeRasterizationModeEXT(commandBuffer, conservativeRasterizationMode); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetCoverageModulationModeNV(VkCommandBuffer commandBuffer, VkCoverageModulationModeNV coverageModulationMode) const noexcept { + fp_vkCmdSetCoverageModulationModeNV(commandBuffer, coverageModulationMode); + } +#endif +#if 
(defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetCoverageModulationTableEnableNV(VkCommandBuffer commandBuffer, VkBool32 coverageModulationTableEnable) const noexcept { + fp_vkCmdSetCoverageModulationTableEnableNV(commandBuffer, coverageModulationTableEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetCoverageModulationTableNV(VkCommandBuffer commandBuffer, uint32_t coverageModulationTableCount, const float* pCoverageModulationTable) const noexcept { + fp_vkCmdSetCoverageModulationTableNV(commandBuffer, coverageModulationTableCount, pCoverageModulationTable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetCoverageReductionModeNV(VkCommandBuffer commandBuffer, VkCoverageReductionModeNV coverageReductionMode) const noexcept { + fp_vkCmdSetCoverageReductionModeNV(commandBuffer, coverageReductionMode); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetCoverageToColorEnableNV(VkCommandBuffer commandBuffer, VkBool32 coverageToColorEnable) const noexcept { + fp_vkCmdSetCoverageToColorEnableNV(commandBuffer, coverageToColorEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetCoverageToColorLocationNV(VkCommandBuffer commandBuffer, uint32_t coverageToColorLocation) const noexcept { + fp_vkCmdSetCoverageToColorLocationNV(commandBuffer, coverageToColorLocation); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetCullMode(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const noexcept { + fp_vkCmdSetCullMode(commandBuffer, cullMode); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const noexcept { + fp_vkCmdSetCullModeEXT(commandBuffer, cullMode); + } +#endif + void 
cmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) const noexcept { + fp_vkCmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor); + } +#if (defined(VK_EXT_depth_bias_control)) + void cmdSetDepthBias2EXT(VkCommandBuffer commandBuffer, const VkDepthBiasInfoEXT* pDepthBiasInfo) const noexcept { + fp_vkCmdSetDepthBias2EXT(commandBuffer, pDepthBiasInfo); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetDepthBiasEnable(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable) const noexcept { + fp_vkCmdSetDepthBiasEnable(commandBuffer, depthBiasEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + void cmdSetDepthBiasEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable) const noexcept { + fp_vkCmdSetDepthBiasEnableEXT(commandBuffer, depthBiasEnable); + } +#endif + void cmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) const noexcept { + fp_vkCmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds); + } +#if (defined(VK_VERSION_1_3)) + void cmdSetDepthBoundsTestEnable(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable) const noexcept { + fp_vkCmdSetDepthBoundsTestEnable(commandBuffer, depthBoundsTestEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable) const noexcept { + fp_vkCmdSetDepthBoundsTestEnableEXT(commandBuffer, depthBoundsTestEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetDepthClampEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthClampEnable) const noexcept { + fp_vkCmdSetDepthClampEnableEXT(commandBuffer, depthClampEnable); + } +#endif +#if (defined(VK_EXT_shader_object) || 
defined(VK_EXT_depth_clamp_control)) + void cmdSetDepthClampRangeEXT(VkCommandBuffer commandBuffer, VkDepthClampModeEXT depthClampMode, const VkDepthClampRangeEXT* pDepthClampRange) const noexcept { + fp_vkCmdSetDepthClampRangeEXT(commandBuffer, depthClampMode, pDepthClampRange); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetDepthClipEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthClipEnable) const noexcept { + fp_vkCmdSetDepthClipEnableEXT(commandBuffer, depthClipEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetDepthClipNegativeOneToOneEXT(VkCommandBuffer commandBuffer, VkBool32 negativeOneToOne) const noexcept { + fp_vkCmdSetDepthClipNegativeOneToOneEXT(commandBuffer, negativeOneToOne); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetDepthCompareOp(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const noexcept { + fp_vkCmdSetDepthCompareOp(commandBuffer, depthCompareOp); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const noexcept { + fp_vkCmdSetDepthCompareOpEXT(commandBuffer, depthCompareOp); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetDepthTestEnable(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const noexcept { + fp_vkCmdSetDepthTestEnable(commandBuffer, depthTestEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const noexcept { + fp_vkCmdSetDepthTestEnableEXT(commandBuffer, depthTestEnable); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetDepthWriteEnable(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const noexcept { + fp_vkCmdSetDepthWriteEnable(commandBuffer, depthWriteEnable); + } +#endif +#if 
(defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const noexcept { + fp_vkCmdSetDepthWriteEnableEXT(commandBuffer, depthWriteEnable); + } +#endif +#if (defined(VK_KHR_maintenance6)) + void cmdSetDescriptorBufferOffsets2EXT(VkCommandBuffer commandBuffer, const VkSetDescriptorBufferOffsetsInfoEXT* pSetDescriptorBufferOffsetsInfo) const noexcept { + fp_vkCmdSetDescriptorBufferOffsets2EXT(commandBuffer, pSetDescriptorBufferOffsetsInfo); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + void cmdSetDescriptorBufferOffsetsEXT(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const uint32_t* pBufferIndices, const VkDeviceSize* pOffsets) const noexcept { + fp_vkCmdSetDescriptorBufferOffsetsEXT(commandBuffer, pipelineBindPoint, layout, firstSet, setCount, pBufferIndices, pOffsets); + } +#endif +#if (defined(VK_VERSION_1_1)) + void cmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const noexcept { + fp_vkCmdSetDeviceMask(commandBuffer, deviceMask); + } +#endif +#if (defined(VK_KHR_device_group)) + void cmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const noexcept { + fp_vkCmdSetDeviceMaskKHR(commandBuffer, deviceMask); + } +#endif +#if (defined(VK_EXT_discard_rectangles)) + void cmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles) const noexcept { + fp_vkCmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount, pDiscardRectangles); + } +#endif +#if (defined(VK_EXT_discard_rectangles)) && VK_HEADER_VERSION >= 241 + void cmdSetDiscardRectangleEnableEXT(VkCommandBuffer commandBuffer, VkBool32 discardRectangleEnable) const noexcept { + fp_vkCmdSetDiscardRectangleEnableEXT(commandBuffer, 
discardRectangleEnable); + } +#endif +#if (defined(VK_EXT_discard_rectangles)) && VK_HEADER_VERSION >= 241 + void cmdSetDiscardRectangleModeEXT(VkCommandBuffer commandBuffer, VkDiscardRectangleModeEXT discardRectangleMode) const noexcept { + fp_vkCmdSetDiscardRectangleModeEXT(commandBuffer, discardRectangleMode); + } +#endif + void cmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const noexcept { + fp_vkCmdSetEvent(commandBuffer, event, stageMask); + } +#if (defined(VK_VERSION_1_3)) + void cmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo) const noexcept { + fp_vkCmdSetEvent2(commandBuffer, event, pDependencyInfo); + } +#endif +#if (defined(VK_KHR_synchronization2)) + void cmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR* pDependencyInfo) const noexcept { + fp_vkCmdSetEvent2KHR(commandBuffer, event, pDependencyInfo); + } +#endif +#if (defined(VK_NV_scissor_exclusive)) && VK_HEADER_VERSION >= 241 + void cmdSetExclusiveScissorEnableNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkBool32* pExclusiveScissorEnables) const noexcept { + fp_vkCmdSetExclusiveScissorEnableNV(commandBuffer, firstExclusiveScissor, exclusiveScissorCount, pExclusiveScissorEnables); + } +#endif +#if (defined(VK_NV_scissor_exclusive)) + void cmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors) const noexcept { + fp_vkCmdSetExclusiveScissorNV(commandBuffer, firstExclusiveScissor, exclusiveScissorCount, pExclusiveScissors); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetExtraPrimitiveOverestimationSizeEXT(VkCommandBuffer commandBuffer, float extraPrimitiveOverestimationSize) const noexcept { + fp_vkCmdSetExtraPrimitiveOverestimationSizeEXT(commandBuffer, 
extraPrimitiveOverestimationSize); + } +#endif +#if (defined(VK_NV_fragment_shading_rate_enums)) + void cmdSetFragmentShadingRateEnumNV(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const noexcept { + fp_vkCmdSetFragmentShadingRateEnumNV(commandBuffer, shadingRate, combinerOps); + } +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + void cmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const noexcept { + fp_vkCmdSetFragmentShadingRateKHR(commandBuffer, pFragmentSize, combinerOps); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetFrontFace(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const noexcept { + fp_vkCmdSetFrontFace(commandBuffer, frontFace); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const noexcept { + fp_vkCmdSetFrontFaceEXT(commandBuffer, frontFace); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetLineRasterizationModeEXT(VkCommandBuffer commandBuffer, VkLineRasterizationModeEXT lineRasterizationMode) const noexcept { + fp_vkCmdSetLineRasterizationModeEXT(commandBuffer, lineRasterizationMode); + } +#endif +#if (defined(VK_VERSION_1_4)) + void cmdSetLineStipple(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern) const noexcept { + fp_vkCmdSetLineStipple(commandBuffer, lineStippleFactor, lineStipplePattern); + } +#endif +#if (defined(VK_EXT_line_rasterization)) + void cmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern) const noexcept { + fp_vkCmdSetLineStippleEXT(commandBuffer, lineStippleFactor, lineStipplePattern); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || 
defined(VK_EXT_shader_object)) + void cmdSetLineStippleEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stippledLineEnable) const noexcept { + fp_vkCmdSetLineStippleEnableEXT(commandBuffer, stippledLineEnable); + } +#endif +#if (defined(VK_KHR_line_rasterization)) + void cmdSetLineStippleKHR(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern) const noexcept { + fp_vkCmdSetLineStippleKHR(commandBuffer, lineStippleFactor, lineStipplePattern); + } +#endif + void cmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const noexcept { + fp_vkCmdSetLineWidth(commandBuffer, lineWidth); + } +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + void cmdSetLogicOpEXT(VkCommandBuffer commandBuffer, VkLogicOp logicOp) const noexcept { + fp_vkCmdSetLogicOpEXT(commandBuffer, logicOp); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetLogicOpEnableEXT(VkCommandBuffer commandBuffer, VkBool32 logicOpEnable) const noexcept { + fp_vkCmdSetLogicOpEnableEXT(commandBuffer, logicOpEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + void cmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer, uint32_t patchControlPoints) const noexcept { + fp_vkCmdSetPatchControlPointsEXT(commandBuffer, patchControlPoints); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult cmdSetPerformanceMarkerINTEL(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo) const noexcept { + return fp_vkCmdSetPerformanceMarkerINTEL(commandBuffer, pMarkerInfo); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult cmdSetPerformanceOverrideINTEL(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo) const noexcept { + return fp_vkCmdSetPerformanceOverrideINTEL(commandBuffer, pOverrideInfo); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult 
cmdSetPerformanceStreamMarkerINTEL(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo) const noexcept { + return fp_vkCmdSetPerformanceStreamMarkerINTEL(commandBuffer, pMarkerInfo); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetPolygonModeEXT(VkCommandBuffer commandBuffer, VkPolygonMode polygonMode) const noexcept { + fp_vkCmdSetPolygonModeEXT(commandBuffer, polygonMode); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetPrimitiveRestartEnable(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable) const noexcept { + fp_vkCmdSetPrimitiveRestartEnable(commandBuffer, primitiveRestartEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + void cmdSetPrimitiveRestartEnableEXT(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable) const noexcept { + fp_vkCmdSetPrimitiveRestartEnableEXT(commandBuffer, primitiveRestartEnable); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetPrimitiveTopology(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology) const noexcept { + fp_vkCmdSetPrimitiveTopology(commandBuffer, primitiveTopology); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology) const noexcept { + fp_vkCmdSetPrimitiveTopologyEXT(commandBuffer, primitiveTopology); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetProvokingVertexModeEXT(VkCommandBuffer commandBuffer, VkProvokingVertexModeEXT provokingVertexMode) const noexcept { + fp_vkCmdSetProvokingVertexModeEXT(commandBuffer, provokingVertexMode); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetRasterizationSamplesEXT(VkCommandBuffer commandBuffer, VkSampleCountFlagBits 
rasterizationSamples) const noexcept { + fp_vkCmdSetRasterizationSamplesEXT(commandBuffer, rasterizationSamples); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetRasterizationStreamEXT(VkCommandBuffer commandBuffer, uint32_t rasterizationStream) const noexcept { + fp_vkCmdSetRasterizationStreamEXT(commandBuffer, rasterizationStream); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetRasterizerDiscardEnable(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable) const noexcept { + fp_vkCmdSetRasterizerDiscardEnable(commandBuffer, rasterizerDiscardEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + void cmdSetRasterizerDiscardEnableEXT(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable) const noexcept { + fp_vkCmdSetRasterizerDiscardEnableEXT(commandBuffer, rasterizerDiscardEnable); + } +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + void cmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize) const noexcept { + fp_vkCmdSetRayTracingPipelineStackSizeKHR(commandBuffer, pipelineStackSize); + } +#endif +#if (defined(VK_VERSION_1_4)) + void cmdSetRenderingAttachmentLocations(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfo* pLocationInfo) const noexcept { + fp_vkCmdSetRenderingAttachmentLocations(commandBuffer, pLocationInfo); + } +#endif +#if (defined(VK_KHR_dynamic_rendering_local_read)) + void cmdSetRenderingAttachmentLocationsKHR(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfoKHR* pLocationInfo) const noexcept { + fp_vkCmdSetRenderingAttachmentLocationsKHR(commandBuffer, pLocationInfo); + } +#endif +#if (defined(VK_VERSION_1_4)) + void cmdSetRenderingInputAttachmentIndices(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo) const noexcept { + 
fp_vkCmdSetRenderingInputAttachmentIndices(commandBuffer, pInputAttachmentIndexInfo); + } +#endif +#if (defined(VK_KHR_dynamic_rendering_local_read)) + void cmdSetRenderingInputAttachmentIndicesKHR(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfoKHR* pInputAttachmentIndexInfo) const noexcept { + fp_vkCmdSetRenderingInputAttachmentIndicesKHR(commandBuffer, pInputAttachmentIndexInfo); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetRepresentativeFragmentTestEnableNV(VkCommandBuffer commandBuffer, VkBool32 representativeFragmentTestEnable) const noexcept { + fp_vkCmdSetRepresentativeFragmentTestEnableNV(commandBuffer, representativeFragmentTestEnable); + } +#endif +#if (defined(VK_EXT_sample_locations)) + void cmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo) const noexcept { + fp_vkCmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetSampleLocationsEnableEXT(VkCommandBuffer commandBuffer, VkBool32 sampleLocationsEnable) const noexcept { + fp_vkCmdSetSampleLocationsEnableEXT(commandBuffer, sampleLocationsEnable); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetSampleMaskEXT(VkCommandBuffer commandBuffer, VkSampleCountFlagBits samples, const VkSampleMask* pSampleMask) const noexcept { + fp_vkCmdSetSampleMaskEXT(commandBuffer, samples, pSampleMask); + } +#endif + void cmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors) const noexcept { + fp_vkCmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors); + } +#if (defined(VK_VERSION_1_3)) + void cmdSetScissorWithCount(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors) const noexcept { + 
fp_vkCmdSetScissorWithCount(commandBuffer, scissorCount, pScissors); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors) const noexcept { + fp_vkCmdSetScissorWithCountEXT(commandBuffer, scissorCount, pScissors); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetShadingRateImageEnableNV(VkCommandBuffer commandBuffer, VkBool32 shadingRateImageEnable) const noexcept { + fp_vkCmdSetShadingRateImageEnableNV(commandBuffer, shadingRateImageEnable); + } +#endif + void cmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) const noexcept { + fp_vkCmdSetStencilCompareMask(commandBuffer, faceMask, compareMask); + } +#if (defined(VK_VERSION_1_3)) + void cmdSetStencilOp(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const noexcept { + fp_vkCmdSetStencilOp(commandBuffer, faceMask, failOp, passOp, depthFailOp, compareOp); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const noexcept { + fp_vkCmdSetStencilOpEXT(commandBuffer, faceMask, failOp, passOp, depthFailOp, compareOp); + } +#endif + void cmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) const noexcept { + fp_vkCmdSetStencilReference(commandBuffer, faceMask, reference); + } +#if (defined(VK_VERSION_1_3)) + void cmdSetStencilTestEnable(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const noexcept { + fp_vkCmdSetStencilTestEnable(commandBuffer, stencilTestEnable); + } +#endif +#if 
(defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const noexcept { + fp_vkCmdSetStencilTestEnableEXT(commandBuffer, stencilTestEnable); + } +#endif + void cmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) const noexcept { + fp_vkCmdSetStencilWriteMask(commandBuffer, faceMask, writeMask); + } +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetTessellationDomainOriginEXT(VkCommandBuffer commandBuffer, VkTessellationDomainOrigin domainOrigin) const noexcept { + fp_vkCmdSetTessellationDomainOriginEXT(commandBuffer, domainOrigin); + } +#endif +#if (defined(VK_EXT_vertex_input_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetVertexInputEXT(VkCommandBuffer commandBuffer, uint32_t vertexBindingDescriptionCount, const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions, uint32_t vertexAttributeDescriptionCount, const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions) const noexcept { + fp_vkCmdSetVertexInputEXT(commandBuffer, vertexBindingDescriptionCount, pVertexBindingDescriptions, vertexAttributeDescriptionCount, pVertexAttributeDescriptions); + } +#endif + void cmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports) const noexcept { + fp_vkCmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports); + } +#if (defined(VK_NV_shading_rate_image)) + void cmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes) const noexcept { + fp_vkCmdSetViewportShadingRatePaletteNV(commandBuffer, firstViewport, viewportCount, pShadingRatePalettes); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void 
cmdSetViewportSwizzleNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportSwizzleNV* pViewportSwizzles) const noexcept { + fp_vkCmdSetViewportSwizzleNV(commandBuffer, firstViewport, viewportCount, pViewportSwizzles); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + void cmdSetViewportWScalingEnableNV(VkCommandBuffer commandBuffer, VkBool32 viewportWScalingEnable) const noexcept { + fp_vkCmdSetViewportWScalingEnableNV(commandBuffer, viewportWScalingEnable); + } +#endif +#if (defined(VK_NV_clip_space_w_scaling)) + void cmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings) const noexcept { + fp_vkCmdSetViewportWScalingNV(commandBuffer, firstViewport, viewportCount, pViewportWScalings); + } +#endif +#if (defined(VK_VERSION_1_3)) + void cmdSetViewportWithCount(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports) const noexcept { + fp_vkCmdSetViewportWithCount(commandBuffer, viewportCount, pViewports); + } +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + void cmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports) const noexcept { + fp_vkCmdSetViewportWithCountEXT(commandBuffer, viewportCount, pViewports); + } +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + void cmdSubpassShadingHUAWEI(VkCommandBuffer commandBuffer) const noexcept { + fp_vkCmdSubpassShadingHUAWEI(commandBuffer); + } +#endif +#if (defined(VK_KHR_ray_tracing_maintenance1)) + void cmdTraceRaysIndirect2KHR(VkCommandBuffer commandBuffer, VkDeviceAddress indirectDeviceAddress) const noexcept { + fp_vkCmdTraceRaysIndirect2KHR(commandBuffer, indirectDeviceAddress); + } +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + void cmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const 
VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress) const noexcept { + fp_vkCmdTraceRaysIndirectKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, indirectDeviceAddress); + } +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + void cmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) const noexcept { + fp_vkCmdTraceRaysKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, width, height, depth); + } +#endif +#if (defined(VK_NV_ray_tracing)) + void cmdTraceRaysNV(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth) const noexcept { + fp_vkCmdTraceRaysNV(commandBuffer, raygenShaderBindingTableBuffer, raygenShaderBindingOffset, missShaderBindingTableBuffer, missShaderBindingOffset, missShaderBindingStride, hitShaderBindingTableBuffer, hitShaderBindingOffset, hitShaderBindingStride, callableShaderBindingTableBuffer, 
callableShaderBindingOffset, callableShaderBindingStride, width, height, depth); + } +#endif + void cmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData) const noexcept { + fp_vkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData); + } +#if (defined(VK_NV_device_generated_commands_compute)) + void cmdUpdatePipelineIndirectBufferNV(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const noexcept { + fp_vkCmdUpdatePipelineIndirectBufferNV(commandBuffer, pipelineBindPoint, pipeline); + } +#endif + void cmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) const noexcept { + fp_vkCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); + } +#if (defined(VK_VERSION_1_3)) + void cmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos) const noexcept { + fp_vkCmdWaitEvents2(commandBuffer, eventCount, pEvents, pDependencyInfos); + } +#endif +#if (defined(VK_KHR_synchronization2)) + void cmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfoKHR* pDependencyInfos) const noexcept { + fp_vkCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void cmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer, uint32_t 
accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const noexcept { + fp_vkCmdWriteAccelerationStructuresPropertiesKHR(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery); + } +#endif +#if (defined(VK_NV_ray_tracing)) + void cmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const noexcept { + fp_vkCmdWriteAccelerationStructuresPropertiesNV(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery); + } +#endif +#if (defined(VK_AMD_buffer_marker)) + void cmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const noexcept { + fp_vkCmdWriteBufferMarker2AMD(commandBuffer, stage, dstBuffer, dstOffset, marker); + } +#endif +#if (defined(VK_AMD_buffer_marker)) + void cmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const noexcept { + fp_vkCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void cmdWriteMicromapsPropertiesEXT(VkCommandBuffer commandBuffer, uint32_t micromapCount, const VkMicromapEXT* pMicromaps, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const noexcept { + fp_vkCmdWriteMicromapsPropertiesEXT(commandBuffer, micromapCount, pMicromaps, queryType, queryPool, firstQuery); + } +#endif + void cmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query) const noexcept { + fp_vkCmdWriteTimestamp(commandBuffer, 
pipelineStage, queryPool, query); + } +#if (defined(VK_VERSION_1_3)) + void cmdWriteTimestamp2(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query) const noexcept { + fp_vkCmdWriteTimestamp2(commandBuffer, stage, queryPool, query); + } +#endif +#if (defined(VK_KHR_synchronization2)) + void cmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query) const noexcept { + fp_vkCmdWriteTimestamp2KHR(commandBuffer, stage, queryPool, query); + } +#endif +#if (defined(VK_NV_ray_tracing)) + VkResult compileDeferredNV(VkPipeline pipeline, uint32_t shader) const noexcept { + return fp_vkCompileDeferredNV(device, pipeline, shader); + } +#endif +#if (defined(VK_NV_cooperative_vector)) + VkResult convertCooperativeVectorMatrixNV(const VkConvertCooperativeVectorMatrixInfoNV* pInfo) const noexcept { + return fp_vkConvertCooperativeVectorMatrixNV(device, pInfo); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + VkResult copyAccelerationStructureKHR(VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo) const noexcept { + return fp_vkCopyAccelerationStructureKHR(device, deferredOperation, pInfo); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + VkResult copyAccelerationStructureToMemoryKHR(VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) const noexcept { + return fp_vkCopyAccelerationStructureToMemoryKHR(device, deferredOperation, pInfo); + } +#endif +#if (defined(VK_VERSION_1_4)) + VkResult copyImageToImage(const VkCopyImageToImageInfo* pCopyImageToImageInfo) const noexcept { + return fp_vkCopyImageToImage(device, pCopyImageToImageInfo); + } +#endif +#if (defined(VK_EXT_host_image_copy)) + VkResult copyImageToImageEXT(const VkCopyImageToImageInfoEXT* pCopyImageToImageInfo) const noexcept { + return fp_vkCopyImageToImageEXT(device, pCopyImageToImageInfo); + } +#endif +#if 
(defined(VK_VERSION_1_4)) + VkResult copyImageToMemory(const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo) const noexcept { + return fp_vkCopyImageToMemory(device, pCopyImageToMemoryInfo); + } +#endif +#if (defined(VK_EXT_host_image_copy)) + VkResult copyImageToMemoryEXT(const VkCopyImageToMemoryInfoEXT* pCopyImageToMemoryInfo) const noexcept { + return fp_vkCopyImageToMemoryEXT(device, pCopyImageToMemoryInfo); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + VkResult copyMemoryToAccelerationStructureKHR(VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) const noexcept { + return fp_vkCopyMemoryToAccelerationStructureKHR(device, deferredOperation, pInfo); + } +#endif +#if (defined(VK_VERSION_1_4)) + VkResult copyMemoryToImage(const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo) const noexcept { + return fp_vkCopyMemoryToImage(device, pCopyMemoryToImageInfo); + } +#endif +#if (defined(VK_EXT_host_image_copy)) + VkResult copyMemoryToImageEXT(const VkCopyMemoryToImageInfoEXT* pCopyMemoryToImageInfo) const noexcept { + return fp_vkCopyMemoryToImageEXT(device, pCopyMemoryToImageInfo); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + VkResult copyMemoryToMicromapEXT(VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToMicromapInfoEXT* pInfo) const noexcept { + return fp_vkCopyMemoryToMicromapEXT(device, deferredOperation, pInfo); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + VkResult copyMicromapEXT(VkDeferredOperationKHR deferredOperation, const VkCopyMicromapInfoEXT* pInfo) const noexcept { + return fp_vkCopyMicromapEXT(device, deferredOperation, pInfo); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + VkResult copyMicromapToMemoryEXT(VkDeferredOperationKHR deferredOperation, const VkCopyMicromapToMemoryInfoEXT* pInfo) const noexcept { + return fp_vkCopyMicromapToMemoryEXT(device, deferredOperation, pInfo); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + VkResult 
createAccelerationStructureKHR(const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure) const noexcept { + return fp_vkCreateAccelerationStructureKHR(device, pCreateInfo, pAllocator, pAccelerationStructure); + } +#endif +#if (defined(VK_NV_ray_tracing)) + VkResult createAccelerationStructureNV(const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure) const noexcept { + return fp_vkCreateAccelerationStructureNV(device, pCreateInfo, pAllocator, pAccelerationStructure); + } +#endif + VkResult createBuffer(const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer) const noexcept { + return fp_vkCreateBuffer(device, pCreateInfo, pAllocator, pBuffer); + } +#if (defined(VK_FUCHSIA_buffer_collection)) + VkResult createBufferCollectionFUCHSIA(const VkBufferCollectionCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferCollectionFUCHSIA* pCollection) const noexcept { + return fp_vkCreateBufferCollectionFUCHSIA(device, pCreateInfo, pAllocator, pCollection); + } +#endif + VkResult createBufferView(const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView) const noexcept { + return fp_vkCreateBufferView(device, pCreateInfo, pAllocator, pView); + } + VkResult createCommandPool(const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool) const noexcept { + return fp_vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool); + } + VkResult createComputePipelines(VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) const noexcept { + return fp_vkCreateComputePipelines(device, pipelineCache, createInfoCount, 
pCreateInfos, pAllocator, pPipelines); + } +#if (defined(VK_NVX_binary_import)) + VkResult createCuFunctionNVX(const VkCuFunctionCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuFunctionNVX* pFunction) const noexcept { + return fp_vkCreateCuFunctionNVX(device, pCreateInfo, pAllocator, pFunction); + } +#endif +#if (defined(VK_NVX_binary_import)) + VkResult createCuModuleNVX(const VkCuModuleCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuModuleNVX* pModule) const noexcept { + return fp_vkCreateCuModuleNVX(device, pCreateInfo, pAllocator, pModule); + } +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + VkResult createCudaFunctionNV(const VkCudaFunctionCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCudaFunctionNV* pFunction) const noexcept { + return fp_vkCreateCudaFunctionNV(device, pCreateInfo, pAllocator, pFunction); + } +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + VkResult createCudaModuleNV(const VkCudaModuleCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCudaModuleNV* pModule) const noexcept { + return fp_vkCreateCudaModuleNV(device, pCreateInfo, pAllocator, pModule); + } +#endif +#if (defined(VK_ARM_data_graph)) + VkResult createDataGraphPipelineSessionARM(const VkDataGraphPipelineSessionCreateInfoARM* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDataGraphPipelineSessionARM* pSession) const noexcept { + return fp_vkCreateDataGraphPipelineSessionARM(device, pCreateInfo, pAllocator, pSession); + } +#endif +#if (defined(VK_ARM_data_graph)) + VkResult createDataGraphPipelinesARM(VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkDataGraphPipelineCreateInfoARM* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) const noexcept { + return fp_vkCreateDataGraphPipelinesARM(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } 
+#endif +#if (defined(VK_KHR_deferred_host_operations)) + VkResult createDeferredOperationKHR(const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation) const noexcept { + return fp_vkCreateDeferredOperationKHR(device, pAllocator, pDeferredOperation); + } +#endif + VkResult createDescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool) const noexcept { + return fp_vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool); + } + VkResult createDescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) const noexcept { + return fp_vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout); + } +#if (defined(VK_VERSION_1_1)) + VkResult createDescriptorUpdateTemplate(const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) const noexcept { + return fp_vkCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate); + } +#endif +#if (defined(VK_KHR_descriptor_update_template)) + VkResult createDescriptorUpdateTemplateKHR(const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate) const noexcept { + return fp_vkCreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate); + } +#endif + VkResult createEvent(const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent) const noexcept { + return fp_vkCreateEvent(device, pCreateInfo, pAllocator, pEvent); + } +#if (defined(VK_AMDX_shader_enqueue)) + VkResult createExecutionGraphPipelinesAMDX(VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkExecutionGraphPipelineCreateInfoAMDX* pCreateInfos, const 
VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) const noexcept { + return fp_vkCreateExecutionGraphPipelinesAMDX(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } +#endif +#if (defined(VK_NV_external_compute_queue)) + VkResult createExternalComputeQueueNV(const VkExternalComputeQueueCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkExternalComputeQueueNV* pExternalQueue) const noexcept { + return fp_vkCreateExternalComputeQueueNV(device, pCreateInfo, pAllocator, pExternalQueue); + } +#endif + VkResult createFence(const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence) const noexcept { + return fp_vkCreateFence(device, pCreateInfo, pAllocator, pFence); + } + VkResult createFramebuffer(const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer) const noexcept { + return fp_vkCreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer); + } + VkResult createGraphicsPipelines(VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) const noexcept { + return fp_vkCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } + VkResult createImage(const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage) const noexcept { + return fp_vkCreateImage(device, pCreateInfo, pAllocator, pImage); + } + VkResult createImageView(const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView) const noexcept { + return fp_vkCreateImageView(device, pCreateInfo, pAllocator, pView); + } +#if (defined(VK_EXT_device_generated_commands)) + VkResult createIndirectCommandsLayoutEXT(const VkIndirectCommandsLayoutCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, 
VkIndirectCommandsLayoutEXT* pIndirectCommandsLayout) const noexcept { + return fp_vkCreateIndirectCommandsLayoutEXT(device, pCreateInfo, pAllocator, pIndirectCommandsLayout); + } +#endif +#if (defined(VK_NV_device_generated_commands)) + VkResult createIndirectCommandsLayoutNV(const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout) const noexcept { + return fp_vkCreateIndirectCommandsLayoutNV(device, pCreateInfo, pAllocator, pIndirectCommandsLayout); + } +#endif +#if (defined(VK_EXT_device_generated_commands)) + VkResult createIndirectExecutionSetEXT(const VkIndirectExecutionSetCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectExecutionSetEXT* pIndirectExecutionSet) const noexcept { + return fp_vkCreateIndirectExecutionSetEXT(device, pCreateInfo, pAllocator, pIndirectExecutionSet); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + VkResult createMicromapEXT(const VkMicromapCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkMicromapEXT* pMicromap) const noexcept { + return fp_vkCreateMicromapEXT(device, pCreateInfo, pAllocator, pMicromap); + } +#endif +#if (defined(VK_NV_optical_flow)) + VkResult createOpticalFlowSessionNV(const VkOpticalFlowSessionCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkOpticalFlowSessionNV* pSession) const noexcept { + return fp_vkCreateOpticalFlowSessionNV(device, pCreateInfo, pAllocator, pSession); + } +#endif +#if (defined(VK_KHR_pipeline_binary)) + VkResult createPipelineBinariesKHR(const VkPipelineBinaryCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineBinaryHandlesInfoKHR* pBinaries) const noexcept { + return fp_vkCreatePipelineBinariesKHR(device, pCreateInfo, pAllocator, pBinaries); + } +#endif + VkResult createPipelineCache(const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* 
pPipelineCache) const noexcept { + return fp_vkCreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache); + } + VkResult createPipelineLayout(const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout) const noexcept { + return fp_vkCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout); + } +#if (defined(VK_VERSION_1_3)) + VkResult createPrivateDataSlot(const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot) const noexcept { + return fp_vkCreatePrivateDataSlot(device, pCreateInfo, pAllocator, pPrivateDataSlot); + } +#endif +#if (defined(VK_EXT_private_data)) + VkResult createPrivateDataSlotEXT(const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlotEXT* pPrivateDataSlot) const noexcept { + return fp_vkCreatePrivateDataSlotEXT(device, pCreateInfo, pAllocator, pPrivateDataSlot); + } +#endif + VkResult createQueryPool(const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool) const noexcept { + return fp_vkCreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool); + } +#if (defined(VK_KHR_ray_tracing_pipeline)) + VkResult createRayTracingPipelinesKHR(VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) const noexcept { + return fp_vkCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } +#endif +#if (defined(VK_NV_ray_tracing)) + VkResult createRayTracingPipelinesNV(VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) const noexcept { + return 
fp_vkCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); + } +#endif + VkResult createRenderPass(const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass) const noexcept { + return fp_vkCreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); + } +#if (defined(VK_VERSION_1_2)) + VkResult createRenderPass2(const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass) const noexcept { + return fp_vkCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass); + } +#endif +#if (defined(VK_KHR_create_renderpass2)) + VkResult createRenderPass2KHR(const VkRenderPassCreateInfo2KHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass) const noexcept { + return fp_vkCreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass); + } +#endif + VkResult createSampler(const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler) const noexcept { + return fp_vkCreateSampler(device, pCreateInfo, pAllocator, pSampler); + } +#if (defined(VK_VERSION_1_1)) + VkResult createSamplerYcbcrConversion(const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) const noexcept { + return fp_vkCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion); + } +#endif +#if (defined(VK_KHR_sampler_ycbcr_conversion)) + VkResult createSamplerYcbcrConversionKHR(const VkSamplerYcbcrConversionCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversionKHR* pYcbcrConversion) const noexcept { + return fp_vkCreateSamplerYcbcrConversionKHR(device, pCreateInfo, pAllocator, pYcbcrConversion); + } +#endif + VkResult createSemaphore(const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore) 
const noexcept { + return fp_vkCreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore); + } + VkResult createShaderModule(const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule) const noexcept { + return fp_vkCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule); + } +#if (defined(VK_EXT_shader_object)) + VkResult createShadersEXT(uint32_t createInfoCount, const VkShaderCreateInfoEXT* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkShaderEXT* pShaders) const noexcept { + return fp_vkCreateShadersEXT(device, createInfoCount, pCreateInfos, pAllocator, pShaders); + } +#endif +#if (defined(VK_KHR_display_swapchain)) + VkResult createSharedSwapchainsKHR(uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains) const noexcept { + return fp_vkCreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains); + } +#endif +#if (defined(VK_KHR_swapchain)) + VkResult createSwapchainKHR(const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain) const noexcept { + return fp_vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain); + } +#endif +#if (defined(VK_ARM_tensors)) + VkResult createTensorARM(const VkTensorCreateInfoARM* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkTensorARM* pTensor) const noexcept { + return fp_vkCreateTensorARM(device, pCreateInfo, pAllocator, pTensor); + } +#endif +#if (defined(VK_ARM_tensors)) + VkResult createTensorViewARM(const VkTensorViewCreateInfoARM* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkTensorViewARM* pView) const noexcept { + return fp_vkCreateTensorViewARM(device, pCreateInfo, pAllocator, pView); + } +#endif +#if (defined(VK_EXT_validation_cache)) + VkResult createValidationCacheEXT(const VkValidationCacheCreateInfoEXT* pCreateInfo, const 
VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache) const noexcept { + return fp_vkCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache); + } +#endif +#if (defined(VK_KHR_video_queue)) + VkResult createVideoSessionKHR(const VkVideoSessionCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkVideoSessionKHR* pVideoSession) const noexcept { + return fp_vkCreateVideoSessionKHR(device, pCreateInfo, pAllocator, pVideoSession); + } +#endif +#if (defined(VK_KHR_video_queue)) + VkResult createVideoSessionParametersKHR(const VkVideoSessionParametersCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkVideoSessionParametersKHR* pVideoSessionParameters) const noexcept { + return fp_vkCreateVideoSessionParametersKHR(device, pCreateInfo, pAllocator, pVideoSessionParameters); + } +#endif +#if (defined(VK_EXT_debug_marker)) + VkResult debugMarkerSetObjectNameEXT(const VkDebugMarkerObjectNameInfoEXT* pNameInfo) const noexcept { + return fp_vkDebugMarkerSetObjectNameEXT(device, pNameInfo); + } +#endif +#if (defined(VK_EXT_debug_marker)) + VkResult debugMarkerSetObjectTagEXT(const VkDebugMarkerObjectTagInfoEXT* pTagInfo) const noexcept { + return fp_vkDebugMarkerSetObjectTagEXT(device, pTagInfo); + } +#endif +#if (defined(VK_KHR_deferred_host_operations)) + VkResult deferredOperationJoinKHR(VkDeferredOperationKHR operation) const noexcept { + return fp_vkDeferredOperationJoinKHR(device, operation); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void destroyAccelerationStructureKHR(VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyAccelerationStructureKHR(device, accelerationStructure, pAllocator); + } +#endif +#if (defined(VK_NV_ray_tracing)) + void destroyAccelerationStructureNV(VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator) const noexcept { + 
fp_vkDestroyAccelerationStructureNV(device, accelerationStructure, pAllocator); + } +#endif + void destroyBuffer(VkBuffer buffer, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyBuffer(device, buffer, pAllocator); + } +#if (defined(VK_FUCHSIA_buffer_collection)) + void destroyBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyBufferCollectionFUCHSIA(device, collection, pAllocator); + } +#endif + void destroyBufferView(VkBufferView bufferView, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyBufferView(device, bufferView, pAllocator); + } + void destroyCommandPool(VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyCommandPool(device, commandPool, pAllocator); + } +#if (defined(VK_NVX_binary_import)) + void destroyCuFunctionNVX(VkCuFunctionNVX function, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyCuFunctionNVX(device, function, pAllocator); + } +#endif +#if (defined(VK_NVX_binary_import)) + void destroyCuModuleNVX(VkCuModuleNVX module, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyCuModuleNVX(device, module, pAllocator); + } +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + void destroyCudaFunctionNV(VkCudaFunctionNV function, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyCudaFunctionNV(device, function, pAllocator); + } +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + void destroyCudaModuleNV(VkCudaModuleNV module, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyCudaModuleNV(device, module, pAllocator); + } +#endif +#if (defined(VK_ARM_data_graph)) + void destroyDataGraphPipelineSessionARM(VkDataGraphPipelineSessionARM session, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDataGraphPipelineSessionARM(device, session, pAllocator); + } +#endif +#if 
(defined(VK_KHR_deferred_host_operations)) + void destroyDeferredOperationKHR(VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDeferredOperationKHR(device, operation, pAllocator); + } +#endif + void destroyDescriptorPool(VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDescriptorPool(device, descriptorPool, pAllocator); + } + void destroyDescriptorSetLayout(VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator); + } +#if (defined(VK_VERSION_1_1)) + void destroyDescriptorUpdateTemplate(VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator); + } +#endif +#if (defined(VK_KHR_descriptor_update_template)) + void destroyDescriptorUpdateTemplateKHR(VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator); + } +#endif + void destroyEvent(VkEvent event, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyEvent(device, event, pAllocator); + } +#if (defined(VK_NV_external_compute_queue)) + void destroyExternalComputeQueueNV(VkExternalComputeQueueNV externalQueue, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyExternalComputeQueueNV(device, externalQueue, pAllocator); + } +#endif + void destroyFence(VkFence fence, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyFence(device, fence, pAllocator); + } + void destroyFramebuffer(VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyFramebuffer(device, framebuffer, pAllocator); + } + void destroyImage(VkImage 
image, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyImage(device, image, pAllocator); + } + void destroyImageView(VkImageView imageView, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyImageView(device, imageView, pAllocator); + } +#if (defined(VK_EXT_device_generated_commands)) + void destroyIndirectCommandsLayoutEXT(VkIndirectCommandsLayoutEXT indirectCommandsLayout, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyIndirectCommandsLayoutEXT(device, indirectCommandsLayout, pAllocator); + } +#endif +#if (defined(VK_NV_device_generated_commands)) + void destroyIndirectCommandsLayoutNV(VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyIndirectCommandsLayoutNV(device, indirectCommandsLayout, pAllocator); + } +#endif +#if (defined(VK_EXT_device_generated_commands)) + void destroyIndirectExecutionSetEXT(VkIndirectExecutionSetEXT indirectExecutionSet, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyIndirectExecutionSetEXT(device, indirectExecutionSet, pAllocator); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void destroyMicromapEXT(VkMicromapEXT micromap, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyMicromapEXT(device, micromap, pAllocator); + } +#endif +#if (defined(VK_NV_optical_flow)) + void destroyOpticalFlowSessionNV(VkOpticalFlowSessionNV session, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyOpticalFlowSessionNV(device, session, pAllocator); + } +#endif + void destroyPipeline(VkPipeline pipeline, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyPipeline(device, pipeline, pAllocator); + } +#if (defined(VK_KHR_pipeline_binary)) + void destroyPipelineBinaryKHR(VkPipelineBinaryKHR pipelineBinary, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyPipelineBinaryKHR(device, pipelineBinary, 
pAllocator); + } +#endif + void destroyPipelineCache(VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyPipelineCache(device, pipelineCache, pAllocator); + } + void destroyPipelineLayout(VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyPipelineLayout(device, pipelineLayout, pAllocator); + } +#if (defined(VK_VERSION_1_3)) + void destroyPrivateDataSlot(VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyPrivateDataSlot(device, privateDataSlot, pAllocator); + } +#endif +#if (defined(VK_EXT_private_data)) + void destroyPrivateDataSlotEXT(VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyPrivateDataSlotEXT(device, privateDataSlot, pAllocator); + } +#endif + void destroyQueryPool(VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyQueryPool(device, queryPool, pAllocator); + } + void destroyRenderPass(VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyRenderPass(device, renderPass, pAllocator); + } + void destroySampler(VkSampler sampler, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroySampler(device, sampler, pAllocator); + } +#if (defined(VK_VERSION_1_1)) + void destroySamplerYcbcrConversion(VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator); + } +#endif +#if (defined(VK_KHR_sampler_ycbcr_conversion)) + void destroySamplerYcbcrConversionKHR(VkSamplerYcbcrConversionKHR ycbcrConversion, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator); + } +#endif + void destroySemaphore(VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator) 
const noexcept { + fp_vkDestroySemaphore(device, semaphore, pAllocator); + } +#if (defined(VK_EXT_shader_object)) + void destroyShaderEXT(VkShaderEXT shader, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyShaderEXT(device, shader, pAllocator); + } +#endif + void destroyShaderModule(VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyShaderModule(device, shaderModule, pAllocator); + } +#if (defined(VK_KHR_swapchain)) + void destroySwapchainKHR(VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroySwapchainKHR(device, swapchain, pAllocator); + } +#endif +#if (defined(VK_ARM_tensors)) + void destroyTensorARM(VkTensorARM tensor, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyTensorARM(device, tensor, pAllocator); + } +#endif +#if (defined(VK_ARM_tensors)) + void destroyTensorViewARM(VkTensorViewARM tensorView, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyTensorViewARM(device, tensorView, pAllocator); + } +#endif +#if (defined(VK_EXT_validation_cache)) + void destroyValidationCacheEXT(VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyValidationCacheEXT(device, validationCache, pAllocator); + } +#endif +#if (defined(VK_KHR_video_queue)) + void destroyVideoSessionKHR(VkVideoSessionKHR videoSession, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyVideoSessionKHR(device, videoSession, pAllocator); + } +#endif +#if (defined(VK_KHR_video_queue)) + void destroyVideoSessionParametersKHR(VkVideoSessionParametersKHR videoSessionParameters, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkDestroyVideoSessionParametersKHR(device, videoSessionParameters, pAllocator); + } +#endif + VkResult deviceWaitIdle() const noexcept { + return fp_vkDeviceWaitIdle(device); + } +#if (defined(VK_EXT_display_control)) + VkResult 
displayPowerControlEXT(VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo) const noexcept { + return fp_vkDisplayPowerControlEXT(device, display, pDisplayPowerInfo); + } +#endif + VkResult endCommandBuffer(VkCommandBuffer commandBuffer) const noexcept { + return fp_vkEndCommandBuffer(commandBuffer); + } +#if (defined(VK_EXT_metal_objects)) + void exportMetalObjectsEXT(VkExportMetalObjectsInfoEXT* pMetalObjectsInfo) const noexcept { + fp_vkExportMetalObjectsEXT(device, pMetalObjectsInfo); + } +#endif + VkResult flushMappedMemoryRanges(uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges) const noexcept { + return fp_vkFlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges); + } + void freeCommandBuffers(VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers) const noexcept { + fp_vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); + } + VkResult freeDescriptorSets(VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets) const noexcept { + return fp_vkFreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets); + } + void freeMemory(VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator) const noexcept { + fp_vkFreeMemory(device, memory, pAllocator); + } +#if (defined(VK_KHR_acceleration_structure)) + void getAccelerationStructureBuildSizesKHR(VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) const noexcept { + fp_vkGetAccelerationStructureBuildSizesKHR(device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + VkDeviceAddress getAccelerationStructureDeviceAddressKHR(const VkAccelerationStructureDeviceAddressInfoKHR* pInfo) const noexcept { + return 
fp_vkGetAccelerationStructureDeviceAddressKHR(device, pInfo); + } +#endif +#if (defined(VK_NV_ray_tracing)) + VkResult getAccelerationStructureHandleNV(VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData) const noexcept { + return fp_vkGetAccelerationStructureHandleNV(device, accelerationStructure, dataSize, pData); + } +#endif +#if (defined(VK_NV_ray_tracing)) + void getAccelerationStructureMemoryRequirementsNV(const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) const noexcept { + fp_vkGetAccelerationStructureMemoryRequirementsNV(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + VkResult getAccelerationStructureOpaqueCaptureDescriptorDataEXT(const VkAccelerationStructureCaptureDescriptorDataInfoEXT* pInfo, void* pData) const noexcept { + return fp_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT(device, pInfo, pData); + } +#endif +#if (defined(VK_ANDROID_external_memory_android_hardware_buffer)) + VkResult getAndroidHardwareBufferPropertiesANDROID(const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties) const noexcept { + return fp_vkGetAndroidHardwareBufferPropertiesANDROID(device, buffer, pProperties); + } +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + VkResult getBufferCollectionPropertiesFUCHSIA(VkBufferCollectionFUCHSIA collection, VkBufferCollectionPropertiesFUCHSIA* pProperties) const noexcept { + return fp_vkGetBufferCollectionPropertiesFUCHSIA(device, collection, pProperties); + } +#endif +#if (defined(VK_VERSION_1_2)) + VkDeviceAddress getBufferDeviceAddress(const VkBufferDeviceAddressInfo* pInfo) const noexcept { + return fp_vkGetBufferDeviceAddress(device, pInfo); + } +#endif +#if (defined(VK_EXT_buffer_device_address)) + VkDeviceAddress getBufferDeviceAddressEXT(const VkBufferDeviceAddressInfoEXT* pInfo) const noexcept { + return fp_vkGetBufferDeviceAddressEXT(device, pInfo); + 
} +#endif +#if (defined(VK_KHR_buffer_device_address)) + VkDeviceAddress getBufferDeviceAddressKHR(const VkBufferDeviceAddressInfoKHR* pInfo) const noexcept { + return fp_vkGetBufferDeviceAddressKHR(device, pInfo); + } +#endif + void getBufferMemoryRequirements(VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements) const noexcept { + fp_vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements); + } +#if (defined(VK_VERSION_1_1)) + void getBufferMemoryRequirements2(const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + void getBufferMemoryRequirements2KHR(const VkBufferMemoryRequirementsInfo2KHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) const noexcept { + fp_vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_VERSION_1_2)) + uint64_t getBufferOpaqueCaptureAddress(const VkBufferDeviceAddressInfo* pInfo) const noexcept { + return fp_vkGetBufferOpaqueCaptureAddress(device, pInfo); + } +#endif +#if (defined(VK_KHR_buffer_device_address)) + uint64_t getBufferOpaqueCaptureAddressKHR(const VkBufferDeviceAddressInfoKHR* pInfo) const noexcept { + return fp_vkGetBufferOpaqueCaptureAddressKHR(device, pInfo); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + VkResult getBufferOpaqueCaptureDescriptorDataEXT(const VkBufferCaptureDescriptorDataInfoEXT* pInfo, void* pData) const noexcept { + return fp_vkGetBufferOpaqueCaptureDescriptorDataEXT(device, pInfo, pData); + } +#endif +#if (defined(VK_EXT_calibrated_timestamps)) + VkResult getCalibratedTimestampsEXT(uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation) const noexcept { + return fp_vkGetCalibratedTimestampsEXT(device, timestampCount, pTimestampInfos, pTimestamps, pMaxDeviation); + } +#endif 
+#if (defined(VK_KHR_calibrated_timestamps)) + VkResult getCalibratedTimestampsKHR(uint32_t timestampCount, const VkCalibratedTimestampInfoKHR* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation) const noexcept { + return fp_vkGetCalibratedTimestampsKHR(device, timestampCount, pTimestampInfos, pTimestamps, pMaxDeviation); + } +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + void getClusterAccelerationStructureBuildSizesNV(const VkClusterAccelerationStructureInputInfoNV* pInfo, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) const noexcept { + fp_vkGetClusterAccelerationStructureBuildSizesNV(device, pInfo, pSizeInfo); + } +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + VkResult getCudaModuleCacheNV(VkCudaModuleNV module, size_t* pCacheSize, void* pCacheData) const noexcept { + return fp_vkGetCudaModuleCacheNV(device, module, pCacheSize, pCacheData); + } +#endif +#if (defined(VK_ARM_data_graph)) + VkResult getDataGraphPipelineAvailablePropertiesARM(const VkDataGraphPipelineInfoARM* pPipelineInfo, uint32_t* pPropertiesCount, VkDataGraphPipelinePropertyARM* pProperties) const noexcept { + return fp_vkGetDataGraphPipelineAvailablePropertiesARM(device, pPipelineInfo, pPropertiesCount, pProperties); + } +#endif +#if (defined(VK_ARM_data_graph)) + VkResult getDataGraphPipelinePropertiesARM(const VkDataGraphPipelineInfoARM* pPipelineInfo, uint32_t propertiesCount, VkDataGraphPipelinePropertyQueryResultARM* pProperties) const noexcept { + return fp_vkGetDataGraphPipelinePropertiesARM(device, pPipelineInfo, propertiesCount, pProperties); + } +#endif +#if (defined(VK_ARM_data_graph)) + VkResult getDataGraphPipelineSessionBindPointRequirementsARM(const VkDataGraphPipelineSessionBindPointRequirementsInfoARM* pInfo, uint32_t* pBindPointRequirementCount, VkDataGraphPipelineSessionBindPointRequirementARM* pBindPointRequirements) const noexcept { + return fp_vkGetDataGraphPipelineSessionBindPointRequirementsARM(device, pInfo, 
pBindPointRequirementCount, pBindPointRequirements); + } +#endif +#if (defined(VK_ARM_data_graph)) + void getDataGraphPipelineSessionMemoryRequirementsARM(const VkDataGraphPipelineSessionMemoryRequirementsInfoARM* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetDataGraphPipelineSessionMemoryRequirementsARM(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_KHR_deferred_host_operations)) + uint32_t getDeferredOperationMaxConcurrencyKHR(VkDeferredOperationKHR operation) const noexcept { + return fp_vkGetDeferredOperationMaxConcurrencyKHR(device, operation); + } +#endif +#if (defined(VK_KHR_deferred_host_operations)) + VkResult getDeferredOperationResultKHR(VkDeferredOperationKHR operation) const noexcept { + return fp_vkGetDeferredOperationResultKHR(device, operation); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + void getDescriptorEXT(const VkDescriptorGetInfoEXT* pDescriptorInfo, size_t dataSize, void* pDescriptor) const noexcept { + fp_vkGetDescriptorEXT(device, pDescriptorInfo, dataSize, pDescriptor); + } +#endif +#if (defined(VK_VALVE_descriptor_set_host_mapping)) + void getDescriptorSetHostMappingVALVE(VkDescriptorSet descriptorSet, void** ppData) const noexcept { + fp_vkGetDescriptorSetHostMappingVALVE(device, descriptorSet, ppData); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + void getDescriptorSetLayoutBindingOffsetEXT(VkDescriptorSetLayout layout, uint32_t binding, VkDeviceSize* pOffset) const noexcept { + fp_vkGetDescriptorSetLayoutBindingOffsetEXT(device, layout, binding, pOffset); + } +#endif +#if (defined(VK_VALVE_descriptor_set_host_mapping)) + void getDescriptorSetLayoutHostMappingInfoVALVE(const VkDescriptorSetBindingReferenceVALVE* pBindingReference, VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping) const noexcept { + fp_vkGetDescriptorSetLayoutHostMappingInfoVALVE(device, pBindingReference, pHostMapping); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + void 
getDescriptorSetLayoutSizeEXT(VkDescriptorSetLayout layout, VkDeviceSize* pLayoutSizeInBytes) const noexcept { + fp_vkGetDescriptorSetLayoutSizeEXT(device, layout, pLayoutSizeInBytes); + } +#endif +#if (defined(VK_VERSION_1_1)) + void getDescriptorSetLayoutSupport(const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport) const noexcept { + fp_vkGetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport); + } +#endif +#if (defined(VK_KHR_maintenance3)) + void getDescriptorSetLayoutSupportKHR(const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupportKHR* pSupport) const noexcept { + fp_vkGetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + void getDeviceAccelerationStructureCompatibilityKHR(const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility) const noexcept { + fp_vkGetDeviceAccelerationStructureCompatibilityKHR(device, pVersionInfo, pCompatibility); + } +#endif +#if (defined(VK_VERSION_1_3)) + void getDeviceBufferMemoryRequirements(const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetDeviceBufferMemoryRequirements(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_KHR_maintenance4)) + void getDeviceBufferMemoryRequirementsKHR(const VkDeviceBufferMemoryRequirementsKHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) const noexcept { + fp_vkGetDeviceBufferMemoryRequirementsKHR(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_EXT_device_fault)) + VkResult getDeviceFaultInfoEXT(VkDeviceFaultCountsEXT* pFaultCounts, VkDeviceFaultInfoEXT* pFaultInfo) const noexcept { + return fp_vkGetDeviceFaultInfoEXT(device, pFaultCounts, pFaultInfo); + } +#endif +#if (defined(VK_VERSION_1_1)) + void getDeviceGroupPeerMemoryFeatures(uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t 
remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures) const noexcept { + fp_vkGetDeviceGroupPeerMemoryFeatures(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures); + } +#endif +#if (defined(VK_KHR_device_group)) + void getDeviceGroupPeerMemoryFeaturesKHR(uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures) const noexcept { + fp_vkGetDeviceGroupPeerMemoryFeaturesKHR(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures); + } +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + VkResult getDeviceGroupPresentCapabilitiesKHR(VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities) const noexcept { + return fp_vkGetDeviceGroupPresentCapabilitiesKHR(device, pDeviceGroupPresentCapabilities); + } +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + VkResult getDeviceGroupSurfacePresentModes2EXT(const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes) const noexcept { + return fp_vkGetDeviceGroupSurfacePresentModes2EXT(device, pSurfaceInfo, pModes); + } +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + VkResult getDeviceGroupSurfacePresentModesKHR(VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes) const noexcept { + return fp_vkGetDeviceGroupSurfacePresentModesKHR(device, surface, pModes); + } +#endif +#if (defined(VK_VERSION_1_3)) + void getDeviceImageMemoryRequirements(const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetDeviceImageMemoryRequirements(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_KHR_maintenance4)) + void getDeviceImageMemoryRequirementsKHR(const VkDeviceImageMemoryRequirementsKHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) const noexcept { + fp_vkGetDeviceImageMemoryRequirementsKHR(device, pInfo, pMemoryRequirements); + } 
+#endif +#if (defined(VK_VERSION_1_3)) + void getDeviceImageSparseMemoryRequirements(const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements) const noexcept { + fp_vkGetDeviceImageSparseMemoryRequirements(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } +#endif +#if (defined(VK_KHR_maintenance4)) + void getDeviceImageSparseMemoryRequirementsKHR(const VkDeviceImageMemoryRequirementsKHR* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements) const noexcept { + fp_vkGetDeviceImageSparseMemoryRequirementsKHR(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } +#endif +#if (defined(VK_VERSION_1_4)) + void getDeviceImageSubresourceLayout(const VkDeviceImageSubresourceInfo* pInfo, VkSubresourceLayout2* pLayout) const noexcept { + fp_vkGetDeviceImageSubresourceLayout(device, pInfo, pLayout); + } +#endif +#if (defined(VK_KHR_maintenance5)) + void getDeviceImageSubresourceLayoutKHR(const VkDeviceImageSubresourceInfoKHR* pInfo, VkSubresourceLayout2KHR* pLayout) const noexcept { + fp_vkGetDeviceImageSubresourceLayoutKHR(device, pInfo, pLayout); + } +#endif + void getDeviceMemoryCommitment(VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes) const noexcept { + fp_vkGetDeviceMemoryCommitment(device, memory, pCommittedMemoryInBytes); + } +#if (defined(VK_VERSION_1_2)) + uint64_t getDeviceMemoryOpaqueCaptureAddress(const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo) const noexcept { + return fp_vkGetDeviceMemoryOpaqueCaptureAddress(device, pInfo); + } +#endif +#if (defined(VK_KHR_buffer_device_address)) + uint64_t getDeviceMemoryOpaqueCaptureAddressKHR(const VkDeviceMemoryOpaqueCaptureAddressInfoKHR* pInfo) const noexcept { + return fp_vkGetDeviceMemoryOpaqueCaptureAddressKHR(device, pInfo); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void 
getDeviceMicromapCompatibilityEXT(const VkMicromapVersionInfoEXT* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility) const noexcept { + fp_vkGetDeviceMicromapCompatibilityEXT(device, pVersionInfo, pCompatibility); + } +#endif + void getDeviceQueue(uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue) const noexcept { + fp_vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue); + } +#if (defined(VK_VERSION_1_1)) + void getDeviceQueue2(const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue) const noexcept { + fp_vkGetDeviceQueue2(device, pQueueInfo, pQueue); + } +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + VkResult getDeviceSubpassShadingMaxWorkgroupSizeHUAWEI(VkRenderPass renderpass, VkExtent2D* pMaxWorkgroupSize) const noexcept { + return fp_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI(device, renderpass, pMaxWorkgroupSize); + } +#endif +#if (defined(VK_ARM_tensors)) + void getDeviceTensorMemoryRequirementsARM(const VkDeviceTensorMemoryRequirementsARM* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetDeviceTensorMemoryRequirementsARM(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_QCOM_tile_properties)) + VkResult getDynamicRenderingTilePropertiesQCOM(const VkRenderingInfo* pRenderingInfo, VkTilePropertiesQCOM* pProperties) const noexcept { + return fp_vkGetDynamicRenderingTilePropertiesQCOM(device, pRenderingInfo, pProperties); + } +#endif +#if (defined(VK_KHR_video_encode_queue)) + VkResult getEncodedVideoSessionParametersKHR(const VkVideoEncodeSessionParametersGetInfoKHR* pVideoSessionParametersInfo, VkVideoEncodeSessionParametersFeedbackInfoKHR* pFeedbackInfo, size_t* pDataSize, void* pData) const noexcept { + return fp_vkGetEncodedVideoSessionParametersKHR(device, pVideoSessionParametersInfo, pFeedbackInfo, pDataSize, pData); + } +#endif + VkResult getEventStatus(VkEvent event) const noexcept { + return fp_vkGetEventStatus(device, event); + } +#if 
(defined(VK_AMDX_shader_enqueue)) + VkResult getExecutionGraphPipelineNodeIndexAMDX(VkPipeline executionGraph, const VkPipelineShaderStageNodeCreateInfoAMDX* pNodeInfo, uint32_t* pNodeIndex) const noexcept { + return fp_vkGetExecutionGraphPipelineNodeIndexAMDX(device, executionGraph, pNodeInfo, pNodeIndex); + } +#endif +#if (defined(VK_AMDX_shader_enqueue)) + VkResult getExecutionGraphPipelineScratchSizeAMDX(VkPipeline executionGraph, VkExecutionGraphPipelineScratchSizeAMDX* pSizeInfo) const noexcept { + return fp_vkGetExecutionGraphPipelineScratchSizeAMDX(device, executionGraph, pSizeInfo); + } +#endif +#if (defined(VK_NV_external_compute_queue)) + void getExternalComputeQueueDataNV(VkExternalComputeQueueNV externalQueue, VkExternalComputeQueueDataParamsNV* params, void* pData) const noexcept { + fp_vkGetExternalComputeQueueDataNV(externalQueue, params, pData); + } +#endif +#if (defined(VK_KHR_external_fence_fd)) + VkResult getFenceFdKHR(const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) const noexcept { + return fp_vkGetFenceFdKHR(device, pGetFdInfo, pFd); + } +#endif + VkResult getFenceStatus(VkFence fence) const noexcept { + return fp_vkGetFenceStatus(device, fence); + } +#if (defined(VK_KHR_external_fence_win32)) + VkResult getFenceWin32HandleKHR(const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle) const noexcept { + return fp_vkGetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle); + } +#endif +#if (defined(VK_QCOM_tile_properties)) + VkResult getFramebufferTilePropertiesQCOM(VkFramebuffer framebuffer, uint32_t* pPropertiesCount, VkTilePropertiesQCOM* pProperties) const noexcept { + return fp_vkGetFramebufferTilePropertiesQCOM(device, framebuffer, pPropertiesCount, pProperties); + } +#endif +#if (defined(VK_EXT_device_generated_commands)) + void getGeneratedCommandsMemoryRequirementsEXT(const VkGeneratedCommandsMemoryRequirementsInfoEXT* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + 
fp_vkGetGeneratedCommandsMemoryRequirementsEXT(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_NV_device_generated_commands)) + void getGeneratedCommandsMemoryRequirementsNV(const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetGeneratedCommandsMemoryRequirementsNV(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_EXT_image_drm_format_modifier)) + VkResult getImageDrmFormatModifierPropertiesEXT(VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties) const noexcept { + return fp_vkGetImageDrmFormatModifierPropertiesEXT(device, image, pProperties); + } +#endif + void getImageMemoryRequirements(VkImage image, VkMemoryRequirements* pMemoryRequirements) const noexcept { + fp_vkGetImageMemoryRequirements(device, image, pMemoryRequirements); + } +#if (defined(VK_VERSION_1_1)) + void getImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + void getImageMemoryRequirements2KHR(const VkImageMemoryRequirementsInfo2KHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) const noexcept { + fp_vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + VkResult getImageOpaqueCaptureDescriptorDataEXT(const VkImageCaptureDescriptorDataInfoEXT* pInfo, void* pData) const noexcept { + return fp_vkGetImageOpaqueCaptureDescriptorDataEXT(device, pInfo, pData); + } +#endif + void getImageSparseMemoryRequirements(VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements) const noexcept { + fp_vkGetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } +#if (defined(VK_VERSION_1_1)) + void 
getImageSparseMemoryRequirements2(const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements) const noexcept { + fp_vkGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + void getImageSparseMemoryRequirements2KHR(const VkImageSparseMemoryRequirementsInfo2KHR* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements) const noexcept { + fp_vkGetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); + } +#endif + void getImageSubresourceLayout(VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const noexcept { + fp_vkGetImageSubresourceLayout(device, image, pSubresource, pLayout); + } +#if (defined(VK_VERSION_1_4)) + void getImageSubresourceLayout2(VkImage image, const VkImageSubresource2* pSubresource, VkSubresourceLayout2* pLayout) const noexcept { + fp_vkGetImageSubresourceLayout2(device, image, pSubresource, pLayout); + } +#endif +#if (defined(VK_EXT_host_image_copy) || defined(VK_EXT_image_compression_control)) + void getImageSubresourceLayout2EXT(VkImage image, const VkImageSubresource2EXT* pSubresource, VkSubresourceLayout2EXT* pLayout) const noexcept { + fp_vkGetImageSubresourceLayout2EXT(device, image, pSubresource, pLayout); + } +#endif +#if (defined(VK_KHR_maintenance5)) + void getImageSubresourceLayout2KHR(VkImage image, const VkImageSubresource2KHR* pSubresource, VkSubresourceLayout2KHR* pLayout) const noexcept { + fp_vkGetImageSubresourceLayout2KHR(device, image, pSubresource, pLayout); + } +#endif +#if (defined(VK_NVX_image_view_handle)) + VkResult getImageViewAddressNVX(VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties) const noexcept { + return fp_vkGetImageViewAddressNVX(device, 
imageView, pProperties); + } +#endif +#if (defined(VK_NVX_image_view_handle)) + uint64_t getImageViewHandle64NVX(const VkImageViewHandleInfoNVX* pInfo) const noexcept { + return fp_vkGetImageViewHandle64NVX(device, pInfo); + } +#endif +#if (defined(VK_NVX_image_view_handle)) + uint32_t getImageViewHandleNVX(const VkImageViewHandleInfoNVX* pInfo) const noexcept { + return fp_vkGetImageViewHandleNVX(device, pInfo); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + VkResult getImageViewOpaqueCaptureDescriptorDataEXT(const VkImageViewCaptureDescriptorDataInfoEXT* pInfo, void* pData) const noexcept { + return fp_vkGetImageViewOpaqueCaptureDescriptorDataEXT(device, pInfo, pData); + } +#endif +#if (defined(VK_NV_low_latency2)) && VK_HEADER_VERSION >= 271 + void getLatencyTimingsNV(VkSwapchainKHR swapchain, VkGetLatencyMarkerInfoNV* pLatencyMarkerInfo) const noexcept { + fp_vkGetLatencyTimingsNV(device, swapchain, pLatencyMarkerInfo); + } +#endif +#if (defined(VK_ANDROID_external_memory_android_hardware_buffer)) + VkResult getMemoryAndroidHardwareBufferANDROID(const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer) const noexcept { + return fp_vkGetMemoryAndroidHardwareBufferANDROID(device, pInfo, pBuffer); + } +#endif +#if (defined(VK_KHR_external_memory_fd)) + VkResult getMemoryFdKHR(const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) const noexcept { + return fp_vkGetMemoryFdKHR(device, pGetFdInfo, pFd); + } +#endif +#if (defined(VK_KHR_external_memory_fd)) + VkResult getMemoryFdPropertiesKHR(VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties) const noexcept { + return fp_vkGetMemoryFdPropertiesKHR(device, handleType, fd, pMemoryFdProperties); + } +#endif +#if (defined(VK_EXT_external_memory_host)) + VkResult getMemoryHostPointerPropertiesEXT(VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties) const 
noexcept { + return fp_vkGetMemoryHostPointerPropertiesEXT(device, handleType, pHostPointer, pMemoryHostPointerProperties); + } +#endif +#if (defined(VK_EXT_external_memory_metal)) + VkResult getMemoryMetalHandleEXT(const VkMemoryGetMetalHandleInfoEXT* pGetMetalHandleInfo, void** pHandle) const noexcept { + return fp_vkGetMemoryMetalHandleEXT(device, pGetMetalHandleInfo, pHandle); + } +#endif +#if (defined(VK_EXT_external_memory_metal)) + VkResult getMemoryMetalHandlePropertiesEXT(VkExternalMemoryHandleTypeFlagBits handleType, const void* pHandle, VkMemoryMetalHandlePropertiesEXT* pMemoryMetalHandleProperties) const noexcept { + return fp_vkGetMemoryMetalHandlePropertiesEXT(device, handleType, pHandle, pMemoryMetalHandleProperties); + } +#endif +#if (defined(VK_NV_external_memory_rdma)) + VkResult getMemoryRemoteAddressNV(const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo, VkRemoteAddressNV* pAddress) const noexcept { + return fp_vkGetMemoryRemoteAddressNV(device, pMemoryGetRemoteAddressInfo, pAddress); + } +#endif +#if (defined(VK_KHR_external_memory_win32)) + VkResult getMemoryWin32HandleKHR(const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle) const noexcept { + return fp_vkGetMemoryWin32HandleKHR(device, pGetWin32HandleInfo, pHandle); + } +#endif +#if (defined(VK_NV_external_memory_win32)) + VkResult getMemoryWin32HandleNV(VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle) const noexcept { + return fp_vkGetMemoryWin32HandleNV(device, memory, handleType, pHandle); + } +#endif +#if (defined(VK_KHR_external_memory_win32)) + VkResult getMemoryWin32HandlePropertiesKHR(VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties) const noexcept { + return fp_vkGetMemoryWin32HandlePropertiesKHR(device, handleType, handle, pMemoryWin32HandleProperties); + } +#endif +#if (defined(VK_FUCHSIA_external_memory)) + VkResult 
getMemoryZirconHandleFUCHSIA(const VkMemoryGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo, zx_handle_t* pZirconHandle) const noexcept { + return fp_vkGetMemoryZirconHandleFUCHSIA(device, pGetZirconHandleInfo, pZirconHandle); + } +#endif +#if (defined(VK_FUCHSIA_external_memory)) + VkResult getMemoryZirconHandlePropertiesFUCHSIA(VkExternalMemoryHandleTypeFlagBits handleType, zx_handle_t zirconHandle, VkMemoryZirconHandlePropertiesFUCHSIA* pMemoryZirconHandleProperties) const noexcept { + return fp_vkGetMemoryZirconHandlePropertiesFUCHSIA(device, handleType, zirconHandle, pMemoryZirconHandleProperties); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + void getMicromapBuildSizesEXT(VkAccelerationStructureBuildTypeKHR buildType, const VkMicromapBuildInfoEXT* pBuildInfo, VkMicromapBuildSizesInfoEXT* pSizeInfo) const noexcept { + fp_vkGetMicromapBuildSizesEXT(device, buildType, pBuildInfo, pSizeInfo); + } +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + void getPartitionedAccelerationStructuresBuildSizesNV(const VkPartitionedAccelerationStructureInstancesInputNV* pInfo, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) const noexcept { + fp_vkGetPartitionedAccelerationStructuresBuildSizesNV(device, pInfo, pSizeInfo); + } +#endif +#if (defined(VK_GOOGLE_display_timing)) + VkResult getPastPresentationTimingGOOGLE(VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings) const noexcept { + return fp_vkGetPastPresentationTimingGOOGLE(device, swapchain, pPresentationTimingCount, pPresentationTimings); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult getPerformanceParameterINTEL(VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue) const noexcept { + return fp_vkGetPerformanceParameterINTEL(device, parameter, pValue); + } +#endif +#if (defined(VK_KHR_pipeline_binary)) + VkResult getPipelineBinaryDataKHR(const VkPipelineBinaryDataInfoKHR* pInfo, 
VkPipelineBinaryKeyKHR* pPipelineBinaryKey, size_t* pPipelineBinaryDataSize, void* pPipelineBinaryData) const noexcept { + return fp_vkGetPipelineBinaryDataKHR(device, pInfo, pPipelineBinaryKey, pPipelineBinaryDataSize, pPipelineBinaryData); + } +#endif + VkResult getPipelineCacheData(VkPipelineCache pipelineCache, size_t* pDataSize, void* pData) const noexcept { + return fp_vkGetPipelineCacheData(device, pipelineCache, pDataSize, pData); + } +#if (defined(VK_KHR_pipeline_executable_properties)) + VkResult getPipelineExecutableInternalRepresentationsKHR(const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations) const noexcept { + return fp_vkGetPipelineExecutableInternalRepresentationsKHR(device, pExecutableInfo, pInternalRepresentationCount, pInternalRepresentations); + } +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + VkResult getPipelineExecutablePropertiesKHR(const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties) const noexcept { + return fp_vkGetPipelineExecutablePropertiesKHR(device, pPipelineInfo, pExecutableCount, pProperties); + } +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + VkResult getPipelineExecutableStatisticsKHR(const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics) const noexcept { + return fp_vkGetPipelineExecutableStatisticsKHR(device, pExecutableInfo, pStatisticCount, pStatistics); + } +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + VkDeviceAddress getPipelineIndirectDeviceAddressNV(const VkPipelineIndirectDeviceAddressInfoNV* pInfo) const noexcept { + return fp_vkGetPipelineIndirectDeviceAddressNV(device, pInfo); + } +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + void getPipelineIndirectMemoryRequirementsNV(const VkComputePipelineCreateInfo* 
pCreateInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetPipelineIndirectMemoryRequirementsNV(device, pCreateInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_KHR_pipeline_binary)) + VkResult getPipelineKeyKHR(const VkPipelineCreateInfoKHR* pPipelineCreateInfo, VkPipelineBinaryKeyKHR* pPipelineKey) const noexcept { + return fp_vkGetPipelineKeyKHR(device, pPipelineCreateInfo, pPipelineKey); + } +#endif +#if (defined(VK_EXT_pipeline_properties)) + VkResult getPipelinePropertiesEXT(const VkPipelineInfoEXT* pPipelineInfo, VkBaseOutStructure* pPipelineProperties) const noexcept { + return fp_vkGetPipelinePropertiesEXT(device, pPipelineInfo, pPipelineProperties); + } +#endif +#if (defined(VK_VERSION_1_3)) + void getPrivateData(VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData) const noexcept { + fp_vkGetPrivateData(device, objectType, objectHandle, privateDataSlot, pData); + } +#endif +#if (defined(VK_EXT_private_data)) + void getPrivateDataEXT(VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData) const noexcept { + fp_vkGetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, pData); + } +#endif + VkResult getQueryPoolResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags) const noexcept { + return fp_vkGetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags); + } +#if (defined(VK_NV_device_diagnostic_checkpoints)) + void getQueueCheckpointData2NV(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointData2NV* pCheckpointData) const noexcept { + fp_vkGetQueueCheckpointData2NV(queue, pCheckpointDataCount, pCheckpointData); + } +#endif +#if (defined(VK_NV_device_diagnostic_checkpoints)) + void getQueueCheckpointDataNV(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* 
pCheckpointData) const noexcept { + fp_vkGetQueueCheckpointDataNV(queue, pCheckpointDataCount, pCheckpointData); + } +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + VkResult getRayTracingCaptureReplayShaderGroupHandlesKHR(VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) const noexcept { + return fp_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData); + } +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + VkResult getRayTracingShaderGroupHandlesKHR(VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) const noexcept { + return fp_vkGetRayTracingShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData); + } +#endif +#if (defined(VK_NV_ray_tracing)) + VkResult getRayTracingShaderGroupHandlesNV(VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) const noexcept { + return fp_vkGetRayTracingShaderGroupHandlesNV(device, pipeline, firstGroup, groupCount, dataSize, pData); + } +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + VkDeviceSize getRayTracingShaderGroupStackSizeKHR(VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader) const noexcept { + return fp_vkGetRayTracingShaderGroupStackSizeKHR(device, pipeline, group, groupShader); + } +#endif +#if (defined(VK_GOOGLE_display_timing)) + VkResult getRefreshCycleDurationGOOGLE(VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties) const noexcept { + return fp_vkGetRefreshCycleDurationGOOGLE(device, swapchain, pDisplayTimingProperties); + } +#endif + void getRenderAreaGranularity(VkRenderPass renderPass, VkExtent2D* pGranularity) const noexcept { + fp_vkGetRenderAreaGranularity(device, renderPass, pGranularity); + } +#if (defined(VK_VERSION_1_4)) + void getRenderingAreaGranularity(const VkRenderingAreaInfo* pRenderingAreaInfo, VkExtent2D* pGranularity) const noexcept { 
+ fp_vkGetRenderingAreaGranularity(device, pRenderingAreaInfo, pGranularity); + } +#endif +#if (defined(VK_KHR_maintenance5)) + void getRenderingAreaGranularityKHR(const VkRenderingAreaInfoKHR* pRenderingAreaInfo, VkExtent2D* pGranularity) const noexcept { + fp_vkGetRenderingAreaGranularityKHR(device, pRenderingAreaInfo, pGranularity); + } +#endif +#if (defined(VK_EXT_descriptor_buffer)) + VkResult getSamplerOpaqueCaptureDescriptorDataEXT(const VkSamplerCaptureDescriptorDataInfoEXT* pInfo, void* pData) const noexcept { + return fp_vkGetSamplerOpaqueCaptureDescriptorDataEXT(device, pInfo, pData); + } +#endif +#if (defined(VK_QNX_external_memory_screen_buffer)) + VkResult getScreenBufferPropertiesQNX(const struct _screen_buffer* buffer, VkScreenBufferPropertiesQNX* pProperties) const noexcept { + return fp_vkGetScreenBufferPropertiesQNX(device, buffer, pProperties); + } +#endif +#if (defined(VK_VERSION_1_2)) + VkResult getSemaphoreCounterValue(VkSemaphore semaphore, uint64_t* pValue) const noexcept { + return fp_vkGetSemaphoreCounterValue(device, semaphore, pValue); + } +#endif +#if (defined(VK_KHR_timeline_semaphore)) + VkResult getSemaphoreCounterValueKHR(VkSemaphore semaphore, uint64_t* pValue) const noexcept { + return fp_vkGetSemaphoreCounterValueKHR(device, semaphore, pValue); + } +#endif +#if (defined(VK_KHR_external_semaphore_fd)) + VkResult getSemaphoreFdKHR(const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd) const noexcept { + return fp_vkGetSemaphoreFdKHR(device, pGetFdInfo, pFd); + } +#endif +#if (defined(VK_KHR_external_semaphore_win32)) + VkResult getSemaphoreWin32HandleKHR(const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle) const noexcept { + return fp_vkGetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle); + } +#endif +#if (defined(VK_FUCHSIA_external_semaphore)) + VkResult getSemaphoreZirconHandleFUCHSIA(const VkSemaphoreGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo, zx_handle_t* pZirconHandle) const noexcept 
{ + return fp_vkGetSemaphoreZirconHandleFUCHSIA(device, pGetZirconHandleInfo, pZirconHandle); + } +#endif +#if (defined(VK_EXT_shader_object)) + VkResult getShaderBinaryDataEXT(VkShaderEXT shader, size_t* pDataSize, void* pData) const noexcept { + return fp_vkGetShaderBinaryDataEXT(device, shader, pDataSize, pData); + } +#endif +#if (defined(VK_AMD_shader_info)) + VkResult getShaderInfoAMD(VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo) const noexcept { + return fp_vkGetShaderInfoAMD(device, pipeline, shaderStage, infoType, pInfoSize, pInfo); + } +#endif +#if (defined(VK_EXT_shader_module_identifier)) + void getShaderModuleCreateInfoIdentifierEXT(const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModuleIdentifierEXT* pIdentifier) const noexcept { + fp_vkGetShaderModuleCreateInfoIdentifierEXT(device, pCreateInfo, pIdentifier); + } +#endif +#if (defined(VK_EXT_shader_module_identifier)) + void getShaderModuleIdentifierEXT(VkShaderModule shaderModule, VkShaderModuleIdentifierEXT* pIdentifier) const noexcept { + fp_vkGetShaderModuleIdentifierEXT(device, shaderModule, pIdentifier); + } +#endif +#if (defined(VK_EXT_display_control)) + VkResult getSwapchainCounterEXT(VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue) const noexcept { + return fp_vkGetSwapchainCounterEXT(device, swapchain, counter, pCounterValue); + } +#endif +#if (defined(VK_OHOS_native_buffer)) + VkResult getSwapchainGrallocUsageOHOS(VkFormat format, VkImageUsageFlags imageUsage, uint64_t* grallocUsage) const noexcept { + return fp_vkGetSwapchainGrallocUsageOHOS(device, format, imageUsage, grallocUsage); + } +#endif +#if (defined(VK_KHR_swapchain)) + VkResult getSwapchainImagesKHR(VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages) const noexcept { + return fp_vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages); + } +#endif +#if 
(defined(VK_KHR_shared_presentable_image)) + VkResult getSwapchainStatusKHR(VkSwapchainKHR swapchain) const noexcept { + return fp_vkGetSwapchainStatusKHR(device, swapchain); + } +#endif +#if (defined(VK_ARM_tensors)) + void getTensorMemoryRequirementsARM(const VkTensorMemoryRequirementsInfoARM* pInfo, VkMemoryRequirements2* pMemoryRequirements) const noexcept { + fp_vkGetTensorMemoryRequirementsARM(device, pInfo, pMemoryRequirements); + } +#endif +#if (defined(VK_ARM_tensors)) + VkResult getTensorOpaqueCaptureDescriptorDataARM(const VkTensorCaptureDescriptorDataInfoARM* pInfo, void* pData) const noexcept { + return fp_vkGetTensorOpaqueCaptureDescriptorDataARM(device, pInfo, pData); + } +#endif +#if (defined(VK_ARM_tensors)) + VkResult getTensorViewOpaqueCaptureDescriptorDataARM(const VkTensorViewCaptureDescriptorDataInfoARM* pInfo, void* pData) const noexcept { + return fp_vkGetTensorViewOpaqueCaptureDescriptorDataARM(device, pInfo, pData); + } +#endif +#if (defined(VK_EXT_validation_cache)) + VkResult getValidationCacheDataEXT(VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData) const noexcept { + return fp_vkGetValidationCacheDataEXT(device, validationCache, pDataSize, pData); + } +#endif +#if (defined(VK_KHR_video_queue)) + VkResult getVideoSessionMemoryRequirementsKHR(VkVideoSessionKHR videoSession, uint32_t* pMemoryRequirementsCount, VkVideoSessionMemoryRequirementsKHR* pMemoryRequirements) const noexcept { + return fp_vkGetVideoSessionMemoryRequirementsKHR(device, videoSession, pMemoryRequirementsCount, pMemoryRequirements); + } +#endif +#if (defined(VK_KHR_external_fence_fd)) + VkResult importFenceFdKHR(const VkImportFenceFdInfoKHR* pImportFenceFdInfo) const noexcept { + return fp_vkImportFenceFdKHR(device, pImportFenceFdInfo); + } +#endif +#if (defined(VK_KHR_external_fence_win32)) + VkResult importFenceWin32HandleKHR(const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo) const noexcept { + return 
fp_vkImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo); + } +#endif +#if (defined(VK_KHR_external_semaphore_fd)) + VkResult importSemaphoreFdKHR(const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) const noexcept { + return fp_vkImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo); + } +#endif +#if (defined(VK_KHR_external_semaphore_win32)) + VkResult importSemaphoreWin32HandleKHR(const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo) const noexcept { + return fp_vkImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo); + } +#endif +#if (defined(VK_FUCHSIA_external_semaphore)) + VkResult importSemaphoreZirconHandleFUCHSIA(const VkImportSemaphoreZirconHandleInfoFUCHSIA* pImportSemaphoreZirconHandleInfo) const noexcept { + return fp_vkImportSemaphoreZirconHandleFUCHSIA(device, pImportSemaphoreZirconHandleInfo); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult initializePerformanceApiINTEL(const VkInitializePerformanceApiInfoINTEL* pInitializeInfo) const noexcept { + return fp_vkInitializePerformanceApiINTEL(device, pInitializeInfo); + } +#endif + VkResult invalidateMappedMemoryRanges(uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges) const noexcept { + return fp_vkInvalidateMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges); + } +#if (defined(VK_NV_low_latency2)) + VkResult latencySleepNV(VkSwapchainKHR swapchain, const VkLatencySleepInfoNV* pSleepInfo) const noexcept { + return fp_vkLatencySleepNV(device, swapchain, pSleepInfo); + } +#endif + VkResult mapMemory(VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData) const noexcept { + return fp_vkMapMemory(device, memory, offset, size, flags, ppData); + } +#if (defined(VK_VERSION_1_4)) + VkResult mapMemory2(const VkMemoryMapInfo* pMemoryMapInfo, void** ppData) const noexcept { + return fp_vkMapMemory2(device, pMemoryMapInfo, ppData); + } +#endif +#if 
(defined(VK_KHR_map_memory2)) + VkResult mapMemory2KHR(const VkMemoryMapInfoKHR* pMemoryMapInfo, void** ppData) const noexcept { + return fp_vkMapMemory2KHR(device, pMemoryMapInfo, ppData); + } +#endif + VkResult mergePipelineCaches(VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches) const noexcept { + return fp_vkMergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches); + } +#if (defined(VK_EXT_validation_cache)) + VkResult mergeValidationCachesEXT(VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches) const noexcept { + return fp_vkMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches); + } +#endif +#if (defined(VK_EXT_debug_utils)) + void queueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo) const noexcept { + fp_vkQueueBeginDebugUtilsLabelEXT(queue, pLabelInfo); + } +#endif + VkResult queueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence) const noexcept { + return fp_vkQueueBindSparse(queue, bindInfoCount, pBindInfo, fence); + } +#if (defined(VK_EXT_debug_utils)) + void queueEndDebugUtilsLabelEXT(VkQueue queue) const noexcept { + fp_vkQueueEndDebugUtilsLabelEXT(queue); + } +#endif +#if (defined(VK_EXT_debug_utils)) + void queueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo) const noexcept { + fp_vkQueueInsertDebugUtilsLabelEXT(queue, pLabelInfo); + } +#endif +#if (defined(VK_NV_low_latency2)) + void queueNotifyOutOfBandNV(VkQueue queue, const VkOutOfBandQueueTypeInfoNV* pQueueTypeInfo) const noexcept { + fp_vkQueueNotifyOutOfBandNV(queue, pQueueTypeInfo); + } +#endif +#if (defined(VK_KHR_swapchain)) + VkResult queuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo) const noexcept { + return fp_vkQueuePresentKHR(queue, pPresentInfo); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult queueSetPerformanceConfigurationINTEL(VkQueue 
queue, VkPerformanceConfigurationINTEL configuration) const noexcept { + return fp_vkQueueSetPerformanceConfigurationINTEL(queue, configuration); + } +#endif +#if (defined(VK_OHOS_native_buffer)) + VkResult queueSignalReleaseImageOHOS(VkQueue queue, uint32_t waitSemaphoreCount, const VkSemaphore* pWaitSemaphores, VkImage image, int32_t* pNativeFenceFd) const noexcept { + return fp_vkQueueSignalReleaseImageOHOS(queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd); + } +#endif + VkResult queueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) const noexcept { + return fp_vkQueueSubmit(queue, submitCount, pSubmits, fence); + } +#if (defined(VK_VERSION_1_3)) + VkResult queueSubmit2(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence) const noexcept { + return fp_vkQueueSubmit2(queue, submitCount, pSubmits, fence); + } +#endif +#if (defined(VK_KHR_synchronization2)) + VkResult queueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR* pSubmits, VkFence fence) const noexcept { + return fp_vkQueueSubmit2KHR(queue, submitCount, pSubmits, fence); + } +#endif + VkResult queueWaitIdle(VkQueue queue) const noexcept { + return fp_vkQueueWaitIdle(queue); + } +#if (defined(VK_EXT_display_control)) + VkResult registerDeviceEventEXT(const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence) const noexcept { + return fp_vkRegisterDeviceEventEXT(device, pDeviceEventInfo, pAllocator, pFence); + } +#endif +#if (defined(VK_EXT_display_control)) + VkResult registerDisplayEventEXT(VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence) const noexcept { + return fp_vkRegisterDisplayEventEXT(device, display, pDisplayEventInfo, pAllocator, pFence); + } +#endif +#if (defined(VK_KHR_pipeline_binary)) + VkResult releaseCapturedPipelineDataKHR(const 
VkReleaseCapturedPipelineDataInfoKHR* pInfo, const VkAllocationCallbacks* pAllocator) const noexcept { + return fp_vkReleaseCapturedPipelineDataKHR(device, pInfo, pAllocator); + } +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + VkResult releaseFullScreenExclusiveModeEXT(VkSwapchainKHR swapchain) const noexcept { + return fp_vkReleaseFullScreenExclusiveModeEXT(device, swapchain); + } +#endif +#if (defined(VK_INTEL_performance_query)) + VkResult releasePerformanceConfigurationINTEL(VkPerformanceConfigurationINTEL configuration) const noexcept { + return fp_vkReleasePerformanceConfigurationINTEL(device, configuration); + } +#endif +#if (defined(VK_KHR_performance_query)) + void releaseProfilingLockKHR() const noexcept { + fp_vkReleaseProfilingLockKHR(device); + } +#endif +#if (defined(VK_EXT_swapchain_maintenance1)) + VkResult releaseSwapchainImagesEXT(const VkReleaseSwapchainImagesInfoEXT* pReleaseInfo) const noexcept { + return fp_vkReleaseSwapchainImagesEXT(device, pReleaseInfo); + } +#endif +#if (defined(VK_KHR_swapchain_maintenance1)) + VkResult releaseSwapchainImagesKHR(const VkReleaseSwapchainImagesInfoKHR* pReleaseInfo) const noexcept { + return fp_vkReleaseSwapchainImagesKHR(device, pReleaseInfo); + } +#endif + VkResult resetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const noexcept { + return fp_vkResetCommandBuffer(commandBuffer, flags); + } + VkResult resetCommandPool(VkCommandPool commandPool, VkCommandPoolResetFlags flags) const noexcept { + return fp_vkResetCommandPool(device, commandPool, flags); + } + VkResult resetDescriptorPool(VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) const noexcept { + return fp_vkResetDescriptorPool(device, descriptorPool, flags); + } + VkResult resetEvent(VkEvent event) const noexcept { + return fp_vkResetEvent(device, event); + } + VkResult resetFences(uint32_t fenceCount, const VkFence* pFences) const noexcept { + return fp_vkResetFences(device, fenceCount, 
pFences); + } +#if (defined(VK_VERSION_1_2)) + void resetQueryPool(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const noexcept { + fp_vkResetQueryPool(device, queryPool, firstQuery, queryCount); + } +#endif +#if (defined(VK_EXT_host_query_reset)) + void resetQueryPoolEXT(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const noexcept { + fp_vkResetQueryPoolEXT(device, queryPool, firstQuery, queryCount); + } +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + VkResult setBufferCollectionBufferConstraintsFUCHSIA(VkBufferCollectionFUCHSIA collection, const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) const noexcept { + return fp_vkSetBufferCollectionBufferConstraintsFUCHSIA(device, collection, pBufferConstraintsInfo); + } +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + VkResult setBufferCollectionImageConstraintsFUCHSIA(VkBufferCollectionFUCHSIA collection, const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) const noexcept { + return fp_vkSetBufferCollectionImageConstraintsFUCHSIA(device, collection, pImageConstraintsInfo); + } +#endif +#if (defined(VK_EXT_debug_utils)) + VkResult setDebugUtilsObjectNameEXT(const VkDebugUtilsObjectNameInfoEXT* pNameInfo) const noexcept { + return fp_vkSetDebugUtilsObjectNameEXT(device, pNameInfo); + } +#endif +#if (defined(VK_EXT_debug_utils)) + VkResult setDebugUtilsObjectTagEXT(const VkDebugUtilsObjectTagInfoEXT* pTagInfo) const noexcept { + return fp_vkSetDebugUtilsObjectTagEXT(device, pTagInfo); + } +#endif +#if (defined(VK_EXT_pageable_device_local_memory)) + void setDeviceMemoryPriorityEXT(VkDeviceMemory memory, float priority) const noexcept { + fp_vkSetDeviceMemoryPriorityEXT(device, memory, priority); + } +#endif + VkResult setEvent(VkEvent event) const noexcept { + return fp_vkSetEvent(device, event); + } +#if (defined(VK_EXT_hdr_metadata)) + void setHdrMetadataEXT(uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata) 
const noexcept { + fp_vkSetHdrMetadataEXT(device, swapchainCount, pSwapchains, pMetadata); + } +#endif +#if (defined(VK_NV_low_latency2)) + void setLatencyMarkerNV(VkSwapchainKHR swapchain, const VkSetLatencyMarkerInfoNV* pLatencyMarkerInfo) const noexcept { + fp_vkSetLatencyMarkerNV(device, swapchain, pLatencyMarkerInfo); + } +#endif +#if (defined(VK_NV_low_latency2)) + VkResult setLatencySleepModeNV(VkSwapchainKHR swapchain, const VkLatencySleepModeInfoNV* pSleepModeInfo) const noexcept { + return fp_vkSetLatencySleepModeNV(device, swapchain, pSleepModeInfo); + } +#endif +#if (defined(VK_AMD_display_native_hdr)) + void setLocalDimmingAMD(VkSwapchainKHR swapChain, VkBool32 localDimmingEnable) const noexcept { + fp_vkSetLocalDimmingAMD(device, swapChain, localDimmingEnable); + } +#endif +#if (defined(VK_VERSION_1_3)) + VkResult setPrivateData(VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data) const noexcept { + return fp_vkSetPrivateData(device, objectType, objectHandle, privateDataSlot, data); + } +#endif +#if (defined(VK_EXT_private_data)) + VkResult setPrivateDataEXT(VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data) const noexcept { + return fp_vkSetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, data); + } +#endif +#if (defined(VK_VERSION_1_2)) + VkResult signalSemaphore(const VkSemaphoreSignalInfo* pSignalInfo) const noexcept { + return fp_vkSignalSemaphore(device, pSignalInfo); + } +#endif +#if (defined(VK_KHR_timeline_semaphore)) + VkResult signalSemaphoreKHR(const VkSemaphoreSignalInfoKHR* pSignalInfo) const noexcept { + return fp_vkSignalSemaphoreKHR(device, pSignalInfo); + } +#endif +#if (defined(VK_VERSION_1_4)) + VkResult transitionImageLayout(uint32_t transitionCount, const VkHostImageLayoutTransitionInfo* pTransitions) const noexcept { + return fp_vkTransitionImageLayout(device, transitionCount, pTransitions); + } +#endif +#if 
(defined(VK_EXT_host_image_copy)) + VkResult transitionImageLayoutEXT(uint32_t transitionCount, const VkHostImageLayoutTransitionInfoEXT* pTransitions) const noexcept { + return fp_vkTransitionImageLayoutEXT(device, transitionCount, pTransitions); + } +#endif +#if (defined(VK_VERSION_1_1)) + void trimCommandPool(VkCommandPool commandPool, VkCommandPoolTrimFlags flags) const noexcept { + fp_vkTrimCommandPool(device, commandPool, flags); + } +#endif +#if (defined(VK_KHR_maintenance1)) + void trimCommandPoolKHR(VkCommandPool commandPool, VkCommandPoolTrimFlags flags) const noexcept { + fp_vkTrimCommandPoolKHR(device, commandPool, flags); + } +#endif +#if (defined(VK_INTEL_performance_query)) + void uninitializePerformanceApiINTEL() const noexcept { + fp_vkUninitializePerformanceApiINTEL(device); + } +#endif + void unmapMemory(VkDeviceMemory memory) const noexcept { + fp_vkUnmapMemory(device, memory); + } +#if (defined(VK_VERSION_1_4)) + VkResult unmapMemory2(const VkMemoryUnmapInfo* pMemoryUnmapInfo) const noexcept { + return fp_vkUnmapMemory2(device, pMemoryUnmapInfo); + } +#endif +#if (defined(VK_KHR_map_memory2)) + VkResult unmapMemory2KHR(const VkMemoryUnmapInfoKHR* pMemoryUnmapInfo) const noexcept { + return fp_vkUnmapMemory2KHR(device, pMemoryUnmapInfo); + } +#endif +#if (defined(VK_VERSION_1_1)) + void updateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) const noexcept { + fp_vkUpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData); + } +#endif +#if (defined(VK_KHR_descriptor_update_template)) + void updateDescriptorSetWithTemplateKHR(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData) const noexcept { + fp_vkUpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData); + } +#endif + void updateDescriptorSets(uint32_t descriptorWriteCount, const 
VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies) const noexcept { + fp_vkUpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); + } +#if (defined(VK_EXT_device_generated_commands)) + void updateIndirectExecutionSetPipelineEXT(VkIndirectExecutionSetEXT indirectExecutionSet, uint32_t executionSetWriteCount, const VkWriteIndirectExecutionSetPipelineEXT* pExecutionSetWrites) const noexcept { + fp_vkUpdateIndirectExecutionSetPipelineEXT(device, indirectExecutionSet, executionSetWriteCount, pExecutionSetWrites); + } +#endif +#if (defined(VK_EXT_device_generated_commands)) + void updateIndirectExecutionSetShaderEXT(VkIndirectExecutionSetEXT indirectExecutionSet, uint32_t executionSetWriteCount, const VkWriteIndirectExecutionSetShaderEXT* pExecutionSetWrites) const noexcept { + fp_vkUpdateIndirectExecutionSetShaderEXT(device, indirectExecutionSet, executionSetWriteCount, pExecutionSetWrites); + } +#endif +#if (defined(VK_KHR_video_queue)) + VkResult updateVideoSessionParametersKHR(VkVideoSessionParametersKHR videoSessionParameters, const VkVideoSessionParametersUpdateInfoKHR* pUpdateInfo) const noexcept { + return fp_vkUpdateVideoSessionParametersKHR(device, videoSessionParameters, pUpdateInfo); + } +#endif + VkResult waitForFences(uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout) const noexcept { + return fp_vkWaitForFences(device, fenceCount, pFences, waitAll, timeout); + } +#if (defined(VK_KHR_present_wait2)) + VkResult waitForPresent2KHR(VkSwapchainKHR swapchain, const VkPresentWait2InfoKHR* pPresentWait2Info) const noexcept { + return fp_vkWaitForPresent2KHR(device, swapchain, pPresentWait2Info); + } +#endif +#if (defined(VK_KHR_present_wait)) + VkResult waitForPresentKHR(VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout) const noexcept { + return fp_vkWaitForPresentKHR(device, swapchain, presentId, 
timeout); + } +#endif +#if (defined(VK_VERSION_1_2)) + VkResult waitSemaphores(const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout) const noexcept { + return fp_vkWaitSemaphores(device, pWaitInfo, timeout); + } +#endif +#if (defined(VK_KHR_timeline_semaphore)) + VkResult waitSemaphoresKHR(const VkSemaphoreWaitInfoKHR* pWaitInfo, uint64_t timeout) const noexcept { + return fp_vkWaitSemaphoresKHR(device, pWaitInfo, timeout); + } +#endif +#if (defined(VK_KHR_acceleration_structure)) + VkResult writeAccelerationStructuresPropertiesKHR(uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride) const noexcept { + return fp_vkWriteAccelerationStructuresPropertiesKHR(device, accelerationStructureCount, pAccelerationStructures, queryType, dataSize, pData, stride); + } +#endif +#if (defined(VK_EXT_opacity_micromap)) + VkResult writeMicromapsPropertiesEXT(uint32_t micromapCount, const VkMicromapEXT* pMicromaps, VkQueryType queryType, size_t dataSize, void* pData, size_t stride) const noexcept { + return fp_vkWriteMicromapsPropertiesEXT(device, micromapCount, pMicromaps, queryType, dataSize, pData, stride); + } +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + PFN_vkAcquireFullScreenExclusiveModeEXT fp_vkAcquireFullScreenExclusiveModeEXT = nullptr; +#else + void * fp_vkAcquireFullScreenExclusiveModeEXT{}; +#endif +#if (defined(VK_OHOS_native_buffer)) + PFN_vkAcquireImageOHOS fp_vkAcquireImageOHOS = nullptr; +#else + void * fp_vkAcquireImageOHOS{}; +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + PFN_vkAcquireNextImage2KHR fp_vkAcquireNextImage2KHR = nullptr; +#else + void * fp_vkAcquireNextImage2KHR{}; +#endif +#if (defined(VK_KHR_swapchain)) + PFN_vkAcquireNextImageKHR fp_vkAcquireNextImageKHR = nullptr; +#else + void * fp_vkAcquireNextImageKHR{}; +#endif +#if (defined(VK_INTEL_performance_query)) + 
PFN_vkAcquirePerformanceConfigurationINTEL fp_vkAcquirePerformanceConfigurationINTEL = nullptr; +#else + void * fp_vkAcquirePerformanceConfigurationINTEL{}; +#endif +#if (defined(VK_KHR_performance_query)) + PFN_vkAcquireProfilingLockKHR fp_vkAcquireProfilingLockKHR = nullptr; +#else + void * fp_vkAcquireProfilingLockKHR{}; +#endif + PFN_vkAllocateCommandBuffers fp_vkAllocateCommandBuffers = nullptr; + PFN_vkAllocateDescriptorSets fp_vkAllocateDescriptorSets = nullptr; + PFN_vkAllocateMemory fp_vkAllocateMemory = nullptr; +#if (defined(VK_AMD_anti_lag)) + PFN_vkAntiLagUpdateAMD fp_vkAntiLagUpdateAMD = nullptr; +#else + void * fp_vkAntiLagUpdateAMD{}; +#endif + PFN_vkBeginCommandBuffer fp_vkBeginCommandBuffer = nullptr; +#if (defined(VK_NV_ray_tracing)) + PFN_vkBindAccelerationStructureMemoryNV fp_vkBindAccelerationStructureMemoryNV = nullptr; +#else + void * fp_vkBindAccelerationStructureMemoryNV{}; +#endif + PFN_vkBindBufferMemory fp_vkBindBufferMemory = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkBindBufferMemory2 fp_vkBindBufferMemory2 = nullptr; +#else + void * fp_vkBindBufferMemory2{}; +#endif +#if (defined(VK_KHR_bind_memory2)) + PFN_vkBindBufferMemory2KHR fp_vkBindBufferMemory2KHR = nullptr; +#else + void * fp_vkBindBufferMemory2KHR{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkBindDataGraphPipelineSessionMemoryARM fp_vkBindDataGraphPipelineSessionMemoryARM = nullptr; +#else + void * fp_vkBindDataGraphPipelineSessionMemoryARM{}; +#endif + PFN_vkBindImageMemory fp_vkBindImageMemory = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkBindImageMemory2 fp_vkBindImageMemory2 = nullptr; +#else + void * fp_vkBindImageMemory2{}; +#endif +#if (defined(VK_KHR_bind_memory2)) + PFN_vkBindImageMemory2KHR fp_vkBindImageMemory2KHR = nullptr; +#else + void * fp_vkBindImageMemory2KHR{}; +#endif +#if (defined(VK_NV_optical_flow)) + PFN_vkBindOpticalFlowSessionImageNV fp_vkBindOpticalFlowSessionImageNV = nullptr; +#else + void * fp_vkBindOpticalFlowSessionImageNV{}; 
+#endif +#if (defined(VK_ARM_tensors)) + PFN_vkBindTensorMemoryARM fp_vkBindTensorMemoryARM = nullptr; +#else + void * fp_vkBindTensorMemoryARM{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkBindVideoSessionMemoryKHR fp_vkBindVideoSessionMemoryKHR = nullptr; +#else + void * fp_vkBindVideoSessionMemoryKHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkBuildAccelerationStructuresKHR fp_vkBuildAccelerationStructuresKHR = nullptr; +#else + void * fp_vkBuildAccelerationStructuresKHR{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkBuildMicromapsEXT fp_vkBuildMicromapsEXT = nullptr; +#else + void * fp_vkBuildMicromapsEXT{}; +#endif +#if (defined(VK_EXT_conditional_rendering)) + PFN_vkCmdBeginConditionalRenderingEXT fp_vkCmdBeginConditionalRenderingEXT = nullptr; +#else + void * fp_vkCmdBeginConditionalRenderingEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkCmdBeginDebugUtilsLabelEXT fp_vkCmdBeginDebugUtilsLabelEXT = nullptr; +#else + void * fp_vkCmdBeginDebugUtilsLabelEXT{}; +#endif +#if (defined(VK_QCOM_tile_shading)) + PFN_vkCmdBeginPerTileExecutionQCOM fp_vkCmdBeginPerTileExecutionQCOM = nullptr; +#else + void * fp_vkCmdBeginPerTileExecutionQCOM{}; +#endif + PFN_vkCmdBeginQuery fp_vkCmdBeginQuery = nullptr; +#if (defined(VK_EXT_transform_feedback)) + PFN_vkCmdBeginQueryIndexedEXT fp_vkCmdBeginQueryIndexedEXT = nullptr; +#else + void * fp_vkCmdBeginQueryIndexedEXT{}; +#endif + PFN_vkCmdBeginRenderPass fp_vkCmdBeginRenderPass = nullptr; +#if (defined(VK_VERSION_1_2)) + PFN_vkCmdBeginRenderPass2 fp_vkCmdBeginRenderPass2 = nullptr; +#else + void * fp_vkCmdBeginRenderPass2{}; +#endif +#if (defined(VK_KHR_create_renderpass2)) + PFN_vkCmdBeginRenderPass2KHR fp_vkCmdBeginRenderPass2KHR = nullptr; +#else + void * fp_vkCmdBeginRenderPass2KHR{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdBeginRendering fp_vkCmdBeginRendering = nullptr; +#else + void * fp_vkCmdBeginRendering{}; +#endif +#if (defined(VK_KHR_dynamic_rendering)) + 
PFN_vkCmdBeginRenderingKHR fp_vkCmdBeginRenderingKHR = nullptr; +#else + void * fp_vkCmdBeginRenderingKHR{}; +#endif +#if (defined(VK_EXT_transform_feedback)) + PFN_vkCmdBeginTransformFeedbackEXT fp_vkCmdBeginTransformFeedbackEXT = nullptr; +#else + void * fp_vkCmdBeginTransformFeedbackEXT{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkCmdBeginVideoCodingKHR fp_vkCmdBeginVideoCodingKHR = nullptr; +#else + void * fp_vkCmdBeginVideoCodingKHR{}; +#endif +#if (defined(VK_KHR_maintenance6)) + PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT fp_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT = nullptr; +#else + void * fp_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT fp_vkCmdBindDescriptorBufferEmbeddedSamplersEXT = nullptr; +#else + void * fp_vkCmdBindDescriptorBufferEmbeddedSamplersEXT{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkCmdBindDescriptorBuffersEXT fp_vkCmdBindDescriptorBuffersEXT = nullptr; +#else + void * fp_vkCmdBindDescriptorBuffersEXT{}; +#endif + PFN_vkCmdBindDescriptorSets fp_vkCmdBindDescriptorSets = nullptr; +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdBindDescriptorSets2 fp_vkCmdBindDescriptorSets2 = nullptr; +#else + void * fp_vkCmdBindDescriptorSets2{}; +#endif +#if (defined(VK_KHR_maintenance6)) + PFN_vkCmdBindDescriptorSets2KHR fp_vkCmdBindDescriptorSets2KHR = nullptr; +#else + void * fp_vkCmdBindDescriptorSets2KHR{}; +#endif + PFN_vkCmdBindIndexBuffer fp_vkCmdBindIndexBuffer = nullptr; +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdBindIndexBuffer2 fp_vkCmdBindIndexBuffer2 = nullptr; +#else + void * fp_vkCmdBindIndexBuffer2{}; +#endif +#if (defined(VK_KHR_maintenance5)) + PFN_vkCmdBindIndexBuffer2KHR fp_vkCmdBindIndexBuffer2KHR = nullptr; +#else + void * fp_vkCmdBindIndexBuffer2KHR{}; +#endif +#if (defined(VK_HUAWEI_invocation_mask)) + PFN_vkCmdBindInvocationMaskHUAWEI fp_vkCmdBindInvocationMaskHUAWEI = nullptr; +#else + void * 
fp_vkCmdBindInvocationMaskHUAWEI{}; +#endif + PFN_vkCmdBindPipeline fp_vkCmdBindPipeline = nullptr; +#if (defined(VK_NV_device_generated_commands)) + PFN_vkCmdBindPipelineShaderGroupNV fp_vkCmdBindPipelineShaderGroupNV = nullptr; +#else + void * fp_vkCmdBindPipelineShaderGroupNV{}; +#endif +#if (defined(VK_EXT_shader_object)) + PFN_vkCmdBindShadersEXT fp_vkCmdBindShadersEXT = nullptr; +#else + void * fp_vkCmdBindShadersEXT{}; +#endif +#if (defined(VK_NV_shading_rate_image)) + PFN_vkCmdBindShadingRateImageNV fp_vkCmdBindShadingRateImageNV = nullptr; +#else + void * fp_vkCmdBindShadingRateImageNV{}; +#endif +#if (defined(VK_QCOM_tile_memory_heap)) + PFN_vkCmdBindTileMemoryQCOM fp_vkCmdBindTileMemoryQCOM = nullptr; +#else + void * fp_vkCmdBindTileMemoryQCOM{}; +#endif +#if (defined(VK_EXT_transform_feedback)) + PFN_vkCmdBindTransformFeedbackBuffersEXT fp_vkCmdBindTransformFeedbackBuffersEXT = nullptr; +#else + void * fp_vkCmdBindTransformFeedbackBuffersEXT{}; +#endif + PFN_vkCmdBindVertexBuffers fp_vkCmdBindVertexBuffers = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdBindVertexBuffers2 fp_vkCmdBindVertexBuffers2 = nullptr; +#else + void * fp_vkCmdBindVertexBuffers2{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdBindVertexBuffers2EXT fp_vkCmdBindVertexBuffers2EXT = nullptr; +#else + void * fp_vkCmdBindVertexBuffers2EXT{}; +#endif + PFN_vkCmdBlitImage fp_vkCmdBlitImage = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdBlitImage2 fp_vkCmdBlitImage2 = nullptr; +#else + void * fp_vkCmdBlitImage2{}; +#endif +#if (defined(VK_KHR_copy_commands2)) + PFN_vkCmdBlitImage2KHR fp_vkCmdBlitImage2KHR = nullptr; +#else + void * fp_vkCmdBlitImage2KHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkCmdBuildAccelerationStructureNV fp_vkCmdBuildAccelerationStructureNV = nullptr; +#else + void * fp_vkCmdBuildAccelerationStructureNV{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + 
PFN_vkCmdBuildAccelerationStructuresIndirectKHR fp_vkCmdBuildAccelerationStructuresIndirectKHR = nullptr; +#else + void * fp_vkCmdBuildAccelerationStructuresIndirectKHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkCmdBuildAccelerationStructuresKHR fp_vkCmdBuildAccelerationStructuresKHR = nullptr; +#else + void * fp_vkCmdBuildAccelerationStructuresKHR{}; +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + PFN_vkCmdBuildClusterAccelerationStructureIndirectNV fp_vkCmdBuildClusterAccelerationStructureIndirectNV = nullptr; +#else + void * fp_vkCmdBuildClusterAccelerationStructureIndirectNV{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCmdBuildMicromapsEXT fp_vkCmdBuildMicromapsEXT = nullptr; +#else + void * fp_vkCmdBuildMicromapsEXT{}; +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + PFN_vkCmdBuildPartitionedAccelerationStructuresNV fp_vkCmdBuildPartitionedAccelerationStructuresNV = nullptr; +#else + void * fp_vkCmdBuildPartitionedAccelerationStructuresNV{}; +#endif + PFN_vkCmdClearAttachments fp_vkCmdClearAttachments = nullptr; + PFN_vkCmdClearColorImage fp_vkCmdClearColorImage = nullptr; + PFN_vkCmdClearDepthStencilImage fp_vkCmdClearDepthStencilImage = nullptr; +#if (defined(VK_KHR_video_queue)) + PFN_vkCmdControlVideoCodingKHR fp_vkCmdControlVideoCodingKHR = nullptr; +#else + void * fp_vkCmdControlVideoCodingKHR{}; +#endif +#if (defined(VK_NV_cooperative_vector)) + PFN_vkCmdConvertCooperativeVectorMatrixNV fp_vkCmdConvertCooperativeVectorMatrixNV = nullptr; +#else + void * fp_vkCmdConvertCooperativeVectorMatrixNV{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkCmdCopyAccelerationStructureKHR fp_vkCmdCopyAccelerationStructureKHR = nullptr; +#else + void * fp_vkCmdCopyAccelerationStructureKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkCmdCopyAccelerationStructureNV fp_vkCmdCopyAccelerationStructureNV = nullptr; +#else + void * fp_vkCmdCopyAccelerationStructureNV{}; +#endif +#if 
(defined(VK_KHR_acceleration_structure)) + PFN_vkCmdCopyAccelerationStructureToMemoryKHR fp_vkCmdCopyAccelerationStructureToMemoryKHR = nullptr; +#else + void * fp_vkCmdCopyAccelerationStructureToMemoryKHR{}; +#endif + PFN_vkCmdCopyBuffer fp_vkCmdCopyBuffer = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdCopyBuffer2 fp_vkCmdCopyBuffer2 = nullptr; +#else + void * fp_vkCmdCopyBuffer2{}; +#endif +#if (defined(VK_KHR_copy_commands2)) + PFN_vkCmdCopyBuffer2KHR fp_vkCmdCopyBuffer2KHR = nullptr; +#else + void * fp_vkCmdCopyBuffer2KHR{}; +#endif + PFN_vkCmdCopyBufferToImage fp_vkCmdCopyBufferToImage = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdCopyBufferToImage2 fp_vkCmdCopyBufferToImage2 = nullptr; +#else + void * fp_vkCmdCopyBufferToImage2{}; +#endif +#if (defined(VK_KHR_copy_commands2)) + PFN_vkCmdCopyBufferToImage2KHR fp_vkCmdCopyBufferToImage2KHR = nullptr; +#else + void * fp_vkCmdCopyBufferToImage2KHR{}; +#endif + PFN_vkCmdCopyImage fp_vkCmdCopyImage = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdCopyImage2 fp_vkCmdCopyImage2 = nullptr; +#else + void * fp_vkCmdCopyImage2{}; +#endif +#if (defined(VK_KHR_copy_commands2)) + PFN_vkCmdCopyImage2KHR fp_vkCmdCopyImage2KHR = nullptr; +#else + void * fp_vkCmdCopyImage2KHR{}; +#endif + PFN_vkCmdCopyImageToBuffer fp_vkCmdCopyImageToBuffer = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdCopyImageToBuffer2 fp_vkCmdCopyImageToBuffer2 = nullptr; +#else + void * fp_vkCmdCopyImageToBuffer2{}; +#endif +#if (defined(VK_KHR_copy_commands2)) + PFN_vkCmdCopyImageToBuffer2KHR fp_vkCmdCopyImageToBuffer2KHR = nullptr; +#else + void * fp_vkCmdCopyImageToBuffer2KHR{}; +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + PFN_vkCmdCopyMemoryIndirectKHR fp_vkCmdCopyMemoryIndirectKHR = nullptr; +#else + void * fp_vkCmdCopyMemoryIndirectKHR{}; +#endif +#if (defined(VK_NV_copy_memory_indirect)) + PFN_vkCmdCopyMemoryIndirectNV fp_vkCmdCopyMemoryIndirectNV = nullptr; +#else + void * fp_vkCmdCopyMemoryIndirectNV{}; +#endif +#if 
(defined(VK_KHR_acceleration_structure)) + PFN_vkCmdCopyMemoryToAccelerationStructureKHR fp_vkCmdCopyMemoryToAccelerationStructureKHR = nullptr; +#else + void * fp_vkCmdCopyMemoryToAccelerationStructureKHR{}; +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + PFN_vkCmdCopyMemoryToImageIndirectKHR fp_vkCmdCopyMemoryToImageIndirectKHR = nullptr; +#else + void * fp_vkCmdCopyMemoryToImageIndirectKHR{}; +#endif +#if (defined(VK_NV_copy_memory_indirect)) + PFN_vkCmdCopyMemoryToImageIndirectNV fp_vkCmdCopyMemoryToImageIndirectNV = nullptr; +#else + void * fp_vkCmdCopyMemoryToImageIndirectNV{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCmdCopyMemoryToMicromapEXT fp_vkCmdCopyMemoryToMicromapEXT = nullptr; +#else + void * fp_vkCmdCopyMemoryToMicromapEXT{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCmdCopyMicromapEXT fp_vkCmdCopyMicromapEXT = nullptr; +#else + void * fp_vkCmdCopyMicromapEXT{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCmdCopyMicromapToMemoryEXT fp_vkCmdCopyMicromapToMemoryEXT = nullptr; +#else + void * fp_vkCmdCopyMicromapToMemoryEXT{}; +#endif + PFN_vkCmdCopyQueryPoolResults fp_vkCmdCopyQueryPoolResults = nullptr; +#if (defined(VK_ARM_tensors)) + PFN_vkCmdCopyTensorARM fp_vkCmdCopyTensorARM = nullptr; +#else + void * fp_vkCmdCopyTensorARM{}; +#endif +#if (defined(VK_NVX_binary_import)) + PFN_vkCmdCuLaunchKernelNVX fp_vkCmdCuLaunchKernelNVX = nullptr; +#else + void * fp_vkCmdCuLaunchKernelNVX{}; +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + PFN_vkCmdCudaLaunchKernelNV fp_vkCmdCudaLaunchKernelNV = nullptr; +#else + void * fp_vkCmdCudaLaunchKernelNV{}; +#endif +#if (defined(VK_EXT_debug_marker)) + PFN_vkCmdDebugMarkerBeginEXT fp_vkCmdDebugMarkerBeginEXT = nullptr; +#else + void * fp_vkCmdDebugMarkerBeginEXT{}; +#endif +#if (defined(VK_EXT_debug_marker)) + PFN_vkCmdDebugMarkerEndEXT fp_vkCmdDebugMarkerEndEXT = nullptr; +#else + void * fp_vkCmdDebugMarkerEndEXT{}; +#endif +#if (defined(VK_EXT_debug_marker)) + 
PFN_vkCmdDebugMarkerInsertEXT fp_vkCmdDebugMarkerInsertEXT = nullptr; +#else + void * fp_vkCmdDebugMarkerInsertEXT{}; +#endif +#if (defined(VK_KHR_video_decode_queue)) + PFN_vkCmdDecodeVideoKHR fp_vkCmdDecodeVideoKHR = nullptr; +#else + void * fp_vkCmdDecodeVideoKHR{}; +#endif +#if (defined(VK_EXT_memory_decompression)) + PFN_vkCmdDecompressMemoryEXT fp_vkCmdDecompressMemoryEXT = nullptr; +#else + void * fp_vkCmdDecompressMemoryEXT{}; +#endif +#if (defined(VK_EXT_memory_decompression)) + PFN_vkCmdDecompressMemoryIndirectCountEXT fp_vkCmdDecompressMemoryIndirectCountEXT = nullptr; +#else + void * fp_vkCmdDecompressMemoryIndirectCountEXT{}; +#endif +#if (defined(VK_NV_memory_decompression)) + PFN_vkCmdDecompressMemoryIndirectCountNV fp_vkCmdDecompressMemoryIndirectCountNV = nullptr; +#else + void * fp_vkCmdDecompressMemoryIndirectCountNV{}; +#endif +#if (defined(VK_NV_memory_decompression)) + PFN_vkCmdDecompressMemoryNV fp_vkCmdDecompressMemoryNV = nullptr; +#else + void * fp_vkCmdDecompressMemoryNV{}; +#endif + PFN_vkCmdDispatch fp_vkCmdDispatch = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkCmdDispatchBase fp_vkCmdDispatchBase = nullptr; +#else + void * fp_vkCmdDispatchBase{}; +#endif +#if (defined(VK_KHR_device_group)) + PFN_vkCmdDispatchBaseKHR fp_vkCmdDispatchBaseKHR = nullptr; +#else + void * fp_vkCmdDispatchBaseKHR{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkCmdDispatchDataGraphARM fp_vkCmdDispatchDataGraphARM = nullptr; +#else + void * fp_vkCmdDispatchDataGraphARM{}; +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + PFN_vkCmdDispatchGraphAMDX fp_vkCmdDispatchGraphAMDX = nullptr; +#else + void * fp_vkCmdDispatchGraphAMDX{}; +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + PFN_vkCmdDispatchGraphIndirectAMDX fp_vkCmdDispatchGraphIndirectAMDX = nullptr; +#else + void * fp_vkCmdDispatchGraphIndirectAMDX{}; +#endif +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + 
PFN_vkCmdDispatchGraphIndirectCountAMDX fp_vkCmdDispatchGraphIndirectCountAMDX = nullptr; +#else + void * fp_vkCmdDispatchGraphIndirectCountAMDX{}; +#endif + PFN_vkCmdDispatchIndirect fp_vkCmdDispatchIndirect = nullptr; +#if (defined(VK_QCOM_tile_shading)) && VK_HEADER_VERSION >= 316 + PFN_vkCmdDispatchTileQCOM fp_vkCmdDispatchTileQCOM = nullptr; +#else + void * fp_vkCmdDispatchTileQCOM{}; +#endif + PFN_vkCmdDraw fp_vkCmdDraw = nullptr; +#if (defined(VK_HUAWEI_cluster_culling_shader)) + PFN_vkCmdDrawClusterHUAWEI fp_vkCmdDrawClusterHUAWEI = nullptr; +#else + void * fp_vkCmdDrawClusterHUAWEI{}; +#endif +#if (defined(VK_HUAWEI_cluster_culling_shader)) + PFN_vkCmdDrawClusterIndirectHUAWEI fp_vkCmdDrawClusterIndirectHUAWEI = nullptr; +#else + void * fp_vkCmdDrawClusterIndirectHUAWEI{}; +#endif + PFN_vkCmdDrawIndexed fp_vkCmdDrawIndexed = nullptr; + PFN_vkCmdDrawIndexedIndirect fp_vkCmdDrawIndexedIndirect = nullptr; +#if (defined(VK_VERSION_1_2)) + PFN_vkCmdDrawIndexedIndirectCount fp_vkCmdDrawIndexedIndirectCount = nullptr; +#else + void * fp_vkCmdDrawIndexedIndirectCount{}; +#endif +#if (defined(VK_AMD_draw_indirect_count)) + PFN_vkCmdDrawIndexedIndirectCountAMD fp_vkCmdDrawIndexedIndirectCountAMD = nullptr; +#else + void * fp_vkCmdDrawIndexedIndirectCountAMD{}; +#endif +#if (defined(VK_KHR_draw_indirect_count)) + PFN_vkCmdDrawIndexedIndirectCountKHR fp_vkCmdDrawIndexedIndirectCountKHR = nullptr; +#else + void * fp_vkCmdDrawIndexedIndirectCountKHR{}; +#endif + PFN_vkCmdDrawIndirect fp_vkCmdDrawIndirect = nullptr; +#if (defined(VK_EXT_transform_feedback)) + PFN_vkCmdDrawIndirectByteCountEXT fp_vkCmdDrawIndirectByteCountEXT = nullptr; +#else + void * fp_vkCmdDrawIndirectByteCountEXT{}; +#endif +#if (defined(VK_VERSION_1_2)) + PFN_vkCmdDrawIndirectCount fp_vkCmdDrawIndirectCount = nullptr; +#else + void * fp_vkCmdDrawIndirectCount{}; +#endif +#if (defined(VK_AMD_draw_indirect_count)) + PFN_vkCmdDrawIndirectCountAMD fp_vkCmdDrawIndirectCountAMD = nullptr; +#else + void * 
fp_vkCmdDrawIndirectCountAMD{}; +#endif +#if (defined(VK_KHR_draw_indirect_count)) + PFN_vkCmdDrawIndirectCountKHR fp_vkCmdDrawIndirectCountKHR = nullptr; +#else + void * fp_vkCmdDrawIndirectCountKHR{}; +#endif +#if (defined(VK_EXT_mesh_shader)) + PFN_vkCmdDrawMeshTasksEXT fp_vkCmdDrawMeshTasksEXT = nullptr; +#else + void * fp_vkCmdDrawMeshTasksEXT{}; +#endif +#if (defined(VK_EXT_mesh_shader)) + PFN_vkCmdDrawMeshTasksIndirectCountEXT fp_vkCmdDrawMeshTasksIndirectCountEXT = nullptr; +#else + void * fp_vkCmdDrawMeshTasksIndirectCountEXT{}; +#endif +#if (defined(VK_NV_mesh_shader)) + PFN_vkCmdDrawMeshTasksIndirectCountNV fp_vkCmdDrawMeshTasksIndirectCountNV = nullptr; +#else + void * fp_vkCmdDrawMeshTasksIndirectCountNV{}; +#endif +#if (defined(VK_EXT_mesh_shader)) + PFN_vkCmdDrawMeshTasksIndirectEXT fp_vkCmdDrawMeshTasksIndirectEXT = nullptr; +#else + void * fp_vkCmdDrawMeshTasksIndirectEXT{}; +#endif +#if (defined(VK_NV_mesh_shader)) + PFN_vkCmdDrawMeshTasksIndirectNV fp_vkCmdDrawMeshTasksIndirectNV = nullptr; +#else + void * fp_vkCmdDrawMeshTasksIndirectNV{}; +#endif +#if (defined(VK_NV_mesh_shader)) + PFN_vkCmdDrawMeshTasksNV fp_vkCmdDrawMeshTasksNV = nullptr; +#else + void * fp_vkCmdDrawMeshTasksNV{}; +#endif +#if (defined(VK_EXT_multi_draw)) + PFN_vkCmdDrawMultiEXT fp_vkCmdDrawMultiEXT = nullptr; +#else + void * fp_vkCmdDrawMultiEXT{}; +#endif +#if (defined(VK_EXT_multi_draw)) + PFN_vkCmdDrawMultiIndexedEXT fp_vkCmdDrawMultiIndexedEXT = nullptr; +#else + void * fp_vkCmdDrawMultiIndexedEXT{}; +#endif +#if (defined(VK_KHR_video_encode_queue)) + PFN_vkCmdEncodeVideoKHR fp_vkCmdEncodeVideoKHR = nullptr; +#else + void * fp_vkCmdEncodeVideoKHR{}; +#endif +#if (defined(VK_EXT_conditional_rendering)) + PFN_vkCmdEndConditionalRenderingEXT fp_vkCmdEndConditionalRenderingEXT = nullptr; +#else + void * fp_vkCmdEndConditionalRenderingEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkCmdEndDebugUtilsLabelEXT fp_vkCmdEndDebugUtilsLabelEXT = nullptr; +#else + void * 
fp_vkCmdEndDebugUtilsLabelEXT{}; +#endif +#if (defined(VK_QCOM_tile_shading)) + PFN_vkCmdEndPerTileExecutionQCOM fp_vkCmdEndPerTileExecutionQCOM = nullptr; +#else + void * fp_vkCmdEndPerTileExecutionQCOM{}; +#endif + PFN_vkCmdEndQuery fp_vkCmdEndQuery = nullptr; +#if (defined(VK_EXT_transform_feedback)) + PFN_vkCmdEndQueryIndexedEXT fp_vkCmdEndQueryIndexedEXT = nullptr; +#else + void * fp_vkCmdEndQueryIndexedEXT{}; +#endif + PFN_vkCmdEndRenderPass fp_vkCmdEndRenderPass = nullptr; +#if (defined(VK_VERSION_1_2)) + PFN_vkCmdEndRenderPass2 fp_vkCmdEndRenderPass2 = nullptr; +#else + void * fp_vkCmdEndRenderPass2{}; +#endif +#if (defined(VK_KHR_create_renderpass2)) + PFN_vkCmdEndRenderPass2KHR fp_vkCmdEndRenderPass2KHR = nullptr; +#else + void * fp_vkCmdEndRenderPass2KHR{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdEndRendering fp_vkCmdEndRendering = nullptr; +#else + void * fp_vkCmdEndRendering{}; +#endif +#if (defined(VK_EXT_fragment_density_map_offset)) + PFN_vkCmdEndRendering2EXT fp_vkCmdEndRendering2EXT = nullptr; +#else + void * fp_vkCmdEndRendering2EXT{}; +#endif +#if (defined(VK_KHR_maintenance10)) + PFN_vkCmdEndRendering2KHR fp_vkCmdEndRendering2KHR = nullptr; +#else + void * fp_vkCmdEndRendering2KHR{}; +#endif +#if (defined(VK_KHR_dynamic_rendering)) + PFN_vkCmdEndRenderingKHR fp_vkCmdEndRenderingKHR = nullptr; +#else + void * fp_vkCmdEndRenderingKHR{}; +#endif +#if (defined(VK_EXT_transform_feedback)) + PFN_vkCmdEndTransformFeedbackEXT fp_vkCmdEndTransformFeedbackEXT = nullptr; +#else + void * fp_vkCmdEndTransformFeedbackEXT{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkCmdEndVideoCodingKHR fp_vkCmdEndVideoCodingKHR = nullptr; +#else + void * fp_vkCmdEndVideoCodingKHR{}; +#endif + PFN_vkCmdExecuteCommands fp_vkCmdExecuteCommands = nullptr; +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkCmdExecuteGeneratedCommandsEXT fp_vkCmdExecuteGeneratedCommandsEXT = nullptr; +#else + void * fp_vkCmdExecuteGeneratedCommandsEXT{}; +#endif +#if 
(defined(VK_NV_device_generated_commands)) + PFN_vkCmdExecuteGeneratedCommandsNV fp_vkCmdExecuteGeneratedCommandsNV = nullptr; +#else + void * fp_vkCmdExecuteGeneratedCommandsNV{}; +#endif + PFN_vkCmdFillBuffer fp_vkCmdFillBuffer = nullptr; +#if (defined(VK_AMDX_shader_enqueue)) && VK_HEADER_VERSION >= 298 + PFN_vkCmdInitializeGraphScratchMemoryAMDX fp_vkCmdInitializeGraphScratchMemoryAMDX = nullptr; +#else + void * fp_vkCmdInitializeGraphScratchMemoryAMDX{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkCmdInsertDebugUtilsLabelEXT fp_vkCmdInsertDebugUtilsLabelEXT = nullptr; +#else + void * fp_vkCmdInsertDebugUtilsLabelEXT{}; +#endif + PFN_vkCmdNextSubpass fp_vkCmdNextSubpass = nullptr; +#if (defined(VK_VERSION_1_2)) + PFN_vkCmdNextSubpass2 fp_vkCmdNextSubpass2 = nullptr; +#else + void * fp_vkCmdNextSubpass2{}; +#endif +#if (defined(VK_KHR_create_renderpass2)) + PFN_vkCmdNextSubpass2KHR fp_vkCmdNextSubpass2KHR = nullptr; +#else + void * fp_vkCmdNextSubpass2KHR{}; +#endif +#if (defined(VK_NV_optical_flow)) + PFN_vkCmdOpticalFlowExecuteNV fp_vkCmdOpticalFlowExecuteNV = nullptr; +#else + void * fp_vkCmdOpticalFlowExecuteNV{}; +#endif + PFN_vkCmdPipelineBarrier fp_vkCmdPipelineBarrier = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdPipelineBarrier2 fp_vkCmdPipelineBarrier2 = nullptr; +#else + void * fp_vkCmdPipelineBarrier2{}; +#endif +#if (defined(VK_KHR_synchronization2)) + PFN_vkCmdPipelineBarrier2KHR fp_vkCmdPipelineBarrier2KHR = nullptr; +#else + void * fp_vkCmdPipelineBarrier2KHR{}; +#endif +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkCmdPreprocessGeneratedCommandsEXT fp_vkCmdPreprocessGeneratedCommandsEXT = nullptr; +#else + void * fp_vkCmdPreprocessGeneratedCommandsEXT{}; +#endif +#if (defined(VK_NV_device_generated_commands)) + PFN_vkCmdPreprocessGeneratedCommandsNV fp_vkCmdPreprocessGeneratedCommandsNV = nullptr; +#else + void * fp_vkCmdPreprocessGeneratedCommandsNV{}; +#endif + PFN_vkCmdPushConstants fp_vkCmdPushConstants = nullptr; 
+#if (defined(VK_VERSION_1_4)) + PFN_vkCmdPushConstants2 fp_vkCmdPushConstants2 = nullptr; +#else + void * fp_vkCmdPushConstants2{}; +#endif +#if (defined(VK_KHR_maintenance6)) + PFN_vkCmdPushConstants2KHR fp_vkCmdPushConstants2KHR = nullptr; +#else + void * fp_vkCmdPushConstants2KHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdPushDescriptorSet fp_vkCmdPushDescriptorSet = nullptr; +#else + void * fp_vkCmdPushDescriptorSet{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdPushDescriptorSet2 fp_vkCmdPushDescriptorSet2 = nullptr; +#else + void * fp_vkCmdPushDescriptorSet2{}; +#endif +#if (defined(VK_KHR_maintenance6)) + PFN_vkCmdPushDescriptorSet2KHR fp_vkCmdPushDescriptorSet2KHR = nullptr; +#else + void * fp_vkCmdPushDescriptorSet2KHR{}; +#endif +#if (defined(VK_KHR_push_descriptor)) + PFN_vkCmdPushDescriptorSetKHR fp_vkCmdPushDescriptorSetKHR = nullptr; +#else + void * fp_vkCmdPushDescriptorSetKHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdPushDescriptorSetWithTemplate fp_vkCmdPushDescriptorSetWithTemplate = nullptr; +#else + void * fp_vkCmdPushDescriptorSetWithTemplate{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdPushDescriptorSetWithTemplate2 fp_vkCmdPushDescriptorSetWithTemplate2 = nullptr; +#else + void * fp_vkCmdPushDescriptorSetWithTemplate2{}; +#endif +#if (defined(VK_KHR_maintenance6)) + PFN_vkCmdPushDescriptorSetWithTemplate2KHR fp_vkCmdPushDescriptorSetWithTemplate2KHR = nullptr; +#else + void * fp_vkCmdPushDescriptorSetWithTemplate2KHR{}; +#endif +#if (defined(VK_KHR_push_descriptor) || defined(VK_KHR_descriptor_update_template)) + PFN_vkCmdPushDescriptorSetWithTemplateKHR fp_vkCmdPushDescriptorSetWithTemplateKHR = nullptr; +#else + void * fp_vkCmdPushDescriptorSetWithTemplateKHR{}; +#endif + PFN_vkCmdResetEvent fp_vkCmdResetEvent = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdResetEvent2 fp_vkCmdResetEvent2 = nullptr; +#else + void * fp_vkCmdResetEvent2{}; +#endif +#if (defined(VK_KHR_synchronization2)) + 
PFN_vkCmdResetEvent2KHR fp_vkCmdResetEvent2KHR = nullptr; +#else + void * fp_vkCmdResetEvent2KHR{}; +#endif + PFN_vkCmdResetQueryPool fp_vkCmdResetQueryPool = nullptr; + PFN_vkCmdResolveImage fp_vkCmdResolveImage = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdResolveImage2 fp_vkCmdResolveImage2 = nullptr; +#else + void * fp_vkCmdResolveImage2{}; +#endif +#if (defined(VK_KHR_copy_commands2)) + PFN_vkCmdResolveImage2KHR fp_vkCmdResolveImage2KHR = nullptr; +#else + void * fp_vkCmdResolveImage2KHR{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetAlphaToCoverageEnableEXT fp_vkCmdSetAlphaToCoverageEnableEXT = nullptr; +#else + void * fp_vkCmdSetAlphaToCoverageEnableEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetAlphaToOneEnableEXT fp_vkCmdSetAlphaToOneEnableEXT = nullptr; +#else + void * fp_vkCmdSetAlphaToOneEnableEXT{}; +#endif +#if (defined(VK_EXT_attachment_feedback_loop_dynamic_state)) + PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT fp_vkCmdSetAttachmentFeedbackLoopEnableEXT = nullptr; +#else + void * fp_vkCmdSetAttachmentFeedbackLoopEnableEXT{}; +#endif + PFN_vkCmdSetBlendConstants fp_vkCmdSetBlendConstants = nullptr; +#if (defined(VK_NV_device_diagnostic_checkpoints)) + PFN_vkCmdSetCheckpointNV fp_vkCmdSetCheckpointNV = nullptr; +#else + void * fp_vkCmdSetCheckpointNV{}; +#endif +#if (defined(VK_NV_shading_rate_image)) + PFN_vkCmdSetCoarseSampleOrderNV fp_vkCmdSetCoarseSampleOrderNV = nullptr; +#else + void * fp_vkCmdSetCoarseSampleOrderNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetColorBlendAdvancedEXT fp_vkCmdSetColorBlendAdvancedEXT = nullptr; +#else + void * fp_vkCmdSetColorBlendAdvancedEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetColorBlendEnableEXT fp_vkCmdSetColorBlendEnableEXT = nullptr; +#else + void * 
fp_vkCmdSetColorBlendEnableEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetColorBlendEquationEXT fp_vkCmdSetColorBlendEquationEXT = nullptr; +#else + void * fp_vkCmdSetColorBlendEquationEXT{}; +#endif +#if (defined(VK_EXT_color_write_enable)) + PFN_vkCmdSetColorWriteEnableEXT fp_vkCmdSetColorWriteEnableEXT = nullptr; +#else + void * fp_vkCmdSetColorWriteEnableEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetColorWriteMaskEXT fp_vkCmdSetColorWriteMaskEXT = nullptr; +#else + void * fp_vkCmdSetColorWriteMaskEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetConservativeRasterizationModeEXT fp_vkCmdSetConservativeRasterizationModeEXT = nullptr; +#else + void * fp_vkCmdSetConservativeRasterizationModeEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetCoverageModulationModeNV fp_vkCmdSetCoverageModulationModeNV = nullptr; +#else + void * fp_vkCmdSetCoverageModulationModeNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetCoverageModulationTableEnableNV fp_vkCmdSetCoverageModulationTableEnableNV = nullptr; +#else + void * fp_vkCmdSetCoverageModulationTableEnableNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetCoverageModulationTableNV fp_vkCmdSetCoverageModulationTableNV = nullptr; +#else + void * fp_vkCmdSetCoverageModulationTableNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetCoverageReductionModeNV fp_vkCmdSetCoverageReductionModeNV = nullptr; +#else + void * fp_vkCmdSetCoverageReductionModeNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetCoverageToColorEnableNV fp_vkCmdSetCoverageToColorEnableNV = nullptr; 
+#else + void * fp_vkCmdSetCoverageToColorEnableNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetCoverageToColorLocationNV fp_vkCmdSetCoverageToColorLocationNV = nullptr; +#else + void * fp_vkCmdSetCoverageToColorLocationNV{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetCullMode fp_vkCmdSetCullMode = nullptr; +#else + void * fp_vkCmdSetCullMode{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetCullModeEXT fp_vkCmdSetCullModeEXT = nullptr; +#else + void * fp_vkCmdSetCullModeEXT{}; +#endif + PFN_vkCmdSetDepthBias fp_vkCmdSetDepthBias = nullptr; +#if (defined(VK_EXT_depth_bias_control)) + PFN_vkCmdSetDepthBias2EXT fp_vkCmdSetDepthBias2EXT = nullptr; +#else + void * fp_vkCmdSetDepthBias2EXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetDepthBiasEnable fp_vkCmdSetDepthBiasEnable = nullptr; +#else + void * fp_vkCmdSetDepthBiasEnable{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthBiasEnableEXT fp_vkCmdSetDepthBiasEnableEXT = nullptr; +#else + void * fp_vkCmdSetDepthBiasEnableEXT{}; +#endif + PFN_vkCmdSetDepthBounds fp_vkCmdSetDepthBounds = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetDepthBoundsTestEnable fp_vkCmdSetDepthBoundsTestEnable = nullptr; +#else + void * fp_vkCmdSetDepthBoundsTestEnable{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthBoundsTestEnableEXT fp_vkCmdSetDepthBoundsTestEnableEXT = nullptr; +#else + void * fp_vkCmdSetDepthBoundsTestEnableEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthClampEnableEXT fp_vkCmdSetDepthClampEnableEXT = nullptr; +#else + void * fp_vkCmdSetDepthClampEnableEXT{}; +#endif +#if (defined(VK_EXT_shader_object) || defined(VK_EXT_depth_clamp_control)) + PFN_vkCmdSetDepthClampRangeEXT 
fp_vkCmdSetDepthClampRangeEXT = nullptr; +#else + void * fp_vkCmdSetDepthClampRangeEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthClipEnableEXT fp_vkCmdSetDepthClipEnableEXT = nullptr; +#else + void * fp_vkCmdSetDepthClipEnableEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthClipNegativeOneToOneEXT fp_vkCmdSetDepthClipNegativeOneToOneEXT = nullptr; +#else + void * fp_vkCmdSetDepthClipNegativeOneToOneEXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetDepthCompareOp fp_vkCmdSetDepthCompareOp = nullptr; +#else + void * fp_vkCmdSetDepthCompareOp{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthCompareOpEXT fp_vkCmdSetDepthCompareOpEXT = nullptr; +#else + void * fp_vkCmdSetDepthCompareOpEXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetDepthTestEnable fp_vkCmdSetDepthTestEnable = nullptr; +#else + void * fp_vkCmdSetDepthTestEnable{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthTestEnableEXT fp_vkCmdSetDepthTestEnableEXT = nullptr; +#else + void * fp_vkCmdSetDepthTestEnableEXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetDepthWriteEnable fp_vkCmdSetDepthWriteEnable = nullptr; +#else + void * fp_vkCmdSetDepthWriteEnable{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetDepthWriteEnableEXT fp_vkCmdSetDepthWriteEnableEXT = nullptr; +#else + void * fp_vkCmdSetDepthWriteEnableEXT{}; +#endif +#if (defined(VK_KHR_maintenance6)) + PFN_vkCmdSetDescriptorBufferOffsets2EXT fp_vkCmdSetDescriptorBufferOffsets2EXT = nullptr; +#else + void * fp_vkCmdSetDescriptorBufferOffsets2EXT{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkCmdSetDescriptorBufferOffsetsEXT fp_vkCmdSetDescriptorBufferOffsetsEXT = nullptr; +#else + void * 
fp_vkCmdSetDescriptorBufferOffsetsEXT{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkCmdSetDeviceMask fp_vkCmdSetDeviceMask = nullptr; +#else + void * fp_vkCmdSetDeviceMask{}; +#endif +#if (defined(VK_KHR_device_group)) + PFN_vkCmdSetDeviceMaskKHR fp_vkCmdSetDeviceMaskKHR = nullptr; +#else + void * fp_vkCmdSetDeviceMaskKHR{}; +#endif +#if (defined(VK_EXT_discard_rectangles)) + PFN_vkCmdSetDiscardRectangleEXT fp_vkCmdSetDiscardRectangleEXT = nullptr; +#else + void * fp_vkCmdSetDiscardRectangleEXT{}; +#endif +#if (defined(VK_EXT_discard_rectangles)) && VK_HEADER_VERSION >= 241 + PFN_vkCmdSetDiscardRectangleEnableEXT fp_vkCmdSetDiscardRectangleEnableEXT = nullptr; +#else + void * fp_vkCmdSetDiscardRectangleEnableEXT{}; +#endif +#if (defined(VK_EXT_discard_rectangles)) && VK_HEADER_VERSION >= 241 + PFN_vkCmdSetDiscardRectangleModeEXT fp_vkCmdSetDiscardRectangleModeEXT = nullptr; +#else + void * fp_vkCmdSetDiscardRectangleModeEXT{}; +#endif + PFN_vkCmdSetEvent fp_vkCmdSetEvent = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetEvent2 fp_vkCmdSetEvent2 = nullptr; +#else + void * fp_vkCmdSetEvent2{}; +#endif +#if (defined(VK_KHR_synchronization2)) + PFN_vkCmdSetEvent2KHR fp_vkCmdSetEvent2KHR = nullptr; +#else + void * fp_vkCmdSetEvent2KHR{}; +#endif +#if (defined(VK_NV_scissor_exclusive)) && VK_HEADER_VERSION >= 241 + PFN_vkCmdSetExclusiveScissorEnableNV fp_vkCmdSetExclusiveScissorEnableNV = nullptr; +#else + void * fp_vkCmdSetExclusiveScissorEnableNV{}; +#endif +#if (defined(VK_NV_scissor_exclusive)) + PFN_vkCmdSetExclusiveScissorNV fp_vkCmdSetExclusiveScissorNV = nullptr; +#else + void * fp_vkCmdSetExclusiveScissorNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT fp_vkCmdSetExtraPrimitiveOverestimationSizeEXT = nullptr; +#else + void * fp_vkCmdSetExtraPrimitiveOverestimationSizeEXT{}; +#endif +#if (defined(VK_NV_fragment_shading_rate_enums)) + 
PFN_vkCmdSetFragmentShadingRateEnumNV fp_vkCmdSetFragmentShadingRateEnumNV = nullptr; +#else + void * fp_vkCmdSetFragmentShadingRateEnumNV{}; +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + PFN_vkCmdSetFragmentShadingRateKHR fp_vkCmdSetFragmentShadingRateKHR = nullptr; +#else + void * fp_vkCmdSetFragmentShadingRateKHR{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetFrontFace fp_vkCmdSetFrontFace = nullptr; +#else + void * fp_vkCmdSetFrontFace{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetFrontFaceEXT fp_vkCmdSetFrontFaceEXT = nullptr; +#else + void * fp_vkCmdSetFrontFaceEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetLineRasterizationModeEXT fp_vkCmdSetLineRasterizationModeEXT = nullptr; +#else + void * fp_vkCmdSetLineRasterizationModeEXT{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdSetLineStipple fp_vkCmdSetLineStipple = nullptr; +#else + void * fp_vkCmdSetLineStipple{}; +#endif +#if (defined(VK_EXT_line_rasterization)) + PFN_vkCmdSetLineStippleEXT fp_vkCmdSetLineStippleEXT = nullptr; +#else + void * fp_vkCmdSetLineStippleEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetLineStippleEnableEXT fp_vkCmdSetLineStippleEnableEXT = nullptr; +#else + void * fp_vkCmdSetLineStippleEnableEXT{}; +#endif +#if (defined(VK_KHR_line_rasterization)) + PFN_vkCmdSetLineStippleKHR fp_vkCmdSetLineStippleKHR = nullptr; +#else + void * fp_vkCmdSetLineStippleKHR{}; +#endif + PFN_vkCmdSetLineWidth fp_vkCmdSetLineWidth = nullptr; +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetLogicOpEXT fp_vkCmdSetLogicOpEXT = nullptr; +#else + void * fp_vkCmdSetLogicOpEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetLogicOpEnableEXT fp_vkCmdSetLogicOpEnableEXT = nullptr; +#else + void * 
fp_vkCmdSetLogicOpEnableEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetPatchControlPointsEXT fp_vkCmdSetPatchControlPointsEXT = nullptr; +#else + void * fp_vkCmdSetPatchControlPointsEXT{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkCmdSetPerformanceMarkerINTEL fp_vkCmdSetPerformanceMarkerINTEL = nullptr; +#else + void * fp_vkCmdSetPerformanceMarkerINTEL{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkCmdSetPerformanceOverrideINTEL fp_vkCmdSetPerformanceOverrideINTEL = nullptr; +#else + void * fp_vkCmdSetPerformanceOverrideINTEL{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkCmdSetPerformanceStreamMarkerINTEL fp_vkCmdSetPerformanceStreamMarkerINTEL = nullptr; +#else + void * fp_vkCmdSetPerformanceStreamMarkerINTEL{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetPolygonModeEXT fp_vkCmdSetPolygonModeEXT = nullptr; +#else + void * fp_vkCmdSetPolygonModeEXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetPrimitiveRestartEnable fp_vkCmdSetPrimitiveRestartEnable = nullptr; +#else + void * fp_vkCmdSetPrimitiveRestartEnable{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetPrimitiveRestartEnableEXT fp_vkCmdSetPrimitiveRestartEnableEXT = nullptr; +#else + void * fp_vkCmdSetPrimitiveRestartEnableEXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetPrimitiveTopology fp_vkCmdSetPrimitiveTopology = nullptr; +#else + void * fp_vkCmdSetPrimitiveTopology{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetPrimitiveTopologyEXT fp_vkCmdSetPrimitiveTopologyEXT = nullptr; +#else + void * fp_vkCmdSetPrimitiveTopologyEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetProvokingVertexModeEXT fp_vkCmdSetProvokingVertexModeEXT = nullptr; +#else + void * 
fp_vkCmdSetProvokingVertexModeEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetRasterizationSamplesEXT fp_vkCmdSetRasterizationSamplesEXT = nullptr; +#else + void * fp_vkCmdSetRasterizationSamplesEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetRasterizationStreamEXT fp_vkCmdSetRasterizationStreamEXT = nullptr; +#else + void * fp_vkCmdSetRasterizationStreamEXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetRasterizerDiscardEnable fp_vkCmdSetRasterizerDiscardEnable = nullptr; +#else + void * fp_vkCmdSetRasterizerDiscardEnable{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state2) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetRasterizerDiscardEnableEXT fp_vkCmdSetRasterizerDiscardEnableEXT = nullptr; +#else + void * fp_vkCmdSetRasterizerDiscardEnableEXT{}; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + PFN_vkCmdSetRayTracingPipelineStackSizeKHR fp_vkCmdSetRayTracingPipelineStackSizeKHR = nullptr; +#else + void * fp_vkCmdSetRayTracingPipelineStackSizeKHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdSetRenderingAttachmentLocations fp_vkCmdSetRenderingAttachmentLocations = nullptr; +#else + void * fp_vkCmdSetRenderingAttachmentLocations{}; +#endif +#if (defined(VK_KHR_dynamic_rendering_local_read)) + PFN_vkCmdSetRenderingAttachmentLocationsKHR fp_vkCmdSetRenderingAttachmentLocationsKHR = nullptr; +#else + void * fp_vkCmdSetRenderingAttachmentLocationsKHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCmdSetRenderingInputAttachmentIndices fp_vkCmdSetRenderingInputAttachmentIndices = nullptr; +#else + void * fp_vkCmdSetRenderingInputAttachmentIndices{}; +#endif +#if (defined(VK_KHR_dynamic_rendering_local_read)) + PFN_vkCmdSetRenderingInputAttachmentIndicesKHR fp_vkCmdSetRenderingInputAttachmentIndicesKHR = nullptr; +#else + void * fp_vkCmdSetRenderingInputAttachmentIndicesKHR{}; +#endif +#if 
(defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetRepresentativeFragmentTestEnableNV fp_vkCmdSetRepresentativeFragmentTestEnableNV = nullptr; +#else + void * fp_vkCmdSetRepresentativeFragmentTestEnableNV{}; +#endif +#if (defined(VK_EXT_sample_locations)) + PFN_vkCmdSetSampleLocationsEXT fp_vkCmdSetSampleLocationsEXT = nullptr; +#else + void * fp_vkCmdSetSampleLocationsEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetSampleLocationsEnableEXT fp_vkCmdSetSampleLocationsEnableEXT = nullptr; +#else + void * fp_vkCmdSetSampleLocationsEnableEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetSampleMaskEXT fp_vkCmdSetSampleMaskEXT = nullptr; +#else + void * fp_vkCmdSetSampleMaskEXT{}; +#endif + PFN_vkCmdSetScissor fp_vkCmdSetScissor = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetScissorWithCount fp_vkCmdSetScissorWithCount = nullptr; +#else + void * fp_vkCmdSetScissorWithCount{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetScissorWithCountEXT fp_vkCmdSetScissorWithCountEXT = nullptr; +#else + void * fp_vkCmdSetScissorWithCountEXT{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetShadingRateImageEnableNV fp_vkCmdSetShadingRateImageEnableNV = nullptr; +#else + void * fp_vkCmdSetShadingRateImageEnableNV{}; +#endif + PFN_vkCmdSetStencilCompareMask fp_vkCmdSetStencilCompareMask = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetStencilOp fp_vkCmdSetStencilOp = nullptr; +#else + void * fp_vkCmdSetStencilOp{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetStencilOpEXT fp_vkCmdSetStencilOpEXT = nullptr; +#else + void * fp_vkCmdSetStencilOpEXT{}; +#endif + PFN_vkCmdSetStencilReference fp_vkCmdSetStencilReference = nullptr; +#if 
(defined(VK_VERSION_1_3)) + PFN_vkCmdSetStencilTestEnable fp_vkCmdSetStencilTestEnable = nullptr; +#else + void * fp_vkCmdSetStencilTestEnable{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetStencilTestEnableEXT fp_vkCmdSetStencilTestEnableEXT = nullptr; +#else + void * fp_vkCmdSetStencilTestEnableEXT{}; +#endif + PFN_vkCmdSetStencilWriteMask fp_vkCmdSetStencilWriteMask = nullptr; +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetTessellationDomainOriginEXT fp_vkCmdSetTessellationDomainOriginEXT = nullptr; +#else + void * fp_vkCmdSetTessellationDomainOriginEXT{}; +#endif +#if (defined(VK_EXT_vertex_input_dynamic_state) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetVertexInputEXT fp_vkCmdSetVertexInputEXT = nullptr; +#else + void * fp_vkCmdSetVertexInputEXT{}; +#endif + PFN_vkCmdSetViewport fp_vkCmdSetViewport = nullptr; +#if (defined(VK_NV_shading_rate_image)) + PFN_vkCmdSetViewportShadingRatePaletteNV fp_vkCmdSetViewportShadingRatePaletteNV = nullptr; +#else + void * fp_vkCmdSetViewportShadingRatePaletteNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetViewportSwizzleNV fp_vkCmdSetViewportSwizzleNV = nullptr; +#else + void * fp_vkCmdSetViewportSwizzleNV{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state3) || defined(VK_EXT_shader_object)) + PFN_vkCmdSetViewportWScalingEnableNV fp_vkCmdSetViewportWScalingEnableNV = nullptr; +#else + void * fp_vkCmdSetViewportWScalingEnableNV{}; +#endif +#if (defined(VK_NV_clip_space_w_scaling)) + PFN_vkCmdSetViewportWScalingNV fp_vkCmdSetViewportWScalingNV = nullptr; +#else + void * fp_vkCmdSetViewportWScalingNV{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdSetViewportWithCount fp_vkCmdSetViewportWithCount = nullptr; +#else + void * fp_vkCmdSetViewportWithCount{}; +#endif +#if (defined(VK_EXT_extended_dynamic_state) || defined(VK_EXT_shader_object)) + 
PFN_vkCmdSetViewportWithCountEXT fp_vkCmdSetViewportWithCountEXT = nullptr; +#else + void * fp_vkCmdSetViewportWithCountEXT{}; +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + PFN_vkCmdSubpassShadingHUAWEI fp_vkCmdSubpassShadingHUAWEI = nullptr; +#else + void * fp_vkCmdSubpassShadingHUAWEI{}; +#endif +#if (defined(VK_KHR_ray_tracing_maintenance1)) + PFN_vkCmdTraceRaysIndirect2KHR fp_vkCmdTraceRaysIndirect2KHR = nullptr; +#else + void * fp_vkCmdTraceRaysIndirect2KHR{}; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + PFN_vkCmdTraceRaysIndirectKHR fp_vkCmdTraceRaysIndirectKHR = nullptr; +#else + void * fp_vkCmdTraceRaysIndirectKHR{}; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + PFN_vkCmdTraceRaysKHR fp_vkCmdTraceRaysKHR = nullptr; +#else + void * fp_vkCmdTraceRaysKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkCmdTraceRaysNV fp_vkCmdTraceRaysNV = nullptr; +#else + void * fp_vkCmdTraceRaysNV{}; +#endif + PFN_vkCmdUpdateBuffer fp_vkCmdUpdateBuffer = nullptr; +#if (defined(VK_NV_device_generated_commands_compute)) + PFN_vkCmdUpdatePipelineIndirectBufferNV fp_vkCmdUpdatePipelineIndirectBufferNV = nullptr; +#else + void * fp_vkCmdUpdatePipelineIndirectBufferNV{}; +#endif + PFN_vkCmdWaitEvents fp_vkCmdWaitEvents = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdWaitEvents2 fp_vkCmdWaitEvents2 = nullptr; +#else + void * fp_vkCmdWaitEvents2{}; +#endif +#if (defined(VK_KHR_synchronization2)) + PFN_vkCmdWaitEvents2KHR fp_vkCmdWaitEvents2KHR = nullptr; +#else + void * fp_vkCmdWaitEvents2KHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkCmdWriteAccelerationStructuresPropertiesKHR fp_vkCmdWriteAccelerationStructuresPropertiesKHR = nullptr; +#else + void * fp_vkCmdWriteAccelerationStructuresPropertiesKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkCmdWriteAccelerationStructuresPropertiesNV fp_vkCmdWriteAccelerationStructuresPropertiesNV = nullptr; +#else + void * fp_vkCmdWriteAccelerationStructuresPropertiesNV{}; +#endif 
+#if (defined(VK_AMD_buffer_marker)) + PFN_vkCmdWriteBufferMarker2AMD fp_vkCmdWriteBufferMarker2AMD = nullptr; +#else + void * fp_vkCmdWriteBufferMarker2AMD{}; +#endif +#if (defined(VK_AMD_buffer_marker)) + PFN_vkCmdWriteBufferMarkerAMD fp_vkCmdWriteBufferMarkerAMD = nullptr; +#else + void * fp_vkCmdWriteBufferMarkerAMD{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCmdWriteMicromapsPropertiesEXT fp_vkCmdWriteMicromapsPropertiesEXT = nullptr; +#else + void * fp_vkCmdWriteMicromapsPropertiesEXT{}; +#endif + PFN_vkCmdWriteTimestamp fp_vkCmdWriteTimestamp = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCmdWriteTimestamp2 fp_vkCmdWriteTimestamp2 = nullptr; +#else + void * fp_vkCmdWriteTimestamp2{}; +#endif +#if (defined(VK_KHR_synchronization2)) + PFN_vkCmdWriteTimestamp2KHR fp_vkCmdWriteTimestamp2KHR = nullptr; +#else + void * fp_vkCmdWriteTimestamp2KHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkCompileDeferredNV fp_vkCompileDeferredNV = nullptr; +#else + void * fp_vkCompileDeferredNV{}; +#endif +#if (defined(VK_NV_cooperative_vector)) + PFN_vkConvertCooperativeVectorMatrixNV fp_vkConvertCooperativeVectorMatrixNV = nullptr; +#else + void * fp_vkConvertCooperativeVectorMatrixNV{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkCopyAccelerationStructureKHR fp_vkCopyAccelerationStructureKHR = nullptr; +#else + void * fp_vkCopyAccelerationStructureKHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkCopyAccelerationStructureToMemoryKHR fp_vkCopyAccelerationStructureToMemoryKHR = nullptr; +#else + void * fp_vkCopyAccelerationStructureToMemoryKHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCopyImageToImage fp_vkCopyImageToImage = nullptr; +#else + void * fp_vkCopyImageToImage{}; +#endif +#if (defined(VK_EXT_host_image_copy)) + PFN_vkCopyImageToImageEXT fp_vkCopyImageToImageEXT = nullptr; +#else + void * fp_vkCopyImageToImageEXT{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCopyImageToMemory 
fp_vkCopyImageToMemory = nullptr; +#else + void * fp_vkCopyImageToMemory{}; +#endif +#if (defined(VK_EXT_host_image_copy)) + PFN_vkCopyImageToMemoryEXT fp_vkCopyImageToMemoryEXT = nullptr; +#else + void * fp_vkCopyImageToMemoryEXT{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkCopyMemoryToAccelerationStructureKHR fp_vkCopyMemoryToAccelerationStructureKHR = nullptr; +#else + void * fp_vkCopyMemoryToAccelerationStructureKHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkCopyMemoryToImage fp_vkCopyMemoryToImage = nullptr; +#else + void * fp_vkCopyMemoryToImage{}; +#endif +#if (defined(VK_EXT_host_image_copy)) + PFN_vkCopyMemoryToImageEXT fp_vkCopyMemoryToImageEXT = nullptr; +#else + void * fp_vkCopyMemoryToImageEXT{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCopyMemoryToMicromapEXT fp_vkCopyMemoryToMicromapEXT = nullptr; +#else + void * fp_vkCopyMemoryToMicromapEXT{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCopyMicromapEXT fp_vkCopyMicromapEXT = nullptr; +#else + void * fp_vkCopyMicromapEXT{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCopyMicromapToMemoryEXT fp_vkCopyMicromapToMemoryEXT = nullptr; +#else + void * fp_vkCopyMicromapToMemoryEXT{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkCreateAccelerationStructureKHR fp_vkCreateAccelerationStructureKHR = nullptr; +#else + void * fp_vkCreateAccelerationStructureKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkCreateAccelerationStructureNV fp_vkCreateAccelerationStructureNV = nullptr; +#else + void * fp_vkCreateAccelerationStructureNV{}; +#endif + PFN_vkCreateBuffer fp_vkCreateBuffer = nullptr; +#if (defined(VK_FUCHSIA_buffer_collection)) + PFN_vkCreateBufferCollectionFUCHSIA fp_vkCreateBufferCollectionFUCHSIA = nullptr; +#else + void * fp_vkCreateBufferCollectionFUCHSIA{}; +#endif + PFN_vkCreateBufferView fp_vkCreateBufferView = nullptr; + PFN_vkCreateCommandPool fp_vkCreateCommandPool = nullptr; + PFN_vkCreateComputePipelines 
fp_vkCreateComputePipelines = nullptr; +#if (defined(VK_NVX_binary_import)) + PFN_vkCreateCuFunctionNVX fp_vkCreateCuFunctionNVX = nullptr; +#else + void * fp_vkCreateCuFunctionNVX{}; +#endif +#if (defined(VK_NVX_binary_import)) + PFN_vkCreateCuModuleNVX fp_vkCreateCuModuleNVX = nullptr; +#else + void * fp_vkCreateCuModuleNVX{}; +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + PFN_vkCreateCudaFunctionNV fp_vkCreateCudaFunctionNV = nullptr; +#else + void * fp_vkCreateCudaFunctionNV{}; +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + PFN_vkCreateCudaModuleNV fp_vkCreateCudaModuleNV = nullptr; +#else + void * fp_vkCreateCudaModuleNV{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkCreateDataGraphPipelineSessionARM fp_vkCreateDataGraphPipelineSessionARM = nullptr; +#else + void * fp_vkCreateDataGraphPipelineSessionARM{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkCreateDataGraphPipelinesARM fp_vkCreateDataGraphPipelinesARM = nullptr; +#else + void * fp_vkCreateDataGraphPipelinesARM{}; +#endif +#if (defined(VK_KHR_deferred_host_operations)) + PFN_vkCreateDeferredOperationKHR fp_vkCreateDeferredOperationKHR = nullptr; +#else + void * fp_vkCreateDeferredOperationKHR{}; +#endif + PFN_vkCreateDescriptorPool fp_vkCreateDescriptorPool = nullptr; + PFN_vkCreateDescriptorSetLayout fp_vkCreateDescriptorSetLayout = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkCreateDescriptorUpdateTemplate fp_vkCreateDescriptorUpdateTemplate = nullptr; +#else + void * fp_vkCreateDescriptorUpdateTemplate{}; +#endif +#if (defined(VK_KHR_descriptor_update_template)) + PFN_vkCreateDescriptorUpdateTemplateKHR fp_vkCreateDescriptorUpdateTemplateKHR = nullptr; +#else + void * fp_vkCreateDescriptorUpdateTemplateKHR{}; +#endif + PFN_vkCreateEvent fp_vkCreateEvent = nullptr; +#if (defined(VK_AMDX_shader_enqueue)) + PFN_vkCreateExecutionGraphPipelinesAMDX fp_vkCreateExecutionGraphPipelinesAMDX = nullptr; +#else + void * fp_vkCreateExecutionGraphPipelinesAMDX{}; +#endif +#if 
(defined(VK_NV_external_compute_queue)) + PFN_vkCreateExternalComputeQueueNV fp_vkCreateExternalComputeQueueNV = nullptr; +#else + void * fp_vkCreateExternalComputeQueueNV{}; +#endif + PFN_vkCreateFence fp_vkCreateFence = nullptr; + PFN_vkCreateFramebuffer fp_vkCreateFramebuffer = nullptr; + PFN_vkCreateGraphicsPipelines fp_vkCreateGraphicsPipelines = nullptr; + PFN_vkCreateImage fp_vkCreateImage = nullptr; + PFN_vkCreateImageView fp_vkCreateImageView = nullptr; +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkCreateIndirectCommandsLayoutEXT fp_vkCreateIndirectCommandsLayoutEXT = nullptr; +#else + void * fp_vkCreateIndirectCommandsLayoutEXT{}; +#endif +#if (defined(VK_NV_device_generated_commands)) + PFN_vkCreateIndirectCommandsLayoutNV fp_vkCreateIndirectCommandsLayoutNV = nullptr; +#else + void * fp_vkCreateIndirectCommandsLayoutNV{}; +#endif +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkCreateIndirectExecutionSetEXT fp_vkCreateIndirectExecutionSetEXT = nullptr; +#else + void * fp_vkCreateIndirectExecutionSetEXT{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkCreateMicromapEXT fp_vkCreateMicromapEXT = nullptr; +#else + void * fp_vkCreateMicromapEXT{}; +#endif +#if (defined(VK_NV_optical_flow)) + PFN_vkCreateOpticalFlowSessionNV fp_vkCreateOpticalFlowSessionNV = nullptr; +#else + void * fp_vkCreateOpticalFlowSessionNV{}; +#endif +#if (defined(VK_KHR_pipeline_binary)) + PFN_vkCreatePipelineBinariesKHR fp_vkCreatePipelineBinariesKHR = nullptr; +#else + void * fp_vkCreatePipelineBinariesKHR{}; +#endif + PFN_vkCreatePipelineCache fp_vkCreatePipelineCache = nullptr; + PFN_vkCreatePipelineLayout fp_vkCreatePipelineLayout = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkCreatePrivateDataSlot fp_vkCreatePrivateDataSlot = nullptr; +#else + void * fp_vkCreatePrivateDataSlot{}; +#endif +#if (defined(VK_EXT_private_data)) + PFN_vkCreatePrivateDataSlotEXT fp_vkCreatePrivateDataSlotEXT = nullptr; +#else + void * fp_vkCreatePrivateDataSlotEXT{}; 
+#endif + PFN_vkCreateQueryPool fp_vkCreateQueryPool = nullptr; +#if (defined(VK_KHR_ray_tracing_pipeline)) + PFN_vkCreateRayTracingPipelinesKHR fp_vkCreateRayTracingPipelinesKHR = nullptr; +#else + void * fp_vkCreateRayTracingPipelinesKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkCreateRayTracingPipelinesNV fp_vkCreateRayTracingPipelinesNV = nullptr; +#else + void * fp_vkCreateRayTracingPipelinesNV{}; +#endif + PFN_vkCreateRenderPass fp_vkCreateRenderPass = nullptr; +#if (defined(VK_VERSION_1_2)) + PFN_vkCreateRenderPass2 fp_vkCreateRenderPass2 = nullptr; +#else + void * fp_vkCreateRenderPass2{}; +#endif +#if (defined(VK_KHR_create_renderpass2)) + PFN_vkCreateRenderPass2KHR fp_vkCreateRenderPass2KHR = nullptr; +#else + void * fp_vkCreateRenderPass2KHR{}; +#endif + PFN_vkCreateSampler fp_vkCreateSampler = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkCreateSamplerYcbcrConversion fp_vkCreateSamplerYcbcrConversion = nullptr; +#else + void * fp_vkCreateSamplerYcbcrConversion{}; +#endif +#if (defined(VK_KHR_sampler_ycbcr_conversion)) + PFN_vkCreateSamplerYcbcrConversionKHR fp_vkCreateSamplerYcbcrConversionKHR = nullptr; +#else + void * fp_vkCreateSamplerYcbcrConversionKHR{}; +#endif + PFN_vkCreateSemaphore fp_vkCreateSemaphore = nullptr; + PFN_vkCreateShaderModule fp_vkCreateShaderModule = nullptr; +#if (defined(VK_EXT_shader_object)) + PFN_vkCreateShadersEXT fp_vkCreateShadersEXT = nullptr; +#else + void * fp_vkCreateShadersEXT{}; +#endif +#if (defined(VK_KHR_display_swapchain)) + PFN_vkCreateSharedSwapchainsKHR fp_vkCreateSharedSwapchainsKHR = nullptr; +#else + void * fp_vkCreateSharedSwapchainsKHR{}; +#endif +#if (defined(VK_KHR_swapchain)) + PFN_vkCreateSwapchainKHR fp_vkCreateSwapchainKHR = nullptr; +#else + void * fp_vkCreateSwapchainKHR{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkCreateTensorARM fp_vkCreateTensorARM = nullptr; +#else + void * fp_vkCreateTensorARM{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkCreateTensorViewARM 
fp_vkCreateTensorViewARM = nullptr; +#else + void * fp_vkCreateTensorViewARM{}; +#endif +#if (defined(VK_EXT_validation_cache)) + PFN_vkCreateValidationCacheEXT fp_vkCreateValidationCacheEXT = nullptr; +#else + void * fp_vkCreateValidationCacheEXT{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkCreateVideoSessionKHR fp_vkCreateVideoSessionKHR = nullptr; +#else + void * fp_vkCreateVideoSessionKHR{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkCreateVideoSessionParametersKHR fp_vkCreateVideoSessionParametersKHR = nullptr; +#else + void * fp_vkCreateVideoSessionParametersKHR{}; +#endif +#if (defined(VK_EXT_debug_marker)) + PFN_vkDebugMarkerSetObjectNameEXT fp_vkDebugMarkerSetObjectNameEXT = nullptr; +#else + void * fp_vkDebugMarkerSetObjectNameEXT{}; +#endif +#if (defined(VK_EXT_debug_marker)) + PFN_vkDebugMarkerSetObjectTagEXT fp_vkDebugMarkerSetObjectTagEXT = nullptr; +#else + void * fp_vkDebugMarkerSetObjectTagEXT{}; +#endif +#if (defined(VK_KHR_deferred_host_operations)) + PFN_vkDeferredOperationJoinKHR fp_vkDeferredOperationJoinKHR = nullptr; +#else + void * fp_vkDeferredOperationJoinKHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkDestroyAccelerationStructureKHR fp_vkDestroyAccelerationStructureKHR = nullptr; +#else + void * fp_vkDestroyAccelerationStructureKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkDestroyAccelerationStructureNV fp_vkDestroyAccelerationStructureNV = nullptr; +#else + void * fp_vkDestroyAccelerationStructureNV{}; +#endif + PFN_vkDestroyBuffer fp_vkDestroyBuffer = nullptr; +#if (defined(VK_FUCHSIA_buffer_collection)) + PFN_vkDestroyBufferCollectionFUCHSIA fp_vkDestroyBufferCollectionFUCHSIA = nullptr; +#else + void * fp_vkDestroyBufferCollectionFUCHSIA{}; +#endif + PFN_vkDestroyBufferView fp_vkDestroyBufferView = nullptr; + PFN_vkDestroyCommandPool fp_vkDestroyCommandPool = nullptr; +#if (defined(VK_NVX_binary_import)) + PFN_vkDestroyCuFunctionNVX fp_vkDestroyCuFunctionNVX = nullptr; +#else + void * 
fp_vkDestroyCuFunctionNVX{}; +#endif +#if (defined(VK_NVX_binary_import)) + PFN_vkDestroyCuModuleNVX fp_vkDestroyCuModuleNVX = nullptr; +#else + void * fp_vkDestroyCuModuleNVX{}; +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + PFN_vkDestroyCudaFunctionNV fp_vkDestroyCudaFunctionNV = nullptr; +#else + void * fp_vkDestroyCudaFunctionNV{}; +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + PFN_vkDestroyCudaModuleNV fp_vkDestroyCudaModuleNV = nullptr; +#else + void * fp_vkDestroyCudaModuleNV{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkDestroyDataGraphPipelineSessionARM fp_vkDestroyDataGraphPipelineSessionARM = nullptr; +#else + void * fp_vkDestroyDataGraphPipelineSessionARM{}; +#endif +#if (defined(VK_KHR_deferred_host_operations)) + PFN_vkDestroyDeferredOperationKHR fp_vkDestroyDeferredOperationKHR = nullptr; +#else + void * fp_vkDestroyDeferredOperationKHR{}; +#endif + PFN_vkDestroyDescriptorPool fp_vkDestroyDescriptorPool = nullptr; + PFN_vkDestroyDescriptorSetLayout fp_vkDestroyDescriptorSetLayout = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkDestroyDescriptorUpdateTemplate fp_vkDestroyDescriptorUpdateTemplate = nullptr; +#else + void * fp_vkDestroyDescriptorUpdateTemplate{}; +#endif +#if (defined(VK_KHR_descriptor_update_template)) + PFN_vkDestroyDescriptorUpdateTemplateKHR fp_vkDestroyDescriptorUpdateTemplateKHR = nullptr; +#else + void * fp_vkDestroyDescriptorUpdateTemplateKHR{}; +#endif + PFN_vkDestroyEvent fp_vkDestroyEvent = nullptr; +#if (defined(VK_NV_external_compute_queue)) + PFN_vkDestroyExternalComputeQueueNV fp_vkDestroyExternalComputeQueueNV = nullptr; +#else + void * fp_vkDestroyExternalComputeQueueNV{}; +#endif + PFN_vkDestroyFence fp_vkDestroyFence = nullptr; + PFN_vkDestroyFramebuffer fp_vkDestroyFramebuffer = nullptr; + PFN_vkDestroyImage fp_vkDestroyImage = nullptr; + PFN_vkDestroyImageView fp_vkDestroyImageView = nullptr; +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkDestroyIndirectCommandsLayoutEXT 
fp_vkDestroyIndirectCommandsLayoutEXT = nullptr; +#else + void * fp_vkDestroyIndirectCommandsLayoutEXT{}; +#endif +#if (defined(VK_NV_device_generated_commands)) + PFN_vkDestroyIndirectCommandsLayoutNV fp_vkDestroyIndirectCommandsLayoutNV = nullptr; +#else + void * fp_vkDestroyIndirectCommandsLayoutNV{}; +#endif +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkDestroyIndirectExecutionSetEXT fp_vkDestroyIndirectExecutionSetEXT = nullptr; +#else + void * fp_vkDestroyIndirectExecutionSetEXT{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkDestroyMicromapEXT fp_vkDestroyMicromapEXT = nullptr; +#else + void * fp_vkDestroyMicromapEXT{}; +#endif +#if (defined(VK_NV_optical_flow)) + PFN_vkDestroyOpticalFlowSessionNV fp_vkDestroyOpticalFlowSessionNV = nullptr; +#else + void * fp_vkDestroyOpticalFlowSessionNV{}; +#endif + PFN_vkDestroyPipeline fp_vkDestroyPipeline = nullptr; +#if (defined(VK_KHR_pipeline_binary)) + PFN_vkDestroyPipelineBinaryKHR fp_vkDestroyPipelineBinaryKHR = nullptr; +#else + void * fp_vkDestroyPipelineBinaryKHR{}; +#endif + PFN_vkDestroyPipelineCache fp_vkDestroyPipelineCache = nullptr; + PFN_vkDestroyPipelineLayout fp_vkDestroyPipelineLayout = nullptr; +#if (defined(VK_VERSION_1_3)) + PFN_vkDestroyPrivateDataSlot fp_vkDestroyPrivateDataSlot = nullptr; +#else + void * fp_vkDestroyPrivateDataSlot{}; +#endif +#if (defined(VK_EXT_private_data)) + PFN_vkDestroyPrivateDataSlotEXT fp_vkDestroyPrivateDataSlotEXT = nullptr; +#else + void * fp_vkDestroyPrivateDataSlotEXT{}; +#endif + PFN_vkDestroyQueryPool fp_vkDestroyQueryPool = nullptr; + PFN_vkDestroyRenderPass fp_vkDestroyRenderPass = nullptr; + PFN_vkDestroySampler fp_vkDestroySampler = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkDestroySamplerYcbcrConversion fp_vkDestroySamplerYcbcrConversion = nullptr; +#else + void * fp_vkDestroySamplerYcbcrConversion{}; +#endif +#if (defined(VK_KHR_sampler_ycbcr_conversion)) + PFN_vkDestroySamplerYcbcrConversionKHR 
fp_vkDestroySamplerYcbcrConversionKHR = nullptr; +#else + void * fp_vkDestroySamplerYcbcrConversionKHR{}; +#endif + PFN_vkDestroySemaphore fp_vkDestroySemaphore = nullptr; +#if (defined(VK_EXT_shader_object)) + PFN_vkDestroyShaderEXT fp_vkDestroyShaderEXT = nullptr; +#else + void * fp_vkDestroyShaderEXT{}; +#endif + PFN_vkDestroyShaderModule fp_vkDestroyShaderModule = nullptr; +#if (defined(VK_KHR_swapchain)) + PFN_vkDestroySwapchainKHR fp_vkDestroySwapchainKHR = nullptr; +#else + void * fp_vkDestroySwapchainKHR{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkDestroyTensorARM fp_vkDestroyTensorARM = nullptr; +#else + void * fp_vkDestroyTensorARM{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkDestroyTensorViewARM fp_vkDestroyTensorViewARM = nullptr; +#else + void * fp_vkDestroyTensorViewARM{}; +#endif +#if (defined(VK_EXT_validation_cache)) + PFN_vkDestroyValidationCacheEXT fp_vkDestroyValidationCacheEXT = nullptr; +#else + void * fp_vkDestroyValidationCacheEXT{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkDestroyVideoSessionKHR fp_vkDestroyVideoSessionKHR = nullptr; +#else + void * fp_vkDestroyVideoSessionKHR{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkDestroyVideoSessionParametersKHR fp_vkDestroyVideoSessionParametersKHR = nullptr; +#else + void * fp_vkDestroyVideoSessionParametersKHR{}; +#endif + PFN_vkDeviceWaitIdle fp_vkDeviceWaitIdle = nullptr; +#if (defined(VK_EXT_display_control)) + PFN_vkDisplayPowerControlEXT fp_vkDisplayPowerControlEXT = nullptr; +#else + void * fp_vkDisplayPowerControlEXT{}; +#endif + PFN_vkEndCommandBuffer fp_vkEndCommandBuffer = nullptr; +#if (defined(VK_EXT_metal_objects)) + PFN_vkExportMetalObjectsEXT fp_vkExportMetalObjectsEXT = nullptr; +#else + void * fp_vkExportMetalObjectsEXT{}; +#endif + PFN_vkFlushMappedMemoryRanges fp_vkFlushMappedMemoryRanges = nullptr; + PFN_vkFreeCommandBuffers fp_vkFreeCommandBuffers = nullptr; + PFN_vkFreeDescriptorSets fp_vkFreeDescriptorSets = nullptr; + PFN_vkFreeMemory 
fp_vkFreeMemory = nullptr; +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkGetAccelerationStructureBuildSizesKHR fp_vkGetAccelerationStructureBuildSizesKHR = nullptr; +#else + void * fp_vkGetAccelerationStructureBuildSizesKHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkGetAccelerationStructureDeviceAddressKHR fp_vkGetAccelerationStructureDeviceAddressKHR = nullptr; +#else + void * fp_vkGetAccelerationStructureDeviceAddressKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkGetAccelerationStructureHandleNV fp_vkGetAccelerationStructureHandleNV = nullptr; +#else + void * fp_vkGetAccelerationStructureHandleNV{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkGetAccelerationStructureMemoryRequirementsNV fp_vkGetAccelerationStructureMemoryRequirementsNV = nullptr; +#else + void * fp_vkGetAccelerationStructureMemoryRequirementsNV{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT fp_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT = nullptr; +#else + void * fp_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT{}; +#endif +#if (defined(VK_ANDROID_external_memory_android_hardware_buffer)) + PFN_vkGetAndroidHardwareBufferPropertiesANDROID fp_vkGetAndroidHardwareBufferPropertiesANDROID = nullptr; +#else + void * fp_vkGetAndroidHardwareBufferPropertiesANDROID{}; +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + PFN_vkGetBufferCollectionPropertiesFUCHSIA fp_vkGetBufferCollectionPropertiesFUCHSIA = nullptr; +#else + void * fp_vkGetBufferCollectionPropertiesFUCHSIA{}; +#endif +#if (defined(VK_VERSION_1_2)) + PFN_vkGetBufferDeviceAddress fp_vkGetBufferDeviceAddress = nullptr; +#else + void * fp_vkGetBufferDeviceAddress{}; +#endif +#if (defined(VK_EXT_buffer_device_address)) + PFN_vkGetBufferDeviceAddressEXT fp_vkGetBufferDeviceAddressEXT = nullptr; +#else + void * fp_vkGetBufferDeviceAddressEXT{}; +#endif +#if (defined(VK_KHR_buffer_device_address)) + 
PFN_vkGetBufferDeviceAddressKHR fp_vkGetBufferDeviceAddressKHR = nullptr; +#else + void * fp_vkGetBufferDeviceAddressKHR{}; +#endif + PFN_vkGetBufferMemoryRequirements fp_vkGetBufferMemoryRequirements = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetBufferMemoryRequirements2 fp_vkGetBufferMemoryRequirements2 = nullptr; +#else + void * fp_vkGetBufferMemoryRequirements2{}; +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + PFN_vkGetBufferMemoryRequirements2KHR fp_vkGetBufferMemoryRequirements2KHR = nullptr; +#else + void * fp_vkGetBufferMemoryRequirements2KHR{}; +#endif +#if (defined(VK_VERSION_1_2)) + PFN_vkGetBufferOpaqueCaptureAddress fp_vkGetBufferOpaqueCaptureAddress = nullptr; +#else + void * fp_vkGetBufferOpaqueCaptureAddress{}; +#endif +#if (defined(VK_KHR_buffer_device_address)) + PFN_vkGetBufferOpaqueCaptureAddressKHR fp_vkGetBufferOpaqueCaptureAddressKHR = nullptr; +#else + void * fp_vkGetBufferOpaqueCaptureAddressKHR{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT fp_vkGetBufferOpaqueCaptureDescriptorDataEXT = nullptr; +#else + void * fp_vkGetBufferOpaqueCaptureDescriptorDataEXT{}; +#endif +#if (defined(VK_EXT_calibrated_timestamps)) + PFN_vkGetCalibratedTimestampsEXT fp_vkGetCalibratedTimestampsEXT = nullptr; +#else + void * fp_vkGetCalibratedTimestampsEXT{}; +#endif +#if (defined(VK_KHR_calibrated_timestamps)) + PFN_vkGetCalibratedTimestampsKHR fp_vkGetCalibratedTimestampsKHR = nullptr; +#else + void * fp_vkGetCalibratedTimestampsKHR{}; +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + PFN_vkGetClusterAccelerationStructureBuildSizesNV fp_vkGetClusterAccelerationStructureBuildSizesNV = nullptr; +#else + void * fp_vkGetClusterAccelerationStructureBuildSizesNV{}; +#endif +#if (defined(VK_NV_cuda_kernel_launch)) + PFN_vkGetCudaModuleCacheNV fp_vkGetCudaModuleCacheNV = nullptr; +#else + void * fp_vkGetCudaModuleCacheNV{}; +#endif +#if (defined(VK_ARM_data_graph)) + 
PFN_vkGetDataGraphPipelineAvailablePropertiesARM fp_vkGetDataGraphPipelineAvailablePropertiesARM = nullptr; +#else + void * fp_vkGetDataGraphPipelineAvailablePropertiesARM{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkGetDataGraphPipelinePropertiesARM fp_vkGetDataGraphPipelinePropertiesARM = nullptr; +#else + void * fp_vkGetDataGraphPipelinePropertiesARM{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM fp_vkGetDataGraphPipelineSessionBindPointRequirementsARM = nullptr; +#else + void * fp_vkGetDataGraphPipelineSessionBindPointRequirementsARM{}; +#endif +#if (defined(VK_ARM_data_graph)) + PFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM fp_vkGetDataGraphPipelineSessionMemoryRequirementsARM = nullptr; +#else + void * fp_vkGetDataGraphPipelineSessionMemoryRequirementsARM{}; +#endif +#if (defined(VK_KHR_deferred_host_operations)) + PFN_vkGetDeferredOperationMaxConcurrencyKHR fp_vkGetDeferredOperationMaxConcurrencyKHR = nullptr; +#else + void * fp_vkGetDeferredOperationMaxConcurrencyKHR{}; +#endif +#if (defined(VK_KHR_deferred_host_operations)) + PFN_vkGetDeferredOperationResultKHR fp_vkGetDeferredOperationResultKHR = nullptr; +#else + void * fp_vkGetDeferredOperationResultKHR{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetDescriptorEXT fp_vkGetDescriptorEXT = nullptr; +#else + void * fp_vkGetDescriptorEXT{}; +#endif +#if (defined(VK_VALVE_descriptor_set_host_mapping)) + PFN_vkGetDescriptorSetHostMappingVALVE fp_vkGetDescriptorSetHostMappingVALVE = nullptr; +#else + void * fp_vkGetDescriptorSetHostMappingVALVE{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetDescriptorSetLayoutBindingOffsetEXT fp_vkGetDescriptorSetLayoutBindingOffsetEXT = nullptr; +#else + void * fp_vkGetDescriptorSetLayoutBindingOffsetEXT{}; +#endif +#if (defined(VK_VALVE_descriptor_set_host_mapping)) + PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE fp_vkGetDescriptorSetLayoutHostMappingInfoVALVE = 
nullptr; +#else + void * fp_vkGetDescriptorSetLayoutHostMappingInfoVALVE{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetDescriptorSetLayoutSizeEXT fp_vkGetDescriptorSetLayoutSizeEXT = nullptr; +#else + void * fp_vkGetDescriptorSetLayoutSizeEXT{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkGetDescriptorSetLayoutSupport fp_vkGetDescriptorSetLayoutSupport = nullptr; +#else + void * fp_vkGetDescriptorSetLayoutSupport{}; +#endif +#if (defined(VK_KHR_maintenance3)) + PFN_vkGetDescriptorSetLayoutSupportKHR fp_vkGetDescriptorSetLayoutSupportKHR = nullptr; +#else + void * fp_vkGetDescriptorSetLayoutSupportKHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkGetDeviceAccelerationStructureCompatibilityKHR fp_vkGetDeviceAccelerationStructureCompatibilityKHR = nullptr; +#else + void * fp_vkGetDeviceAccelerationStructureCompatibilityKHR{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkGetDeviceBufferMemoryRequirements fp_vkGetDeviceBufferMemoryRequirements = nullptr; +#else + void * fp_vkGetDeviceBufferMemoryRequirements{}; +#endif +#if (defined(VK_KHR_maintenance4)) + PFN_vkGetDeviceBufferMemoryRequirementsKHR fp_vkGetDeviceBufferMemoryRequirementsKHR = nullptr; +#else + void * fp_vkGetDeviceBufferMemoryRequirementsKHR{}; +#endif +#if (defined(VK_EXT_device_fault)) + PFN_vkGetDeviceFaultInfoEXT fp_vkGetDeviceFaultInfoEXT = nullptr; +#else + void * fp_vkGetDeviceFaultInfoEXT{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkGetDeviceGroupPeerMemoryFeatures fp_vkGetDeviceGroupPeerMemoryFeatures = nullptr; +#else + void * fp_vkGetDeviceGroupPeerMemoryFeatures{}; +#endif +#if (defined(VK_KHR_device_group)) + PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR fp_vkGetDeviceGroupPeerMemoryFeaturesKHR = nullptr; +#else + void * fp_vkGetDeviceGroupPeerMemoryFeaturesKHR{}; +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + PFN_vkGetDeviceGroupPresentCapabilitiesKHR fp_vkGetDeviceGroupPresentCapabilitiesKHR = nullptr; +#else + void * 
fp_vkGetDeviceGroupPresentCapabilitiesKHR{}; +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + PFN_vkGetDeviceGroupSurfacePresentModes2EXT fp_vkGetDeviceGroupSurfacePresentModes2EXT = nullptr; +#else + void * fp_vkGetDeviceGroupSurfacePresentModes2EXT{}; +#endif +#if (defined(VK_KHR_swapchain) || defined(VK_KHR_device_group)) + PFN_vkGetDeviceGroupSurfacePresentModesKHR fp_vkGetDeviceGroupSurfacePresentModesKHR = nullptr; +#else + void * fp_vkGetDeviceGroupSurfacePresentModesKHR{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkGetDeviceImageMemoryRequirements fp_vkGetDeviceImageMemoryRequirements = nullptr; +#else + void * fp_vkGetDeviceImageMemoryRequirements{}; +#endif +#if (defined(VK_KHR_maintenance4)) + PFN_vkGetDeviceImageMemoryRequirementsKHR fp_vkGetDeviceImageMemoryRequirementsKHR = nullptr; +#else + void * fp_vkGetDeviceImageMemoryRequirementsKHR{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkGetDeviceImageSparseMemoryRequirements fp_vkGetDeviceImageSparseMemoryRequirements = nullptr; +#else + void * fp_vkGetDeviceImageSparseMemoryRequirements{}; +#endif +#if (defined(VK_KHR_maintenance4)) + PFN_vkGetDeviceImageSparseMemoryRequirementsKHR fp_vkGetDeviceImageSparseMemoryRequirementsKHR = nullptr; +#else + void * fp_vkGetDeviceImageSparseMemoryRequirementsKHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkGetDeviceImageSubresourceLayout fp_vkGetDeviceImageSubresourceLayout = nullptr; +#else + void * fp_vkGetDeviceImageSubresourceLayout{}; +#endif +#if (defined(VK_KHR_maintenance5)) + PFN_vkGetDeviceImageSubresourceLayoutKHR fp_vkGetDeviceImageSubresourceLayoutKHR = nullptr; +#else + void * fp_vkGetDeviceImageSubresourceLayoutKHR{}; +#endif + PFN_vkGetDeviceMemoryCommitment fp_vkGetDeviceMemoryCommitment = nullptr; +#if (defined(VK_VERSION_1_2)) + PFN_vkGetDeviceMemoryOpaqueCaptureAddress fp_vkGetDeviceMemoryOpaqueCaptureAddress = nullptr; +#else + void * fp_vkGetDeviceMemoryOpaqueCaptureAddress{}; +#endif +#if 
(defined(VK_KHR_buffer_device_address)) + PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR fp_vkGetDeviceMemoryOpaqueCaptureAddressKHR = nullptr; +#else + void * fp_vkGetDeviceMemoryOpaqueCaptureAddressKHR{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkGetDeviceMicromapCompatibilityEXT fp_vkGetDeviceMicromapCompatibilityEXT = nullptr; +#else + void * fp_vkGetDeviceMicromapCompatibilityEXT{}; +#endif + PFN_vkGetDeviceQueue fp_vkGetDeviceQueue = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetDeviceQueue2 fp_vkGetDeviceQueue2 = nullptr; +#else + void * fp_vkGetDeviceQueue2{}; +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI fp_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI = nullptr; +#else + void * fp_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkGetDeviceTensorMemoryRequirementsARM fp_vkGetDeviceTensorMemoryRequirementsARM = nullptr; +#else + void * fp_vkGetDeviceTensorMemoryRequirementsARM{}; +#endif +#if (defined(VK_QCOM_tile_properties)) + PFN_vkGetDynamicRenderingTilePropertiesQCOM fp_vkGetDynamicRenderingTilePropertiesQCOM = nullptr; +#else + void * fp_vkGetDynamicRenderingTilePropertiesQCOM{}; +#endif +#if (defined(VK_KHR_video_encode_queue)) + PFN_vkGetEncodedVideoSessionParametersKHR fp_vkGetEncodedVideoSessionParametersKHR = nullptr; +#else + void * fp_vkGetEncodedVideoSessionParametersKHR{}; +#endif + PFN_vkGetEventStatus fp_vkGetEventStatus = nullptr; +#if (defined(VK_AMDX_shader_enqueue)) + PFN_vkGetExecutionGraphPipelineNodeIndexAMDX fp_vkGetExecutionGraphPipelineNodeIndexAMDX = nullptr; +#else + void * fp_vkGetExecutionGraphPipelineNodeIndexAMDX{}; +#endif +#if (defined(VK_AMDX_shader_enqueue)) + PFN_vkGetExecutionGraphPipelineScratchSizeAMDX fp_vkGetExecutionGraphPipelineScratchSizeAMDX = nullptr; +#else + void * fp_vkGetExecutionGraphPipelineScratchSizeAMDX{}; +#endif +#if (defined(VK_NV_external_compute_queue)) + 
PFN_vkGetExternalComputeQueueDataNV fp_vkGetExternalComputeQueueDataNV = nullptr; +#else + void * fp_vkGetExternalComputeQueueDataNV{}; +#endif +#if (defined(VK_KHR_external_fence_fd)) + PFN_vkGetFenceFdKHR fp_vkGetFenceFdKHR = nullptr; +#else + void * fp_vkGetFenceFdKHR{}; +#endif + PFN_vkGetFenceStatus fp_vkGetFenceStatus = nullptr; +#if (defined(VK_KHR_external_fence_win32)) + PFN_vkGetFenceWin32HandleKHR fp_vkGetFenceWin32HandleKHR = nullptr; +#else + void * fp_vkGetFenceWin32HandleKHR{}; +#endif +#if (defined(VK_QCOM_tile_properties)) + PFN_vkGetFramebufferTilePropertiesQCOM fp_vkGetFramebufferTilePropertiesQCOM = nullptr; +#else + void * fp_vkGetFramebufferTilePropertiesQCOM{}; +#endif +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkGetGeneratedCommandsMemoryRequirementsEXT fp_vkGetGeneratedCommandsMemoryRequirementsEXT = nullptr; +#else + void * fp_vkGetGeneratedCommandsMemoryRequirementsEXT{}; +#endif +#if (defined(VK_NV_device_generated_commands)) + PFN_vkGetGeneratedCommandsMemoryRequirementsNV fp_vkGetGeneratedCommandsMemoryRequirementsNV = nullptr; +#else + void * fp_vkGetGeneratedCommandsMemoryRequirementsNV{}; +#endif +#if (defined(VK_EXT_image_drm_format_modifier)) + PFN_vkGetImageDrmFormatModifierPropertiesEXT fp_vkGetImageDrmFormatModifierPropertiesEXT = nullptr; +#else + void * fp_vkGetImageDrmFormatModifierPropertiesEXT{}; +#endif + PFN_vkGetImageMemoryRequirements fp_vkGetImageMemoryRequirements = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetImageMemoryRequirements2 fp_vkGetImageMemoryRequirements2 = nullptr; +#else + void * fp_vkGetImageMemoryRequirements2{}; +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + PFN_vkGetImageMemoryRequirements2KHR fp_vkGetImageMemoryRequirements2KHR = nullptr; +#else + void * fp_vkGetImageMemoryRequirements2KHR{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetImageOpaqueCaptureDescriptorDataEXT fp_vkGetImageOpaqueCaptureDescriptorDataEXT = nullptr; +#else + void * 
fp_vkGetImageOpaqueCaptureDescriptorDataEXT{}; +#endif + PFN_vkGetImageSparseMemoryRequirements fp_vkGetImageSparseMemoryRequirements = nullptr; +#if (defined(VK_VERSION_1_1)) + PFN_vkGetImageSparseMemoryRequirements2 fp_vkGetImageSparseMemoryRequirements2 = nullptr; +#else + void * fp_vkGetImageSparseMemoryRequirements2{}; +#endif +#if (defined(VK_KHR_get_memory_requirements2)) + PFN_vkGetImageSparseMemoryRequirements2KHR fp_vkGetImageSparseMemoryRequirements2KHR = nullptr; +#else + void * fp_vkGetImageSparseMemoryRequirements2KHR{}; +#endif + PFN_vkGetImageSubresourceLayout fp_vkGetImageSubresourceLayout = nullptr; +#if (defined(VK_VERSION_1_4)) + PFN_vkGetImageSubresourceLayout2 fp_vkGetImageSubresourceLayout2 = nullptr; +#else + void * fp_vkGetImageSubresourceLayout2{}; +#endif +#if (defined(VK_EXT_host_image_copy) || defined(VK_EXT_image_compression_control)) + PFN_vkGetImageSubresourceLayout2EXT fp_vkGetImageSubresourceLayout2EXT = nullptr; +#else + void * fp_vkGetImageSubresourceLayout2EXT{}; +#endif +#if (defined(VK_KHR_maintenance5)) + PFN_vkGetImageSubresourceLayout2KHR fp_vkGetImageSubresourceLayout2KHR = nullptr; +#else + void * fp_vkGetImageSubresourceLayout2KHR{}; +#endif +#if (defined(VK_NVX_image_view_handle)) + PFN_vkGetImageViewAddressNVX fp_vkGetImageViewAddressNVX = nullptr; +#else + void * fp_vkGetImageViewAddressNVX{}; +#endif +#if (defined(VK_NVX_image_view_handle)) + PFN_vkGetImageViewHandle64NVX fp_vkGetImageViewHandle64NVX = nullptr; +#else + void * fp_vkGetImageViewHandle64NVX{}; +#endif +#if (defined(VK_NVX_image_view_handle)) + PFN_vkGetImageViewHandleNVX fp_vkGetImageViewHandleNVX = nullptr; +#else + void * fp_vkGetImageViewHandleNVX{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT fp_vkGetImageViewOpaqueCaptureDescriptorDataEXT = nullptr; +#else + void * fp_vkGetImageViewOpaqueCaptureDescriptorDataEXT{}; +#endif +#if (defined(VK_NV_low_latency2)) && VK_HEADER_VERSION >= 271 + 
PFN_vkGetLatencyTimingsNV fp_vkGetLatencyTimingsNV = nullptr; +#else + void * fp_vkGetLatencyTimingsNV{}; +#endif +#if (defined(VK_ANDROID_external_memory_android_hardware_buffer)) + PFN_vkGetMemoryAndroidHardwareBufferANDROID fp_vkGetMemoryAndroidHardwareBufferANDROID = nullptr; +#else + void * fp_vkGetMemoryAndroidHardwareBufferANDROID{}; +#endif +#if (defined(VK_KHR_external_memory_fd)) + PFN_vkGetMemoryFdKHR fp_vkGetMemoryFdKHR = nullptr; +#else + void * fp_vkGetMemoryFdKHR{}; +#endif +#if (defined(VK_KHR_external_memory_fd)) + PFN_vkGetMemoryFdPropertiesKHR fp_vkGetMemoryFdPropertiesKHR = nullptr; +#else + void * fp_vkGetMemoryFdPropertiesKHR{}; +#endif +#if (defined(VK_EXT_external_memory_host)) + PFN_vkGetMemoryHostPointerPropertiesEXT fp_vkGetMemoryHostPointerPropertiesEXT = nullptr; +#else + void * fp_vkGetMemoryHostPointerPropertiesEXT{}; +#endif +#if (defined(VK_EXT_external_memory_metal)) + PFN_vkGetMemoryMetalHandleEXT fp_vkGetMemoryMetalHandleEXT = nullptr; +#else + void * fp_vkGetMemoryMetalHandleEXT{}; +#endif +#if (defined(VK_EXT_external_memory_metal)) + PFN_vkGetMemoryMetalHandlePropertiesEXT fp_vkGetMemoryMetalHandlePropertiesEXT = nullptr; +#else + void * fp_vkGetMemoryMetalHandlePropertiesEXT{}; +#endif +#if (defined(VK_NV_external_memory_rdma)) + PFN_vkGetMemoryRemoteAddressNV fp_vkGetMemoryRemoteAddressNV = nullptr; +#else + void * fp_vkGetMemoryRemoteAddressNV{}; +#endif +#if (defined(VK_KHR_external_memory_win32)) + PFN_vkGetMemoryWin32HandleKHR fp_vkGetMemoryWin32HandleKHR = nullptr; +#else + void * fp_vkGetMemoryWin32HandleKHR{}; +#endif +#if (defined(VK_NV_external_memory_win32)) + PFN_vkGetMemoryWin32HandleNV fp_vkGetMemoryWin32HandleNV = nullptr; +#else + void * fp_vkGetMemoryWin32HandleNV{}; +#endif +#if (defined(VK_KHR_external_memory_win32)) + PFN_vkGetMemoryWin32HandlePropertiesKHR fp_vkGetMemoryWin32HandlePropertiesKHR = nullptr; +#else + void * fp_vkGetMemoryWin32HandlePropertiesKHR{}; +#endif +#if 
(defined(VK_FUCHSIA_external_memory)) + PFN_vkGetMemoryZirconHandleFUCHSIA fp_vkGetMemoryZirconHandleFUCHSIA = nullptr; +#else + void * fp_vkGetMemoryZirconHandleFUCHSIA{}; +#endif +#if (defined(VK_FUCHSIA_external_memory)) + PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA fp_vkGetMemoryZirconHandlePropertiesFUCHSIA = nullptr; +#else + void * fp_vkGetMemoryZirconHandlePropertiesFUCHSIA{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkGetMicromapBuildSizesEXT fp_vkGetMicromapBuildSizesEXT = nullptr; +#else + void * fp_vkGetMicromapBuildSizesEXT{}; +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + PFN_vkGetPartitionedAccelerationStructuresBuildSizesNV fp_vkGetPartitionedAccelerationStructuresBuildSizesNV = nullptr; +#else + void * fp_vkGetPartitionedAccelerationStructuresBuildSizesNV{}; +#endif +#if (defined(VK_GOOGLE_display_timing)) + PFN_vkGetPastPresentationTimingGOOGLE fp_vkGetPastPresentationTimingGOOGLE = nullptr; +#else + void * fp_vkGetPastPresentationTimingGOOGLE{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkGetPerformanceParameterINTEL fp_vkGetPerformanceParameterINTEL = nullptr; +#else + void * fp_vkGetPerformanceParameterINTEL{}; +#endif +#if (defined(VK_KHR_pipeline_binary)) + PFN_vkGetPipelineBinaryDataKHR fp_vkGetPipelineBinaryDataKHR = nullptr; +#else + void * fp_vkGetPipelineBinaryDataKHR{}; +#endif + PFN_vkGetPipelineCacheData fp_vkGetPipelineCacheData = nullptr; +#if (defined(VK_KHR_pipeline_executable_properties)) + PFN_vkGetPipelineExecutableInternalRepresentationsKHR fp_vkGetPipelineExecutableInternalRepresentationsKHR = nullptr; +#else + void * fp_vkGetPipelineExecutableInternalRepresentationsKHR{}; +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + PFN_vkGetPipelineExecutablePropertiesKHR fp_vkGetPipelineExecutablePropertiesKHR = nullptr; +#else + void * fp_vkGetPipelineExecutablePropertiesKHR{}; +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + 
PFN_vkGetPipelineExecutableStatisticsKHR fp_vkGetPipelineExecutableStatisticsKHR = nullptr; +#else + void * fp_vkGetPipelineExecutableStatisticsKHR{}; +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + PFN_vkGetPipelineIndirectDeviceAddressNV fp_vkGetPipelineIndirectDeviceAddressNV = nullptr; +#else + void * fp_vkGetPipelineIndirectDeviceAddressNV{}; +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + PFN_vkGetPipelineIndirectMemoryRequirementsNV fp_vkGetPipelineIndirectMemoryRequirementsNV = nullptr; +#else + void * fp_vkGetPipelineIndirectMemoryRequirementsNV{}; +#endif +#if (defined(VK_KHR_pipeline_binary)) + PFN_vkGetPipelineKeyKHR fp_vkGetPipelineKeyKHR = nullptr; +#else + void * fp_vkGetPipelineKeyKHR{}; +#endif +#if (defined(VK_EXT_pipeline_properties)) + PFN_vkGetPipelinePropertiesEXT fp_vkGetPipelinePropertiesEXT = nullptr; +#else + void * fp_vkGetPipelinePropertiesEXT{}; +#endif +#if (defined(VK_VERSION_1_3)) + PFN_vkGetPrivateData fp_vkGetPrivateData = nullptr; +#else + void * fp_vkGetPrivateData{}; +#endif +#if (defined(VK_EXT_private_data)) + PFN_vkGetPrivateDataEXT fp_vkGetPrivateDataEXT = nullptr; +#else + void * fp_vkGetPrivateDataEXT{}; +#endif + PFN_vkGetQueryPoolResults fp_vkGetQueryPoolResults = nullptr; +#if (defined(VK_NV_device_diagnostic_checkpoints)) + PFN_vkGetQueueCheckpointData2NV fp_vkGetQueueCheckpointData2NV = nullptr; +#else + void * fp_vkGetQueueCheckpointData2NV{}; +#endif +#if (defined(VK_NV_device_diagnostic_checkpoints)) + PFN_vkGetQueueCheckpointDataNV fp_vkGetQueueCheckpointDataNV = nullptr; +#else + void * fp_vkGetQueueCheckpointDataNV{}; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR fp_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = nullptr; +#else + void * fp_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR{}; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + PFN_vkGetRayTracingShaderGroupHandlesKHR 
fp_vkGetRayTracingShaderGroupHandlesKHR = nullptr; +#else + void * fp_vkGetRayTracingShaderGroupHandlesKHR{}; +#endif +#if (defined(VK_NV_ray_tracing)) + PFN_vkGetRayTracingShaderGroupHandlesNV fp_vkGetRayTracingShaderGroupHandlesNV = nullptr; +#else + void * fp_vkGetRayTracingShaderGroupHandlesNV{}; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + PFN_vkGetRayTracingShaderGroupStackSizeKHR fp_vkGetRayTracingShaderGroupStackSizeKHR = nullptr; +#else + void * fp_vkGetRayTracingShaderGroupStackSizeKHR{}; +#endif +#if (defined(VK_GOOGLE_display_timing)) + PFN_vkGetRefreshCycleDurationGOOGLE fp_vkGetRefreshCycleDurationGOOGLE = nullptr; +#else + void * fp_vkGetRefreshCycleDurationGOOGLE{}; +#endif + PFN_vkGetRenderAreaGranularity fp_vkGetRenderAreaGranularity = nullptr; +#if (defined(VK_VERSION_1_4)) + PFN_vkGetRenderingAreaGranularity fp_vkGetRenderingAreaGranularity = nullptr; +#else + void * fp_vkGetRenderingAreaGranularity{}; +#endif +#if (defined(VK_KHR_maintenance5)) + PFN_vkGetRenderingAreaGranularityKHR fp_vkGetRenderingAreaGranularityKHR = nullptr; +#else + void * fp_vkGetRenderingAreaGranularityKHR{}; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT fp_vkGetSamplerOpaqueCaptureDescriptorDataEXT = nullptr; +#else + void * fp_vkGetSamplerOpaqueCaptureDescriptorDataEXT{}; +#endif +#if (defined(VK_QNX_external_memory_screen_buffer)) + PFN_vkGetScreenBufferPropertiesQNX fp_vkGetScreenBufferPropertiesQNX = nullptr; +#else + void * fp_vkGetScreenBufferPropertiesQNX{}; +#endif +#if (defined(VK_VERSION_1_2)) + PFN_vkGetSemaphoreCounterValue fp_vkGetSemaphoreCounterValue = nullptr; +#else + void * fp_vkGetSemaphoreCounterValue{}; +#endif +#if (defined(VK_KHR_timeline_semaphore)) + PFN_vkGetSemaphoreCounterValueKHR fp_vkGetSemaphoreCounterValueKHR = nullptr; +#else + void * fp_vkGetSemaphoreCounterValueKHR{}; +#endif +#if (defined(VK_KHR_external_semaphore_fd)) + PFN_vkGetSemaphoreFdKHR fp_vkGetSemaphoreFdKHR = 
nullptr; +#else + void * fp_vkGetSemaphoreFdKHR{}; +#endif +#if (defined(VK_KHR_external_semaphore_win32)) + PFN_vkGetSemaphoreWin32HandleKHR fp_vkGetSemaphoreWin32HandleKHR = nullptr; +#else + void * fp_vkGetSemaphoreWin32HandleKHR{}; +#endif +#if (defined(VK_FUCHSIA_external_semaphore)) + PFN_vkGetSemaphoreZirconHandleFUCHSIA fp_vkGetSemaphoreZirconHandleFUCHSIA = nullptr; +#else + void * fp_vkGetSemaphoreZirconHandleFUCHSIA{}; +#endif +#if (defined(VK_EXT_shader_object)) + PFN_vkGetShaderBinaryDataEXT fp_vkGetShaderBinaryDataEXT = nullptr; +#else + void * fp_vkGetShaderBinaryDataEXT{}; +#endif +#if (defined(VK_AMD_shader_info)) + PFN_vkGetShaderInfoAMD fp_vkGetShaderInfoAMD = nullptr; +#else + void * fp_vkGetShaderInfoAMD{}; +#endif +#if (defined(VK_EXT_shader_module_identifier)) + PFN_vkGetShaderModuleCreateInfoIdentifierEXT fp_vkGetShaderModuleCreateInfoIdentifierEXT = nullptr; +#else + void * fp_vkGetShaderModuleCreateInfoIdentifierEXT{}; +#endif +#if (defined(VK_EXT_shader_module_identifier)) + PFN_vkGetShaderModuleIdentifierEXT fp_vkGetShaderModuleIdentifierEXT = nullptr; +#else + void * fp_vkGetShaderModuleIdentifierEXT{}; +#endif +#if (defined(VK_EXT_display_control)) + PFN_vkGetSwapchainCounterEXT fp_vkGetSwapchainCounterEXT = nullptr; +#else + void * fp_vkGetSwapchainCounterEXT{}; +#endif +#if (defined(VK_OHOS_native_buffer)) + PFN_vkGetSwapchainGrallocUsageOHOS fp_vkGetSwapchainGrallocUsageOHOS = nullptr; +#else + void * fp_vkGetSwapchainGrallocUsageOHOS{}; +#endif +#if (defined(VK_KHR_swapchain)) + PFN_vkGetSwapchainImagesKHR fp_vkGetSwapchainImagesKHR = nullptr; +#else + void * fp_vkGetSwapchainImagesKHR{}; +#endif +#if (defined(VK_KHR_shared_presentable_image)) + PFN_vkGetSwapchainStatusKHR fp_vkGetSwapchainStatusKHR = nullptr; +#else + void * fp_vkGetSwapchainStatusKHR{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkGetTensorMemoryRequirementsARM fp_vkGetTensorMemoryRequirementsARM = nullptr; +#else + void * fp_vkGetTensorMemoryRequirementsARM{}; 
+#endif +#if (defined(VK_ARM_tensors)) + PFN_vkGetTensorOpaqueCaptureDescriptorDataARM fp_vkGetTensorOpaqueCaptureDescriptorDataARM = nullptr; +#else + void * fp_vkGetTensorOpaqueCaptureDescriptorDataARM{}; +#endif +#if (defined(VK_ARM_tensors)) + PFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM fp_vkGetTensorViewOpaqueCaptureDescriptorDataARM = nullptr; +#else + void * fp_vkGetTensorViewOpaqueCaptureDescriptorDataARM{}; +#endif +#if (defined(VK_EXT_validation_cache)) + PFN_vkGetValidationCacheDataEXT fp_vkGetValidationCacheDataEXT = nullptr; +#else + void * fp_vkGetValidationCacheDataEXT{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkGetVideoSessionMemoryRequirementsKHR fp_vkGetVideoSessionMemoryRequirementsKHR = nullptr; +#else + void * fp_vkGetVideoSessionMemoryRequirementsKHR{}; +#endif +#if (defined(VK_KHR_external_fence_fd)) + PFN_vkImportFenceFdKHR fp_vkImportFenceFdKHR = nullptr; +#else + void * fp_vkImportFenceFdKHR{}; +#endif +#if (defined(VK_KHR_external_fence_win32)) + PFN_vkImportFenceWin32HandleKHR fp_vkImportFenceWin32HandleKHR = nullptr; +#else + void * fp_vkImportFenceWin32HandleKHR{}; +#endif +#if (defined(VK_KHR_external_semaphore_fd)) + PFN_vkImportSemaphoreFdKHR fp_vkImportSemaphoreFdKHR = nullptr; +#else + void * fp_vkImportSemaphoreFdKHR{}; +#endif +#if (defined(VK_KHR_external_semaphore_win32)) + PFN_vkImportSemaphoreWin32HandleKHR fp_vkImportSemaphoreWin32HandleKHR = nullptr; +#else + void * fp_vkImportSemaphoreWin32HandleKHR{}; +#endif +#if (defined(VK_FUCHSIA_external_semaphore)) + PFN_vkImportSemaphoreZirconHandleFUCHSIA fp_vkImportSemaphoreZirconHandleFUCHSIA = nullptr; +#else + void * fp_vkImportSemaphoreZirconHandleFUCHSIA{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkInitializePerformanceApiINTEL fp_vkInitializePerformanceApiINTEL = nullptr; +#else + void * fp_vkInitializePerformanceApiINTEL{}; +#endif + PFN_vkInvalidateMappedMemoryRanges fp_vkInvalidateMappedMemoryRanges = nullptr; +#if 
(defined(VK_NV_low_latency2)) + PFN_vkLatencySleepNV fp_vkLatencySleepNV = nullptr; +#else + void * fp_vkLatencySleepNV{}; +#endif + PFN_vkMapMemory fp_vkMapMemory = nullptr; +#if (defined(VK_VERSION_1_4)) + PFN_vkMapMemory2 fp_vkMapMemory2 = nullptr; +#else + void * fp_vkMapMemory2{}; +#endif +#if (defined(VK_KHR_map_memory2)) + PFN_vkMapMemory2KHR fp_vkMapMemory2KHR = nullptr; +#else + void * fp_vkMapMemory2KHR{}; +#endif + PFN_vkMergePipelineCaches fp_vkMergePipelineCaches = nullptr; +#if (defined(VK_EXT_validation_cache)) + PFN_vkMergeValidationCachesEXT fp_vkMergeValidationCachesEXT = nullptr; +#else + void * fp_vkMergeValidationCachesEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkQueueBeginDebugUtilsLabelEXT fp_vkQueueBeginDebugUtilsLabelEXT = nullptr; +#else + void * fp_vkQueueBeginDebugUtilsLabelEXT{}; +#endif + PFN_vkQueueBindSparse fp_vkQueueBindSparse = nullptr; +#if (defined(VK_EXT_debug_utils)) + PFN_vkQueueEndDebugUtilsLabelEXT fp_vkQueueEndDebugUtilsLabelEXT = nullptr; +#else + void * fp_vkQueueEndDebugUtilsLabelEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkQueueInsertDebugUtilsLabelEXT fp_vkQueueInsertDebugUtilsLabelEXT = nullptr; +#else + void * fp_vkQueueInsertDebugUtilsLabelEXT{}; +#endif +#if (defined(VK_NV_low_latency2)) + PFN_vkQueueNotifyOutOfBandNV fp_vkQueueNotifyOutOfBandNV = nullptr; +#else + void * fp_vkQueueNotifyOutOfBandNV{}; +#endif +#if (defined(VK_KHR_swapchain)) + PFN_vkQueuePresentKHR fp_vkQueuePresentKHR = nullptr; +#else + void * fp_vkQueuePresentKHR{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkQueueSetPerformanceConfigurationINTEL fp_vkQueueSetPerformanceConfigurationINTEL = nullptr; +#else + void * fp_vkQueueSetPerformanceConfigurationINTEL{}; +#endif +#if (defined(VK_OHOS_native_buffer)) + PFN_vkQueueSignalReleaseImageOHOS fp_vkQueueSignalReleaseImageOHOS = nullptr; +#else + void * fp_vkQueueSignalReleaseImageOHOS{}; +#endif + PFN_vkQueueSubmit fp_vkQueueSubmit = nullptr; +#if 
(defined(VK_VERSION_1_3)) + PFN_vkQueueSubmit2 fp_vkQueueSubmit2 = nullptr; +#else + void * fp_vkQueueSubmit2{}; +#endif +#if (defined(VK_KHR_synchronization2)) + PFN_vkQueueSubmit2KHR fp_vkQueueSubmit2KHR = nullptr; +#else + void * fp_vkQueueSubmit2KHR{}; +#endif + PFN_vkQueueWaitIdle fp_vkQueueWaitIdle = nullptr; +#if (defined(VK_EXT_display_control)) + PFN_vkRegisterDeviceEventEXT fp_vkRegisterDeviceEventEXT = nullptr; +#else + void * fp_vkRegisterDeviceEventEXT{}; +#endif +#if (defined(VK_EXT_display_control)) + PFN_vkRegisterDisplayEventEXT fp_vkRegisterDisplayEventEXT = nullptr; +#else + void * fp_vkRegisterDisplayEventEXT{}; +#endif +#if (defined(VK_KHR_pipeline_binary)) + PFN_vkReleaseCapturedPipelineDataKHR fp_vkReleaseCapturedPipelineDataKHR = nullptr; +#else + void * fp_vkReleaseCapturedPipelineDataKHR{}; +#endif +#if (defined(VK_EXT_full_screen_exclusive)) + PFN_vkReleaseFullScreenExclusiveModeEXT fp_vkReleaseFullScreenExclusiveModeEXT = nullptr; +#else + void * fp_vkReleaseFullScreenExclusiveModeEXT{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkReleasePerformanceConfigurationINTEL fp_vkReleasePerformanceConfigurationINTEL = nullptr; +#else + void * fp_vkReleasePerformanceConfigurationINTEL{}; +#endif +#if (defined(VK_KHR_performance_query)) + PFN_vkReleaseProfilingLockKHR fp_vkReleaseProfilingLockKHR = nullptr; +#else + void * fp_vkReleaseProfilingLockKHR{}; +#endif +#if (defined(VK_EXT_swapchain_maintenance1)) + PFN_vkReleaseSwapchainImagesEXT fp_vkReleaseSwapchainImagesEXT = nullptr; +#else + void * fp_vkReleaseSwapchainImagesEXT{}; +#endif +#if (defined(VK_KHR_swapchain_maintenance1)) + PFN_vkReleaseSwapchainImagesKHR fp_vkReleaseSwapchainImagesKHR = nullptr; +#else + void * fp_vkReleaseSwapchainImagesKHR{}; +#endif + PFN_vkResetCommandBuffer fp_vkResetCommandBuffer = nullptr; + PFN_vkResetCommandPool fp_vkResetCommandPool = nullptr; + PFN_vkResetDescriptorPool fp_vkResetDescriptorPool = nullptr; + PFN_vkResetEvent fp_vkResetEvent = 
nullptr; + PFN_vkResetFences fp_vkResetFences = nullptr; +#if (defined(VK_VERSION_1_2)) + PFN_vkResetQueryPool fp_vkResetQueryPool = nullptr; +#else + void * fp_vkResetQueryPool{}; +#endif +#if (defined(VK_EXT_host_query_reset)) + PFN_vkResetQueryPoolEXT fp_vkResetQueryPoolEXT = nullptr; +#else + void * fp_vkResetQueryPoolEXT{}; +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA fp_vkSetBufferCollectionBufferConstraintsFUCHSIA = nullptr; +#else + void * fp_vkSetBufferCollectionBufferConstraintsFUCHSIA{}; +#endif +#if (defined(VK_FUCHSIA_buffer_collection)) + PFN_vkSetBufferCollectionImageConstraintsFUCHSIA fp_vkSetBufferCollectionImageConstraintsFUCHSIA = nullptr; +#else + void * fp_vkSetBufferCollectionImageConstraintsFUCHSIA{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkSetDebugUtilsObjectNameEXT fp_vkSetDebugUtilsObjectNameEXT = nullptr; +#else + void * fp_vkSetDebugUtilsObjectNameEXT{}; +#endif +#if (defined(VK_EXT_debug_utils)) + PFN_vkSetDebugUtilsObjectTagEXT fp_vkSetDebugUtilsObjectTagEXT = nullptr; +#else + void * fp_vkSetDebugUtilsObjectTagEXT{}; +#endif +#if (defined(VK_EXT_pageable_device_local_memory)) + PFN_vkSetDeviceMemoryPriorityEXT fp_vkSetDeviceMemoryPriorityEXT = nullptr; +#else + void * fp_vkSetDeviceMemoryPriorityEXT{}; +#endif + PFN_vkSetEvent fp_vkSetEvent = nullptr; +#if (defined(VK_EXT_hdr_metadata)) + PFN_vkSetHdrMetadataEXT fp_vkSetHdrMetadataEXT = nullptr; +#else + void * fp_vkSetHdrMetadataEXT{}; +#endif +#if (defined(VK_NV_low_latency2)) + PFN_vkSetLatencyMarkerNV fp_vkSetLatencyMarkerNV = nullptr; +#else + void * fp_vkSetLatencyMarkerNV{}; +#endif +#if (defined(VK_NV_low_latency2)) + PFN_vkSetLatencySleepModeNV fp_vkSetLatencySleepModeNV = nullptr; +#else + void * fp_vkSetLatencySleepModeNV{}; +#endif +#if (defined(VK_AMD_display_native_hdr)) + PFN_vkSetLocalDimmingAMD fp_vkSetLocalDimmingAMD = nullptr; +#else + void * fp_vkSetLocalDimmingAMD{}; +#endif +#if 
(defined(VK_VERSION_1_3)) + PFN_vkSetPrivateData fp_vkSetPrivateData = nullptr; +#else + void * fp_vkSetPrivateData{}; +#endif +#if (defined(VK_EXT_private_data)) + PFN_vkSetPrivateDataEXT fp_vkSetPrivateDataEXT = nullptr; +#else + void * fp_vkSetPrivateDataEXT{}; +#endif +#if (defined(VK_VERSION_1_2)) + PFN_vkSignalSemaphore fp_vkSignalSemaphore = nullptr; +#else + void * fp_vkSignalSemaphore{}; +#endif +#if (defined(VK_KHR_timeline_semaphore)) + PFN_vkSignalSemaphoreKHR fp_vkSignalSemaphoreKHR = nullptr; +#else + void * fp_vkSignalSemaphoreKHR{}; +#endif +#if (defined(VK_VERSION_1_4)) + PFN_vkTransitionImageLayout fp_vkTransitionImageLayout = nullptr; +#else + void * fp_vkTransitionImageLayout{}; +#endif +#if (defined(VK_EXT_host_image_copy)) + PFN_vkTransitionImageLayoutEXT fp_vkTransitionImageLayoutEXT = nullptr; +#else + void * fp_vkTransitionImageLayoutEXT{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkTrimCommandPool fp_vkTrimCommandPool = nullptr; +#else + void * fp_vkTrimCommandPool{}; +#endif +#if (defined(VK_KHR_maintenance1)) + PFN_vkTrimCommandPoolKHR fp_vkTrimCommandPoolKHR = nullptr; +#else + void * fp_vkTrimCommandPoolKHR{}; +#endif +#if (defined(VK_INTEL_performance_query)) + PFN_vkUninitializePerformanceApiINTEL fp_vkUninitializePerformanceApiINTEL = nullptr; +#else + void * fp_vkUninitializePerformanceApiINTEL{}; +#endif + PFN_vkUnmapMemory fp_vkUnmapMemory = nullptr; +#if (defined(VK_VERSION_1_4)) + PFN_vkUnmapMemory2 fp_vkUnmapMemory2 = nullptr; +#else + void * fp_vkUnmapMemory2{}; +#endif +#if (defined(VK_KHR_map_memory2)) + PFN_vkUnmapMemory2KHR fp_vkUnmapMemory2KHR = nullptr; +#else + void * fp_vkUnmapMemory2KHR{}; +#endif +#if (defined(VK_VERSION_1_1)) + PFN_vkUpdateDescriptorSetWithTemplate fp_vkUpdateDescriptorSetWithTemplate = nullptr; +#else + void * fp_vkUpdateDescriptorSetWithTemplate{}; +#endif +#if (defined(VK_KHR_descriptor_update_template)) + PFN_vkUpdateDescriptorSetWithTemplateKHR fp_vkUpdateDescriptorSetWithTemplateKHR = 
nullptr; +#else + void * fp_vkUpdateDescriptorSetWithTemplateKHR{}; +#endif + PFN_vkUpdateDescriptorSets fp_vkUpdateDescriptorSets = nullptr; +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkUpdateIndirectExecutionSetPipelineEXT fp_vkUpdateIndirectExecutionSetPipelineEXT = nullptr; +#else + void * fp_vkUpdateIndirectExecutionSetPipelineEXT{}; +#endif +#if (defined(VK_EXT_device_generated_commands)) + PFN_vkUpdateIndirectExecutionSetShaderEXT fp_vkUpdateIndirectExecutionSetShaderEXT = nullptr; +#else + void * fp_vkUpdateIndirectExecutionSetShaderEXT{}; +#endif +#if (defined(VK_KHR_video_queue)) + PFN_vkUpdateVideoSessionParametersKHR fp_vkUpdateVideoSessionParametersKHR = nullptr; +#else + void * fp_vkUpdateVideoSessionParametersKHR{}; +#endif + PFN_vkWaitForFences fp_vkWaitForFences = nullptr; +#if (defined(VK_KHR_present_wait2)) + PFN_vkWaitForPresent2KHR fp_vkWaitForPresent2KHR = nullptr; +#else + void * fp_vkWaitForPresent2KHR{}; +#endif +#if (defined(VK_KHR_present_wait)) + PFN_vkWaitForPresentKHR fp_vkWaitForPresentKHR = nullptr; +#else + void * fp_vkWaitForPresentKHR{}; +#endif +#if (defined(VK_VERSION_1_2)) + PFN_vkWaitSemaphores fp_vkWaitSemaphores = nullptr; +#else + void * fp_vkWaitSemaphores{}; +#endif +#if (defined(VK_KHR_timeline_semaphore)) + PFN_vkWaitSemaphoresKHR fp_vkWaitSemaphoresKHR = nullptr; +#else + void * fp_vkWaitSemaphoresKHR{}; +#endif +#if (defined(VK_KHR_acceleration_structure)) + PFN_vkWriteAccelerationStructuresPropertiesKHR fp_vkWriteAccelerationStructuresPropertiesKHR = nullptr; +#else + void * fp_vkWriteAccelerationStructuresPropertiesKHR{}; +#endif +#if (defined(VK_EXT_opacity_micromap)) + PFN_vkWriteMicromapsPropertiesEXT fp_vkWriteMicromapsPropertiesEXT = nullptr; +#else + void * fp_vkWriteMicromapsPropertiesEXT{}; +#endif + bool is_populated() const { return populated; } + VkDevice device = VK_NULL_HANDLE; +private: + bool populated = false; +}; + +} // namespace vkb \ No newline at end of file diff --git 
a/extern/vk-bootstrap/src/VkBootstrapFeatureChain.h b/extern/vk-bootstrap/src/VkBootstrapFeatureChain.h new file mode 100644 index 0000000000..39bb741a06 --- /dev/null +++ b/extern/vk-bootstrap/src/VkBootstrapFeatureChain.h @@ -0,0 +1,1238 @@ +/* + * Copyright © 2025 Charles Giessen (charles@lunarg.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the “Software”), to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+
+// This file is a part of VkBootstrap
+// https://github.com/charles-lunarg/vk-bootstrap
+
+#pragma once
+#include <string>
+#include <vector>
+#include <vulkan/vulkan_core.h>
+#if defined(VK_ENABLE_BETA_EXTENSIONS)
+#include <vulkan/vulkan_beta.h>
+#endif // defined(VK_ENABLE_BETA_EXTENSIONS)
+#if defined(VK_USE_PLATFORM_ANDROID_KHR)
+#include <vulkan/vulkan_android.h>
+#endif // defined(VK_USE_PLATFORM_ANDROID_KHR)
+#if defined(VK_USE_PLATFORM_SCREEN_QNX)
+#include <screen/screen.h>
+#include <vulkan/vulkan_screen.h>
+#endif // defined(VK_USE_PLATFORM_SCREEN_QNX)
+
+namespace vkb::detail {
+
+void compare_VkPhysicalDeviceFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceFeatures const& supported, VkPhysicalDeviceFeatures const& requested);
+void merge_VkPhysicalDeviceFeatures(VkPhysicalDeviceFeatures & current, VkPhysicalDeviceFeatures const& merge_in);
+#if (defined(VK_VERSION_1_1))
+void compare_VkPhysicalDeviceProtectedMemoryFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceProtectedMemoryFeatures const& supported, VkPhysicalDeviceProtectedMemoryFeatures const& requested);
+void merge_VkPhysicalDeviceProtectedMemoryFeatures(VkPhysicalDeviceProtectedMemoryFeatures & current, VkPhysicalDeviceProtectedMemoryFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage))
+void compare_VkPhysicalDevice16BitStorageFeatures(std::vector<std::string> & error_list, VkPhysicalDevice16BitStorageFeatures const& supported, VkPhysicalDevice16BitStorageFeatures const& requested);
+void merge_VkPhysicalDevice16BitStorageFeatures(VkPhysicalDevice16BitStorageFeatures & current, VkPhysicalDevice16BitStorageFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage))
+void compare_VkPhysicalDevice16BitStorageFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDevice16BitStorageFeaturesKHR const& supported, VkPhysicalDevice16BitStorageFeaturesKHR const& requested);
+void merge_VkPhysicalDevice16BitStorageFeaturesKHR(VkPhysicalDevice16BitStorageFeaturesKHR & current, VkPhysicalDevice16BitStorageFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+void compare_VkPhysicalDeviceVariablePointersFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceVariablePointersFeatures const& supported, VkPhysicalDeviceVariablePointersFeatures const& requested);
+void merge_VkPhysicalDeviceVariablePointersFeatures(VkPhysicalDeviceVariablePointersFeatures & current, VkPhysicalDeviceVariablePointersFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+void compare_VkPhysicalDeviceVariablePointerFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceVariablePointerFeatures const& supported, VkPhysicalDeviceVariablePointerFeatures const& requested);
+void merge_VkPhysicalDeviceVariablePointerFeatures(VkPhysicalDeviceVariablePointerFeatures & current, VkPhysicalDeviceVariablePointerFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+void compare_VkPhysicalDeviceVariablePointerFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceVariablePointerFeaturesKHR const& supported, VkPhysicalDeviceVariablePointerFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceVariablePointerFeaturesKHR(VkPhysicalDeviceVariablePointerFeaturesKHR & current, VkPhysicalDeviceVariablePointerFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+void compare_VkPhysicalDeviceVariablePointersFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceVariablePointersFeaturesKHR const& supported, VkPhysicalDeviceVariablePointersFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceVariablePointersFeaturesKHR(VkPhysicalDeviceVariablePointersFeaturesKHR & current, VkPhysicalDeviceVariablePointersFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion))
+void compare_VkPhysicalDeviceSamplerYcbcrConversionFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceSamplerYcbcrConversionFeatures const& supported, VkPhysicalDeviceSamplerYcbcrConversionFeatures const& requested);
+void merge_VkPhysicalDeviceSamplerYcbcrConversionFeatures(VkPhysicalDeviceSamplerYcbcrConversionFeatures & current, VkPhysicalDeviceSamplerYcbcrConversionFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion))
+void compare_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR const& supported, VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR(VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR & current, VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_multiview))
+void compare_VkPhysicalDeviceMultiviewFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceMultiviewFeatures const& supported, VkPhysicalDeviceMultiviewFeatures const& requested);
+void merge_VkPhysicalDeviceMultiviewFeatures(VkPhysicalDeviceMultiviewFeatures & current, VkPhysicalDeviceMultiviewFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_multiview))
+#if (defined(VK_VERSION_1_1) || defined(VK_KHR_multiview))
+void compare_VkPhysicalDeviceMultiviewFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceMultiviewFeaturesKHR const& supported, VkPhysicalDeviceMultiviewFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceMultiviewFeaturesKHR(VkPhysicalDeviceMultiviewFeaturesKHR & current, VkPhysicalDeviceMultiviewFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_multiview))
+#if (defined(VK_VERSION_1_1))
+void compare_VkPhysicalDeviceShaderDrawParametersFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceShaderDrawParametersFeatures const& supported, VkPhysicalDeviceShaderDrawParametersFeatures const& requested);
+void merge_VkPhysicalDeviceShaderDrawParametersFeatures(VkPhysicalDeviceShaderDrawParametersFeatures & current, VkPhysicalDeviceShaderDrawParametersFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1))
+#if (defined(VK_VERSION_1_1))
+void compare_VkPhysicalDeviceShaderDrawParameterFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceShaderDrawParameterFeatures const& supported, VkPhysicalDeviceShaderDrawParameterFeatures const& requested);
+void merge_VkPhysicalDeviceShaderDrawParameterFeatures(VkPhysicalDeviceShaderDrawParameterFeatures & current, VkPhysicalDeviceShaderDrawParameterFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_1))
+#if (defined(VK_VERSION_1_2))
+void compare_VkPhysicalDeviceVulkan11Features(std::vector<std::string> & error_list, VkPhysicalDeviceVulkan11Features const& supported, VkPhysicalDeviceVulkan11Features const& requested);
+void merge_VkPhysicalDeviceVulkan11Features(VkPhysicalDeviceVulkan11Features & current, VkPhysicalDeviceVulkan11Features const& merge_in);
+#endif //(defined(VK_VERSION_1_2))
+#if (defined(VK_VERSION_1_2))
+void compare_VkPhysicalDeviceVulkan12Features(std::vector<std::string> & error_list, VkPhysicalDeviceVulkan12Features const& supported, VkPhysicalDeviceVulkan12Features const& requested);
+void merge_VkPhysicalDeviceVulkan12Features(VkPhysicalDeviceVulkan12Features & current, VkPhysicalDeviceVulkan12Features const& merge_in);
+#endif //(defined(VK_VERSION_1_2))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model))
+void compare_VkPhysicalDeviceVulkanMemoryModelFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceVulkanMemoryModelFeatures const& supported, VkPhysicalDeviceVulkanMemoryModelFeatures const& requested);
+void merge_VkPhysicalDeviceVulkanMemoryModelFeatures(VkPhysicalDeviceVulkanMemoryModelFeatures & current, VkPhysicalDeviceVulkanMemoryModelFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model))
+void compare_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceVulkanMemoryModelFeaturesKHR const& supported, VkPhysicalDeviceVulkanMemoryModelFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR(VkPhysicalDeviceVulkanMemoryModelFeaturesKHR & current, VkPhysicalDeviceVulkanMemoryModelFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model))
+#if (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset))
+void compare_VkPhysicalDeviceHostQueryResetFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceHostQueryResetFeatures const& supported, VkPhysicalDeviceHostQueryResetFeatures const& requested);
+void merge_VkPhysicalDeviceHostQueryResetFeatures(VkPhysicalDeviceHostQueryResetFeatures & current, VkPhysicalDeviceHostQueryResetFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset))
+#if (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset))
+void compare_VkPhysicalDeviceHostQueryResetFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceHostQueryResetFeaturesEXT const& supported, VkPhysicalDeviceHostQueryResetFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceHostQueryResetFeaturesEXT(VkPhysicalDeviceHostQueryResetFeaturesEXT & current, VkPhysicalDeviceHostQueryResetFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore))
+void compare_VkPhysicalDeviceTimelineSemaphoreFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceTimelineSemaphoreFeatures const& supported, VkPhysicalDeviceTimelineSemaphoreFeatures const& requested);
+void merge_VkPhysicalDeviceTimelineSemaphoreFeatures(VkPhysicalDeviceTimelineSemaphoreFeatures & current, VkPhysicalDeviceTimelineSemaphoreFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore))
+void compare_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceTimelineSemaphoreFeaturesKHR const& supported, VkPhysicalDeviceTimelineSemaphoreFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR(VkPhysicalDeviceTimelineSemaphoreFeaturesKHR & current, VkPhysicalDeviceTimelineSemaphoreFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address))
+void compare_VkPhysicalDeviceBufferDeviceAddressFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceBufferDeviceAddressFeatures const& supported, VkPhysicalDeviceBufferDeviceAddressFeatures const& requested);
+void merge_VkPhysicalDeviceBufferDeviceAddressFeatures(VkPhysicalDeviceBufferDeviceAddressFeatures & current, VkPhysicalDeviceBufferDeviceAddressFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address))
+void compare_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceBufferDeviceAddressFeaturesKHR const& supported, VkPhysicalDeviceBufferDeviceAddressFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR(VkPhysicalDeviceBufferDeviceAddressFeaturesKHR & current, VkPhysicalDeviceBufferDeviceAddressFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage))
+void compare_VkPhysicalDevice8BitStorageFeatures(std::vector<std::string> & error_list, VkPhysicalDevice8BitStorageFeatures const& supported, VkPhysicalDevice8BitStorageFeatures const& requested);
+void merge_VkPhysicalDevice8BitStorageFeatures(VkPhysicalDevice8BitStorageFeatures & current, VkPhysicalDevice8BitStorageFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage))
+void compare_VkPhysicalDevice8BitStorageFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDevice8BitStorageFeaturesKHR const& supported, VkPhysicalDevice8BitStorageFeaturesKHR const& requested);
+void merge_VkPhysicalDevice8BitStorageFeaturesKHR(VkPhysicalDevice8BitStorageFeaturesKHR & current, VkPhysicalDevice8BitStorageFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64))
+void compare_VkPhysicalDeviceShaderAtomicInt64Features(std::vector<std::string> & error_list, VkPhysicalDeviceShaderAtomicInt64Features const& supported, VkPhysicalDeviceShaderAtomicInt64Features const& requested);
+void merge_VkPhysicalDeviceShaderAtomicInt64Features(VkPhysicalDeviceShaderAtomicInt64Features & current, VkPhysicalDeviceShaderAtomicInt64Features const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64))
+void compare_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceShaderAtomicInt64FeaturesKHR const& supported, VkPhysicalDeviceShaderAtomicInt64FeaturesKHR const& requested);
+void merge_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR(VkPhysicalDeviceShaderAtomicInt64FeaturesKHR & current, VkPhysicalDeviceShaderAtomicInt64FeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8))
+void compare_VkPhysicalDeviceShaderFloat16Int8Features(std::vector<std::string> & error_list, VkPhysicalDeviceShaderFloat16Int8Features const& supported, VkPhysicalDeviceShaderFloat16Int8Features const& requested);
+void merge_VkPhysicalDeviceShaderFloat16Int8Features(VkPhysicalDeviceShaderFloat16Int8Features & current, VkPhysicalDeviceShaderFloat16Int8Features const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8))
+void compare_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceShaderFloat16Int8FeaturesKHR const& supported, VkPhysicalDeviceShaderFloat16Int8FeaturesKHR const& requested);
+void merge_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR(VkPhysicalDeviceShaderFloat16Int8FeaturesKHR & current, VkPhysicalDeviceShaderFloat16Int8FeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8))
+void compare_VkPhysicalDeviceFloat16Int8FeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceFloat16Int8FeaturesKHR const& supported, VkPhysicalDeviceFloat16Int8FeaturesKHR const& requested);
+void merge_VkPhysicalDeviceFloat16Int8FeaturesKHR(VkPhysicalDeviceFloat16Int8FeaturesKHR & current, VkPhysicalDeviceFloat16Int8FeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8))
+#if (defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing))
+void compare_VkPhysicalDeviceDescriptorIndexingFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceDescriptorIndexingFeatures const& supported, VkPhysicalDeviceDescriptorIndexingFeatures const& requested);
+void merge_VkPhysicalDeviceDescriptorIndexingFeatures(VkPhysicalDeviceDescriptorIndexingFeatures & current, VkPhysicalDeviceDescriptorIndexingFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing))
+#if (defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing))
+void compare_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceDescriptorIndexingFeaturesEXT const& supported, VkPhysicalDeviceDescriptorIndexingFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(VkPhysicalDeviceDescriptorIndexingFeaturesEXT & current, VkPhysicalDeviceDescriptorIndexingFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing))
+#if (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout))
+void compare_VkPhysicalDeviceScalarBlockLayoutFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceScalarBlockLayoutFeatures const& supported, VkPhysicalDeviceScalarBlockLayoutFeatures const& requested);
+void merge_VkPhysicalDeviceScalarBlockLayoutFeatures(VkPhysicalDeviceScalarBlockLayoutFeatures & current, VkPhysicalDeviceScalarBlockLayoutFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout))
+#if (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout))
+void compare_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceScalarBlockLayoutFeaturesEXT const& supported, VkPhysicalDeviceScalarBlockLayoutFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT(VkPhysicalDeviceScalarBlockLayoutFeaturesEXT & current, VkPhysicalDeviceScalarBlockLayoutFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout))
+void compare_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceUniformBufferStandardLayoutFeatures const& supported, VkPhysicalDeviceUniformBufferStandardLayoutFeatures const& requested);
+void merge_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(VkPhysicalDeviceUniformBufferStandardLayoutFeatures & current, VkPhysicalDeviceUniformBufferStandardLayoutFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout))
+void compare_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR const& supported, VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR(VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR & current, VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types))
+void compare_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const& supported, VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const& requested);
+void merge_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures & current, VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types))
+void compare_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR const& supported, VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR(VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR & current, VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer))
+void compare_VkPhysicalDeviceImagelessFramebufferFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceImagelessFramebufferFeatures const& supported, VkPhysicalDeviceImagelessFramebufferFeatures const& requested);
+void merge_VkPhysicalDeviceImagelessFramebufferFeatures(VkPhysicalDeviceImagelessFramebufferFeatures & current, VkPhysicalDeviceImagelessFramebufferFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer))
+void compare_VkPhysicalDeviceImagelessFramebufferFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceImagelessFramebufferFeaturesKHR const& supported, VkPhysicalDeviceImagelessFramebufferFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceImagelessFramebufferFeaturesKHR(VkPhysicalDeviceImagelessFramebufferFeaturesKHR & current, VkPhysicalDeviceImagelessFramebufferFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts))
+void compare_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const& supported, VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const& requested);
+void merge_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures & current, VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts))
+#if (defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts))
+void compare_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR const& supported, VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR & current, VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts))
+#if (defined(VK_VERSION_1_3))
+void compare_VkPhysicalDeviceVulkan13Features(std::vector<std::string> & error_list, VkPhysicalDeviceVulkan13Features const& supported, VkPhysicalDeviceVulkan13Features const& requested);
+void merge_VkPhysicalDeviceVulkan13Features(VkPhysicalDeviceVulkan13Features & current, VkPhysicalDeviceVulkan13Features const& merge_in);
+#endif //(defined(VK_VERSION_1_3))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data))
+void compare_VkPhysicalDevicePrivateDataFeatures(std::vector<std::string> & error_list, VkPhysicalDevicePrivateDataFeatures const& supported, VkPhysicalDevicePrivateDataFeatures const& requested);
+void merge_VkPhysicalDevicePrivateDataFeatures(VkPhysicalDevicePrivateDataFeatures & current, VkPhysicalDevicePrivateDataFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_private_data))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data))
+void compare_VkPhysicalDevicePrivateDataFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDevicePrivateDataFeaturesEXT const& supported, VkPhysicalDevicePrivateDataFeaturesEXT const& requested);
+void merge_VkPhysicalDevicePrivateDataFeaturesEXT(VkPhysicalDevicePrivateDataFeaturesEXT & current, VkPhysicalDevicePrivateDataFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_private_data))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))
+void compare_VkPhysicalDeviceSynchronization2Features(std::vector<std::string> & error_list, VkPhysicalDeviceSynchronization2Features const& supported, VkPhysicalDeviceSynchronization2Features const& requested);
+void merge_VkPhysicalDeviceSynchronization2Features(VkPhysicalDeviceSynchronization2Features & current, VkPhysicalDeviceSynchronization2Features const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))
+void compare_VkPhysicalDeviceSynchronization2FeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceSynchronization2FeaturesKHR const& supported, VkPhysicalDeviceSynchronization2FeaturesKHR const& requested);
+void merge_VkPhysicalDeviceSynchronization2FeaturesKHR(VkPhysicalDeviceSynchronization2FeaturesKHR & current, VkPhysicalDeviceSynchronization2FeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr))
+void compare_VkPhysicalDeviceTextureCompressionASTCHDRFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceTextureCompressionASTCHDRFeatures const& supported, VkPhysicalDeviceTextureCompressionASTCHDRFeatures const& requested);
+void merge_VkPhysicalDeviceTextureCompressionASTCHDRFeatures(VkPhysicalDeviceTextureCompressionASTCHDRFeatures & current, VkPhysicalDeviceTextureCompressionASTCHDRFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr))
+void compare_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& supported, VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT & current, VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4))
+void compare_VkPhysicalDeviceMaintenance4Features(std::vector<std::string> & error_list, VkPhysicalDeviceMaintenance4Features const& supported, VkPhysicalDeviceMaintenance4Features const& requested);
+void merge_VkPhysicalDeviceMaintenance4Features(VkPhysicalDeviceMaintenance4Features & current, VkPhysicalDeviceMaintenance4Features const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4))
+void compare_VkPhysicalDeviceMaintenance4FeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceMaintenance4FeaturesKHR const& supported, VkPhysicalDeviceMaintenance4FeaturesKHR const& requested);
+void merge_VkPhysicalDeviceMaintenance4FeaturesKHR(VkPhysicalDeviceMaintenance4FeaturesKHR & current, VkPhysicalDeviceMaintenance4FeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation))
+void compare_VkPhysicalDeviceShaderTerminateInvocationFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceShaderTerminateInvocationFeatures const& supported, VkPhysicalDeviceShaderTerminateInvocationFeatures const& requested);
+void merge_VkPhysicalDeviceShaderTerminateInvocationFeatures(VkPhysicalDeviceShaderTerminateInvocationFeatures & current, VkPhysicalDeviceShaderTerminateInvocationFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation))
+void compare_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const& supported, VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR & current, VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation))
+void compare_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures const& supported, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures const& requested);
+void merge_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures(VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures & current, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation))
+void compare_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& supported, VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT & current, VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control))
+void compare_VkPhysicalDevicePipelineCreationCacheControlFeatures(std::vector<std::string> & error_list, VkPhysicalDevicePipelineCreationCacheControlFeatures const& supported, VkPhysicalDevicePipelineCreationCacheControlFeatures const& requested);
+void merge_VkPhysicalDevicePipelineCreationCacheControlFeatures(VkPhysicalDevicePipelineCreationCacheControlFeatures & current, VkPhysicalDevicePipelineCreationCacheControlFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control))
+void compare_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const& supported, VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const& requested);
+void merge_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT & current, VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory))
+void compare_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures const& supported, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures const& requested);
+void merge_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures & current, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory))
+void compare_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR const& supported, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR & current, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness))
+void compare_VkPhysicalDeviceImageRobustnessFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceImageRobustnessFeatures const& supported, VkPhysicalDeviceImageRobustnessFeatures const& requested);
+void merge_VkPhysicalDeviceImageRobustnessFeatures(VkPhysicalDeviceImageRobustnessFeatures & current, VkPhysicalDeviceImageRobustnessFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness))
+void compare_VkPhysicalDeviceImageRobustnessFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceImageRobustnessFeaturesEXT const& supported, VkPhysicalDeviceImageRobustnessFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceImageRobustnessFeaturesEXT(VkPhysicalDeviceImageRobustnessFeaturesEXT & current, VkPhysicalDeviceImageRobustnessFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control))
+void compare_VkPhysicalDeviceSubgroupSizeControlFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceSubgroupSizeControlFeatures const& supported, VkPhysicalDeviceSubgroupSizeControlFeatures const& requested);
+void merge_VkPhysicalDeviceSubgroupSizeControlFeatures(VkPhysicalDeviceSubgroupSizeControlFeatures & current, VkPhysicalDeviceSubgroupSizeControlFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control))
+void compare_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const& supported, VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(VkPhysicalDeviceSubgroupSizeControlFeaturesEXT & current, VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block))
+void compare_VkPhysicalDeviceInlineUniformBlockFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceInlineUniformBlockFeatures const& supported, VkPhysicalDeviceInlineUniformBlockFeatures const& requested);
+void merge_VkPhysicalDeviceInlineUniformBlockFeatures(VkPhysicalDeviceInlineUniformBlockFeatures & current, VkPhysicalDeviceInlineUniformBlockFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block))
+#if (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block))
+void compare_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceInlineUniformBlockFeaturesEXT const& supported, VkPhysicalDeviceInlineUniformBlockFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(VkPhysicalDeviceInlineUniformBlockFeaturesEXT & current, VkPhysicalDeviceInlineUniformBlockFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product))
+void compare_VkPhysicalDeviceShaderIntegerDotProductFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceShaderIntegerDotProductFeatures const& supported, VkPhysicalDeviceShaderIntegerDotProductFeatures const& requested);
+void merge_VkPhysicalDeviceShaderIntegerDotProductFeatures(VkPhysicalDeviceShaderIntegerDotProductFeatures & current, VkPhysicalDeviceShaderIntegerDotProductFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product))
+void compare_VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR const& supported, VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR(VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR & current, VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering))
+void compare_VkPhysicalDeviceDynamicRenderingFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceDynamicRenderingFeatures const& supported, VkPhysicalDeviceDynamicRenderingFeatures const& requested);
+void merge_VkPhysicalDeviceDynamicRenderingFeatures(VkPhysicalDeviceDynamicRenderingFeatures & current, VkPhysicalDeviceDynamicRenderingFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering))
+#if (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering))
+void compare_VkPhysicalDeviceDynamicRenderingFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceDynamicRenderingFeaturesKHR const& supported, VkPhysicalDeviceDynamicRenderingFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceDynamicRenderingFeaturesKHR(VkPhysicalDeviceDynamicRenderingFeaturesKHR & current, VkPhysicalDeviceDynamicRenderingFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering))
+#if (defined(VK_VERSION_1_4))
+void compare_VkPhysicalDeviceVulkan14Features(std::vector<std::string> & error_list, VkPhysicalDeviceVulkan14Features const& supported, VkPhysicalDeviceVulkan14Features const& requested);
+void merge_VkPhysicalDeviceVulkan14Features(VkPhysicalDeviceVulkan14Features & current, VkPhysicalDeviceVulkan14Features const& merge_in);
+#endif //(defined(VK_VERSION_1_4))
+#if (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority) || defined(VK_EXT_global_priority_query))
+void compare_VkPhysicalDeviceGlobalPriorityQueryFeatures(std::vector<std::string> & error_list, VkPhysicalDeviceGlobalPriorityQueryFeatures const& supported, VkPhysicalDeviceGlobalPriorityQueryFeatures const& requested);
+void merge_VkPhysicalDeviceGlobalPriorityQueryFeatures(VkPhysicalDeviceGlobalPriorityQueryFeatures & current, VkPhysicalDeviceGlobalPriorityQueryFeatures const& merge_in);
+#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority) || defined(VK_EXT_global_priority_query))
+#if (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority))
+void compare_VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const& supported, VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const& requested);
+void merge_VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR(VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR & current, VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const& merge_in);
+#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority))
+#if (defined(VK_VERSION_1_4) || defined(VK_EXT_global_priority_query))
+void compare_VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT const& supported, VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT const& requested);
+void merge_VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT(VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT & current, VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT const& merge_in);
+#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_global_priority_query))
+#if (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8) || defined(VK_EXT_index_type_uint8))
+void compare_VkPhysicalDeviceIndexTypeUint8Features(std::vector<std::string> & error_list, VkPhysicalDeviceIndexTypeUint8Features const& supported, VkPhysicalDeviceIndexTypeUint8Features const& requested);
+void
merge_VkPhysicalDeviceIndexTypeUint8Features(VkPhysicalDeviceIndexTypeUint8Features & current, VkPhysicalDeviceIndexTypeUint8Features const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8) || defined(VK_EXT_index_type_uint8)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8)) +void compare_VkPhysicalDeviceIndexTypeUint8FeaturesKHR(std::vector & error_list, VkPhysicalDeviceIndexTypeUint8FeaturesKHR const& supported, VkPhysicalDeviceIndexTypeUint8FeaturesKHR const& requested); +void merge_VkPhysicalDeviceIndexTypeUint8FeaturesKHR(VkPhysicalDeviceIndexTypeUint8FeaturesKHR & current, VkPhysicalDeviceIndexTypeUint8FeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_index_type_uint8)) +void compare_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(std::vector & error_list, VkPhysicalDeviceIndexTypeUint8FeaturesEXT const& supported, VkPhysicalDeviceIndexTypeUint8FeaturesEXT const& requested); +void merge_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(VkPhysicalDeviceIndexTypeUint8FeaturesEXT & current, VkPhysicalDeviceIndexTypeUint8FeaturesEXT const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_index_type_uint8)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +void compare_VkPhysicalDeviceMaintenance5Features(std::vector & error_list, VkPhysicalDeviceMaintenance5Features const& supported, VkPhysicalDeviceMaintenance5Features const& requested); +void merge_VkPhysicalDeviceMaintenance5Features(VkPhysicalDeviceMaintenance5Features & current, VkPhysicalDeviceMaintenance5Features const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +void compare_VkPhysicalDeviceMaintenance5FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance5FeaturesKHR const& supported, VkPhysicalDeviceMaintenance5FeaturesKHR const& 
requested); +void merge_VkPhysicalDeviceMaintenance5FeaturesKHR(VkPhysicalDeviceMaintenance5FeaturesKHR & current, VkPhysicalDeviceMaintenance5FeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +void compare_VkPhysicalDeviceMaintenance6Features(std::vector & error_list, VkPhysicalDeviceMaintenance6Features const& supported, VkPhysicalDeviceMaintenance6Features const& requested); +void merge_VkPhysicalDeviceMaintenance6Features(VkPhysicalDeviceMaintenance6Features & current, VkPhysicalDeviceMaintenance6Features const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +void compare_VkPhysicalDeviceMaintenance6FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance6FeaturesKHR const& supported, VkPhysicalDeviceMaintenance6FeaturesKHR const& requested); +void merge_VkPhysicalDeviceMaintenance6FeaturesKHR(VkPhysicalDeviceMaintenance6FeaturesKHR & current, VkPhysicalDeviceMaintenance6FeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +void compare_VkPhysicalDeviceHostImageCopyFeatures(std::vector & error_list, VkPhysicalDeviceHostImageCopyFeatures const& supported, VkPhysicalDeviceHostImageCopyFeatures const& requested); +void merge_VkPhysicalDeviceHostImageCopyFeatures(VkPhysicalDeviceHostImageCopyFeatures & current, VkPhysicalDeviceHostImageCopyFeatures const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +void compare_VkPhysicalDeviceHostImageCopyFeaturesEXT(std::vector & error_list, VkPhysicalDeviceHostImageCopyFeaturesEXT const& supported, VkPhysicalDeviceHostImageCopyFeaturesEXT const& requested); +void 
merge_VkPhysicalDeviceHostImageCopyFeaturesEXT(VkPhysicalDeviceHostImageCopyFeaturesEXT & current, VkPhysicalDeviceHostImageCopyFeaturesEXT const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +void compare_VkPhysicalDeviceShaderSubgroupRotateFeatures(std::vector & error_list, VkPhysicalDeviceShaderSubgroupRotateFeatures const& supported, VkPhysicalDeviceShaderSubgroupRotateFeatures const& requested); +void merge_VkPhysicalDeviceShaderSubgroupRotateFeatures(VkPhysicalDeviceShaderSubgroupRotateFeatures & current, VkPhysicalDeviceShaderSubgroupRotateFeatures const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +void compare_VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const& supported, VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR(VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR & current, VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +void compare_VkPhysicalDeviceShaderFloatControls2Features(std::vector & error_list, VkPhysicalDeviceShaderFloatControls2Features const& supported, VkPhysicalDeviceShaderFloatControls2Features const& requested); +void merge_VkPhysicalDeviceShaderFloatControls2Features(VkPhysicalDeviceShaderFloatControls2Features & current, VkPhysicalDeviceShaderFloatControls2Features const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +void 
compare_VkPhysicalDeviceShaderFloatControls2FeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderFloatControls2FeaturesKHR const& supported, VkPhysicalDeviceShaderFloatControls2FeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderFloatControls2FeaturesKHR(VkPhysicalDeviceShaderFloatControls2FeaturesKHR & current, VkPhysicalDeviceShaderFloatControls2FeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +void compare_VkPhysicalDeviceShaderExpectAssumeFeatures(std::vector & error_list, VkPhysicalDeviceShaderExpectAssumeFeatures const& supported, VkPhysicalDeviceShaderExpectAssumeFeatures const& requested); +void merge_VkPhysicalDeviceShaderExpectAssumeFeatures(VkPhysicalDeviceShaderExpectAssumeFeatures & current, VkPhysicalDeviceShaderExpectAssumeFeatures const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +void compare_VkPhysicalDeviceShaderExpectAssumeFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderExpectAssumeFeaturesKHR const& supported, VkPhysicalDeviceShaderExpectAssumeFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderExpectAssumeFeaturesKHR(VkPhysicalDeviceShaderExpectAssumeFeaturesKHR & current, VkPhysicalDeviceShaderExpectAssumeFeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +void compare_VkPhysicalDevicePipelineProtectedAccessFeatures(std::vector & error_list, VkPhysicalDevicePipelineProtectedAccessFeatures const& supported, VkPhysicalDevicePipelineProtectedAccessFeatures const& requested); +void merge_VkPhysicalDevicePipelineProtectedAccessFeatures(VkPhysicalDevicePipelineProtectedAccessFeatures & current, 
VkPhysicalDevicePipelineProtectedAccessFeatures const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +void compare_VkPhysicalDevicePipelineProtectedAccessFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelineProtectedAccessFeaturesEXT const& supported, VkPhysicalDevicePipelineProtectedAccessFeaturesEXT const& requested); +void merge_VkPhysicalDevicePipelineProtectedAccessFeaturesEXT(VkPhysicalDevicePipelineProtectedAccessFeaturesEXT & current, VkPhysicalDevicePipelineProtectedAccessFeaturesEXT const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +void compare_VkPhysicalDevicePipelineRobustnessFeatures(std::vector & error_list, VkPhysicalDevicePipelineRobustnessFeatures const& supported, VkPhysicalDevicePipelineRobustnessFeatures const& requested); +void merge_VkPhysicalDevicePipelineRobustnessFeatures(VkPhysicalDevicePipelineRobustnessFeatures & current, VkPhysicalDevicePipelineRobustnessFeatures const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +void compare_VkPhysicalDevicePipelineRobustnessFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelineRobustnessFeaturesEXT const& supported, VkPhysicalDevicePipelineRobustnessFeaturesEXT const& requested); +void merge_VkPhysicalDevicePipelineRobustnessFeaturesEXT(VkPhysicalDevicePipelineRobustnessFeaturesEXT & current, VkPhysicalDevicePipelineRobustnessFeaturesEXT const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization) || defined(VK_EXT_line_rasterization)) +void compare_VkPhysicalDeviceLineRasterizationFeatures(std::vector & error_list, 
VkPhysicalDeviceLineRasterizationFeatures const& supported, VkPhysicalDeviceLineRasterizationFeatures const& requested); +void merge_VkPhysicalDeviceLineRasterizationFeatures(VkPhysicalDeviceLineRasterizationFeatures & current, VkPhysicalDeviceLineRasterizationFeatures const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization) || defined(VK_EXT_line_rasterization)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization)) +void compare_VkPhysicalDeviceLineRasterizationFeaturesKHR(std::vector & error_list, VkPhysicalDeviceLineRasterizationFeaturesKHR const& supported, VkPhysicalDeviceLineRasterizationFeaturesKHR const& requested); +void merge_VkPhysicalDeviceLineRasterizationFeaturesKHR(VkPhysicalDeviceLineRasterizationFeaturesKHR & current, VkPhysicalDeviceLineRasterizationFeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_line_rasterization)) +void compare_VkPhysicalDeviceLineRasterizationFeaturesEXT(std::vector & error_list, VkPhysicalDeviceLineRasterizationFeaturesEXT const& supported, VkPhysicalDeviceLineRasterizationFeaturesEXT const& requested); +void merge_VkPhysicalDeviceLineRasterizationFeaturesEXT(VkPhysicalDeviceLineRasterizationFeaturesEXT & current, VkPhysicalDeviceLineRasterizationFeaturesEXT const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_line_rasterization)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor) || defined(VK_EXT_vertex_attribute_divisor)) +void compare_VkPhysicalDeviceVertexAttributeDivisorFeatures(std::vector & error_list, VkPhysicalDeviceVertexAttributeDivisorFeatures const& supported, VkPhysicalDeviceVertexAttributeDivisorFeatures const& requested); +void merge_VkPhysicalDeviceVertexAttributeDivisorFeatures(VkPhysicalDeviceVertexAttributeDivisorFeatures & current, VkPhysicalDeviceVertexAttributeDivisorFeatures const& merge_in); +#endif 
//(defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor) || defined(VK_EXT_vertex_attribute_divisor)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor)) +void compare_VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const& supported, VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const& requested); +void merge_VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR(VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR & current, VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_vertex_attribute_divisor)) +void compare_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(std::vector & error_list, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const& supported, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const& requested); +void merge_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT & current, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_vertex_attribute_divisor)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +void compare_VkPhysicalDeviceDynamicRenderingLocalReadFeatures(std::vector & error_list, VkPhysicalDeviceDynamicRenderingLocalReadFeatures const& supported, VkPhysicalDeviceDynamicRenderingLocalReadFeatures const& requested); +void merge_VkPhysicalDeviceDynamicRenderingLocalReadFeatures(VkPhysicalDeviceDynamicRenderingLocalReadFeatures & current, VkPhysicalDeviceDynamicRenderingLocalReadFeatures const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +void 
compare_VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR(std::vector & error_list, VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const& supported, VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const& requested); +void merge_VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR(VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR & current, VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const& merge_in); +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +#if (defined(VK_KHR_performance_query)) +void compare_VkPhysicalDevicePerformanceQueryFeaturesKHR(std::vector & error_list, VkPhysicalDevicePerformanceQueryFeaturesKHR const& supported, VkPhysicalDevicePerformanceQueryFeaturesKHR const& requested); +void merge_VkPhysicalDevicePerformanceQueryFeaturesKHR(VkPhysicalDevicePerformanceQueryFeaturesKHR & current, VkPhysicalDevicePerformanceQueryFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_performance_query)) +#if (defined(VK_KHR_shader_bfloat16)) +void compare_VkPhysicalDeviceShaderBfloat16FeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderBfloat16FeaturesKHR const& supported, VkPhysicalDeviceShaderBfloat16FeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderBfloat16FeaturesKHR(VkPhysicalDeviceShaderBfloat16FeaturesKHR & current, VkPhysicalDeviceShaderBfloat16FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_bfloat16)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_KHR_portability_subset)) +void compare_VkPhysicalDevicePortabilitySubsetFeaturesKHR(std::vector & error_list, VkPhysicalDevicePortabilitySubsetFeaturesKHR const& supported, VkPhysicalDevicePortabilitySubsetFeaturesKHR const& requested); +void merge_VkPhysicalDevicePortabilitySubsetFeaturesKHR(VkPhysicalDevicePortabilitySubsetFeaturesKHR & current, VkPhysicalDevicePortabilitySubsetFeaturesKHR const& merge_in); +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_KHR_portability_subset)) +#if 
(defined(VK_KHR_shader_clock)) +void compare_VkPhysicalDeviceShaderClockFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderClockFeaturesKHR const& supported, VkPhysicalDeviceShaderClockFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderClockFeaturesKHR(VkPhysicalDeviceShaderClockFeaturesKHR & current, VkPhysicalDeviceShaderClockFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_clock)) +#if (defined(VK_KHR_fragment_shading_rate)) +void compare_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(std::vector & error_list, VkPhysicalDeviceFragmentShadingRateFeaturesKHR const& supported, VkPhysicalDeviceFragmentShadingRateFeaturesKHR const& requested); +void merge_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(VkPhysicalDeviceFragmentShadingRateFeaturesKHR & current, VkPhysicalDeviceFragmentShadingRateFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_fragment_shading_rate)) +#if (defined(VK_KHR_shader_quad_control)) +void compare_VkPhysicalDeviceShaderQuadControlFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderQuadControlFeaturesKHR const& supported, VkPhysicalDeviceShaderQuadControlFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderQuadControlFeaturesKHR(VkPhysicalDeviceShaderQuadControlFeaturesKHR & current, VkPhysicalDeviceShaderQuadControlFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_quad_control)) +#if (defined(VK_KHR_present_wait)) +void compare_VkPhysicalDevicePresentWaitFeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentWaitFeaturesKHR const& supported, VkPhysicalDevicePresentWaitFeaturesKHR const& requested); +void merge_VkPhysicalDevicePresentWaitFeaturesKHR(VkPhysicalDevicePresentWaitFeaturesKHR & current, VkPhysicalDevicePresentWaitFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_present_wait)) +#if (defined(VK_KHR_pipeline_executable_properties)) +void compare_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(std::vector & error_list, 
VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& supported, VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& requested); +void merge_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR & current, VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_pipeline_executable_properties)) +#if (defined(VK_KHR_present_id)) +void compare_VkPhysicalDevicePresentIdFeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentIdFeaturesKHR const& supported, VkPhysicalDevicePresentIdFeaturesKHR const& requested); +void merge_VkPhysicalDevicePresentIdFeaturesKHR(VkPhysicalDevicePresentIdFeaturesKHR & current, VkPhysicalDevicePresentIdFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_present_id)) +#if (defined(VK_KHR_fragment_shader_barycentric)) +void compare_VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR(std::vector & error_list, VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR const& supported, VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR const& requested); +void merge_VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR(VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR & current, VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_fragment_shader_barycentric)) +#if (defined(VK_NV_fragment_shader_barycentric)) +void compare_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(std::vector & error_list, VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const& supported, VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const& requested); +void merge_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV & current, VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const& merge_in); +#endif //(defined(VK_NV_fragment_shader_barycentric)) +#if (defined(VK_KHR_shader_subgroup_uniform_control_flow)) +void 
compare_VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR const& supported, VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR(VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR & current, VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_subgroup_uniform_control_flow)) +#if (defined(VK_KHR_workgroup_memory_explicit_layout)) +void compare_VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR(std::vector & error_list, VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR const& supported, VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR const& requested); +void merge_VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR(VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR & current, VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_workgroup_memory_explicit_layout)) +#if (defined(VK_KHR_ray_tracing_maintenance1)) +void compare_VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR const& supported, VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR const& requested); +void merge_VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR(VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR & current, VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_ray_tracing_maintenance1)) +#if (defined(VK_KHR_shader_untyped_pointers)) +void compare_VkPhysicalDeviceShaderUntypedPointersFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderUntypedPointersFeaturesKHR const& supported, VkPhysicalDeviceShaderUntypedPointersFeaturesKHR const& requested); +void 
merge_VkPhysicalDeviceShaderUntypedPointersFeaturesKHR(VkPhysicalDeviceShaderUntypedPointersFeaturesKHR & current, VkPhysicalDeviceShaderUntypedPointersFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_untyped_pointers)) +#if (defined(VK_KHR_shader_maximal_reconvergence)) +void compare_VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR const& supported, VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR(VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR & current, VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_maximal_reconvergence)) +#if (defined(VK_KHR_present_id2)) +void compare_VkPhysicalDevicePresentId2FeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentId2FeaturesKHR const& supported, VkPhysicalDevicePresentId2FeaturesKHR const& requested); +void merge_VkPhysicalDevicePresentId2FeaturesKHR(VkPhysicalDevicePresentId2FeaturesKHR & current, VkPhysicalDevicePresentId2FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_present_id2)) +#if (defined(VK_KHR_present_wait2)) +void compare_VkPhysicalDevicePresentWait2FeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentWait2FeaturesKHR const& supported, VkPhysicalDevicePresentWait2FeaturesKHR const& requested); +void merge_VkPhysicalDevicePresentWait2FeaturesKHR(VkPhysicalDevicePresentWait2FeaturesKHR & current, VkPhysicalDevicePresentWait2FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_present_wait2)) +#if (defined(VK_KHR_ray_tracing_position_fetch)) +void compare_VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR const& supported, VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR const& requested); +void 
merge_VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR(VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR & current, VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_ray_tracing_position_fetch)) +#if (defined(VK_KHR_pipeline_binary)) +void compare_VkPhysicalDevicePipelineBinaryFeaturesKHR(std::vector & error_list, VkPhysicalDevicePipelineBinaryFeaturesKHR const& supported, VkPhysicalDevicePipelineBinaryFeaturesKHR const& requested); +void merge_VkPhysicalDevicePipelineBinaryFeaturesKHR(VkPhysicalDevicePipelineBinaryFeaturesKHR & current, VkPhysicalDevicePipelineBinaryFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_pipeline_binary)) +#if (defined(VK_KHR_swapchain_maintenance1)) +void compare_VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR const& supported, VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR const& requested); +void merge_VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR(VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR & current, VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_swapchain_maintenance1)) +#if (defined(VK_EXT_swapchain_maintenance1)) +void compare_VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT(std::vector & error_list, VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT const& supported, VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT const& requested); +void merge_VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT(VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT & current, VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_swapchain_maintenance1)) +#if (defined(VK_KHR_cooperative_matrix)) +void compare_VkPhysicalDeviceCooperativeMatrixFeaturesKHR(std::vector & error_list, VkPhysicalDeviceCooperativeMatrixFeaturesKHR const& supported, VkPhysicalDeviceCooperativeMatrixFeaturesKHR const& requested); +void 
merge_VkPhysicalDeviceCooperativeMatrixFeaturesKHR(VkPhysicalDeviceCooperativeMatrixFeaturesKHR & current, VkPhysicalDeviceCooperativeMatrixFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_cooperative_matrix)) +#if (defined(VK_KHR_compute_shader_derivatives)) +void compare_VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR(std::vector & error_list, VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const& supported, VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const& requested); +void merge_VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR(VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR & current, VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_compute_shader_derivatives)) +#if (defined(VK_NV_compute_shader_derivatives)) +void compare_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(std::vector & error_list, VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const& supported, VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const& requested); +void merge_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(VkPhysicalDeviceComputeShaderDerivativesFeaturesNV & current, VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const& merge_in); +#endif //(defined(VK_NV_compute_shader_derivatives)) +#if (defined(VK_KHR_video_encode_av1)) +void compare_VkPhysicalDeviceVideoEncodeAV1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoEncodeAV1FeaturesKHR const& supported, VkPhysicalDeviceVideoEncodeAV1FeaturesKHR const& requested); +void merge_VkPhysicalDeviceVideoEncodeAV1FeaturesKHR(VkPhysicalDeviceVideoEncodeAV1FeaturesKHR & current, VkPhysicalDeviceVideoEncodeAV1FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_video_encode_av1)) +#if (defined(VK_KHR_video_decode_vp9)) +void compare_VkPhysicalDeviceVideoDecodeVP9FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoDecodeVP9FeaturesKHR const& supported, VkPhysicalDeviceVideoDecodeVP9FeaturesKHR const& requested); +void 
merge_VkPhysicalDeviceVideoDecodeVP9FeaturesKHR(VkPhysicalDeviceVideoDecodeVP9FeaturesKHR & current, VkPhysicalDeviceVideoDecodeVP9FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_video_decode_vp9)) +#if (defined(VK_KHR_video_maintenance1)) +void compare_VkPhysicalDeviceVideoMaintenance1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoMaintenance1FeaturesKHR const& supported, VkPhysicalDeviceVideoMaintenance1FeaturesKHR const& requested); +void merge_VkPhysicalDeviceVideoMaintenance1FeaturesKHR(VkPhysicalDeviceVideoMaintenance1FeaturesKHR & current, VkPhysicalDeviceVideoMaintenance1FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_video_maintenance1)) +#if (defined(VK_KHR_unified_image_layouts)) +void compare_VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR(std::vector & error_list, VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR const& supported, VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR const& requested); +void merge_VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR(VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR & current, VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_unified_image_layouts)) +#if (defined(VK_KHR_copy_memory_indirect)) +void compare_VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR(std::vector & error_list, VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR const& supported, VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR const& requested); +void merge_VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR(VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR & current, VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_copy_memory_indirect)) +#if (defined(VK_KHR_video_encode_intra_refresh)) +void compare_VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR const& supported, VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR const& requested); +void 
merge_VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR(VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR & current, VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_video_encode_intra_refresh)) +#if (defined(VK_KHR_video_encode_quantization_map)) +void compare_VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR const& supported, VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR const& requested); +void merge_VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR(VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR & current, VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_video_encode_quantization_map)) +#if (defined(VK_KHR_shader_relaxed_extended_instruction)) +void compare_VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const& supported, VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR(VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR & current, VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_relaxed_extended_instruction)) +#if (defined(VK_KHR_maintenance7)) +void compare_VkPhysicalDeviceMaintenance7FeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceMaintenance7FeaturesKHR const& supported, VkPhysicalDeviceMaintenance7FeaturesKHR const& requested); +void merge_VkPhysicalDeviceMaintenance7FeaturesKHR(VkPhysicalDeviceMaintenance7FeaturesKHR & current, VkPhysicalDeviceMaintenance7FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_maintenance7)) +#if (defined(VK_KHR_maintenance8)) +void compare_VkPhysicalDeviceMaintenance8FeaturesKHR(std::vector<std::string> & error_list, VkPhysicalDeviceMaintenance8FeaturesKHR const& 
supported, VkPhysicalDeviceMaintenance8FeaturesKHR const& requested); +void merge_VkPhysicalDeviceMaintenance8FeaturesKHR(VkPhysicalDeviceMaintenance8FeaturesKHR & current, VkPhysicalDeviceMaintenance8FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_maintenance8)) +#if (defined(VK_KHR_shader_fma)) +void compare_VkPhysicalDeviceShaderFmaFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderFmaFeaturesKHR const& supported, VkPhysicalDeviceShaderFmaFeaturesKHR const& requested); +void merge_VkPhysicalDeviceShaderFmaFeaturesKHR(VkPhysicalDeviceShaderFmaFeaturesKHR & current, VkPhysicalDeviceShaderFmaFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_shader_fma)) +#if (defined(VK_KHR_maintenance9)) +void compare_VkPhysicalDeviceMaintenance9FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance9FeaturesKHR const& supported, VkPhysicalDeviceMaintenance9FeaturesKHR const& requested); +void merge_VkPhysicalDeviceMaintenance9FeaturesKHR(VkPhysicalDeviceMaintenance9FeaturesKHR & current, VkPhysicalDeviceMaintenance9FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_maintenance9)) +#if (defined(VK_KHR_video_maintenance2)) +void compare_VkPhysicalDeviceVideoMaintenance2FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoMaintenance2FeaturesKHR const& supported, VkPhysicalDeviceVideoMaintenance2FeaturesKHR const& requested); +void merge_VkPhysicalDeviceVideoMaintenance2FeaturesKHR(VkPhysicalDeviceVideoMaintenance2FeaturesKHR & current, VkPhysicalDeviceVideoMaintenance2FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_video_maintenance2)) +#if (defined(VK_KHR_depth_clamp_zero_one)) +void compare_VkPhysicalDeviceDepthClampZeroOneFeaturesKHR(std::vector & error_list, VkPhysicalDeviceDepthClampZeroOneFeaturesKHR const& supported, VkPhysicalDeviceDepthClampZeroOneFeaturesKHR const& requested); +void merge_VkPhysicalDeviceDepthClampZeroOneFeaturesKHR(VkPhysicalDeviceDepthClampZeroOneFeaturesKHR & current, 
VkPhysicalDeviceDepthClampZeroOneFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_depth_clamp_zero_one)) +#if (defined(VK_EXT_depth_clamp_zero_one)) +void compare_VkPhysicalDeviceDepthClampZeroOneFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthClampZeroOneFeaturesEXT const& supported, VkPhysicalDeviceDepthClampZeroOneFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDepthClampZeroOneFeaturesEXT(VkPhysicalDeviceDepthClampZeroOneFeaturesEXT & current, VkPhysicalDeviceDepthClampZeroOneFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_depth_clamp_zero_one)) +#if (defined(VK_KHR_robustness2)) +void compare_VkPhysicalDeviceRobustness2FeaturesKHR(std::vector & error_list, VkPhysicalDeviceRobustness2FeaturesKHR const& supported, VkPhysicalDeviceRobustness2FeaturesKHR const& requested); +void merge_VkPhysicalDeviceRobustness2FeaturesKHR(VkPhysicalDeviceRobustness2FeaturesKHR & current, VkPhysicalDeviceRobustness2FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_robustness2)) +#if (defined(VK_EXT_robustness2)) +void compare_VkPhysicalDeviceRobustness2FeaturesEXT(std::vector & error_list, VkPhysicalDeviceRobustness2FeaturesEXT const& supported, VkPhysicalDeviceRobustness2FeaturesEXT const& requested); +void merge_VkPhysicalDeviceRobustness2FeaturesEXT(VkPhysicalDeviceRobustness2FeaturesEXT & current, VkPhysicalDeviceRobustness2FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_robustness2)) +#if (defined(VK_KHR_present_mode_fifo_latest_ready)) +void compare_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR const& supported, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR const& requested); +void merge_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR(VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR & current, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR const& merge_in); +#endif 
//(defined(VK_KHR_present_mode_fifo_latest_ready)) +#if (defined(VK_EXT_present_mode_fifo_latest_ready)) +void compare_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT(std::vector & error_list, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT const& supported, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT const& requested); +void merge_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT(VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT & current, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_present_mode_fifo_latest_ready)) +#if (defined(VK_KHR_maintenance10)) +void compare_VkPhysicalDeviceMaintenance10FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance10FeaturesKHR const& supported, VkPhysicalDeviceMaintenance10FeaturesKHR const& requested); +void merge_VkPhysicalDeviceMaintenance10FeaturesKHR(VkPhysicalDeviceMaintenance10FeaturesKHR & current, VkPhysicalDeviceMaintenance10FeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_maintenance10)) +#if (defined(VK_EXT_transform_feedback)) +void compare_VkPhysicalDeviceTransformFeedbackFeaturesEXT(std::vector & error_list, VkPhysicalDeviceTransformFeedbackFeaturesEXT const& supported, VkPhysicalDeviceTransformFeedbackFeaturesEXT const& requested); +void merge_VkPhysicalDeviceTransformFeedbackFeaturesEXT(VkPhysicalDeviceTransformFeedbackFeaturesEXT & current, VkPhysicalDeviceTransformFeedbackFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_transform_feedback)) +#if (defined(VK_NV_corner_sampled_image)) +void compare_VkPhysicalDeviceCornerSampledImageFeaturesNV(std::vector & error_list, VkPhysicalDeviceCornerSampledImageFeaturesNV const& supported, VkPhysicalDeviceCornerSampledImageFeaturesNV const& requested); +void merge_VkPhysicalDeviceCornerSampledImageFeaturesNV(VkPhysicalDeviceCornerSampledImageFeaturesNV & current, VkPhysicalDeviceCornerSampledImageFeaturesNV const& merge_in); +#endif 
//(defined(VK_NV_corner_sampled_image)) +#if (defined(VK_EXT_astc_decode_mode)) +void compare_VkPhysicalDeviceASTCDecodeFeaturesEXT(std::vector & error_list, VkPhysicalDeviceASTCDecodeFeaturesEXT const& supported, VkPhysicalDeviceASTCDecodeFeaturesEXT const& requested); +void merge_VkPhysicalDeviceASTCDecodeFeaturesEXT(VkPhysicalDeviceASTCDecodeFeaturesEXT & current, VkPhysicalDeviceASTCDecodeFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_astc_decode_mode)) +#if (defined(VK_EXT_conditional_rendering)) +void compare_VkPhysicalDeviceConditionalRenderingFeaturesEXT(std::vector & error_list, VkPhysicalDeviceConditionalRenderingFeaturesEXT const& supported, VkPhysicalDeviceConditionalRenderingFeaturesEXT const& requested); +void merge_VkPhysicalDeviceConditionalRenderingFeaturesEXT(VkPhysicalDeviceConditionalRenderingFeaturesEXT & current, VkPhysicalDeviceConditionalRenderingFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_conditional_rendering)) +#if (defined(VK_EXT_depth_clip_enable)) +void compare_VkPhysicalDeviceDepthClipEnableFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthClipEnableFeaturesEXT const& supported, VkPhysicalDeviceDepthClipEnableFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDepthClipEnableFeaturesEXT(VkPhysicalDeviceDepthClipEnableFeaturesEXT & current, VkPhysicalDeviceDepthClipEnableFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_depth_clip_enable)) +#if (defined(VK_IMG_relaxed_line_rasterization)) +void compare_VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG(std::vector & error_list, VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG const& supported, VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG const& requested); +void merge_VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG(VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG & current, VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG const& merge_in); +#endif //(defined(VK_IMG_relaxed_line_rasterization)) +#if 
defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_shader_enqueue)) +void compare_VkPhysicalDeviceShaderEnqueueFeaturesAMDX(std::vector & error_list, VkPhysicalDeviceShaderEnqueueFeaturesAMDX const& supported, VkPhysicalDeviceShaderEnqueueFeaturesAMDX const& requested); +void merge_VkPhysicalDeviceShaderEnqueueFeaturesAMDX(VkPhysicalDeviceShaderEnqueueFeaturesAMDX & current, VkPhysicalDeviceShaderEnqueueFeaturesAMDX const& merge_in); +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_shader_enqueue)) +#if (defined(VK_EXT_blend_operation_advanced)) +void compare_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const& supported, VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const& requested); +void merge_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT & current, VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_blend_operation_advanced)) +#if (defined(VK_NV_shader_sm_builtins)) +void compare_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(std::vector & error_list, VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const& supported, VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const& requested); +void merge_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(VkPhysicalDeviceShaderSMBuiltinsFeaturesNV & current, VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const& merge_in); +#endif //(defined(VK_NV_shader_sm_builtins)) +#if (defined(VK_NV_shading_rate_image)) +void compare_VkPhysicalDeviceShadingRateImageFeaturesNV(std::vector & error_list, VkPhysicalDeviceShadingRateImageFeaturesNV const& supported, VkPhysicalDeviceShadingRateImageFeaturesNV const& requested); +void merge_VkPhysicalDeviceShadingRateImageFeaturesNV(VkPhysicalDeviceShadingRateImageFeaturesNV & current, VkPhysicalDeviceShadingRateImageFeaturesNV const& merge_in); +#endif //(defined(VK_NV_shading_rate_image)) +#if 
(defined(VK_NV_representative_fragment_test)) +void compare_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(std::vector & error_list, VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const& supported, VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const& requested); +void merge_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV & current, VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const& merge_in); +#endif //(defined(VK_NV_representative_fragment_test)) +#if (defined(VK_NV_mesh_shader)) +void compare_VkPhysicalDeviceMeshShaderFeaturesNV(std::vector & error_list, VkPhysicalDeviceMeshShaderFeaturesNV const& supported, VkPhysicalDeviceMeshShaderFeaturesNV const& requested); +void merge_VkPhysicalDeviceMeshShaderFeaturesNV(VkPhysicalDeviceMeshShaderFeaturesNV & current, VkPhysicalDeviceMeshShaderFeaturesNV const& merge_in); +#endif //(defined(VK_NV_mesh_shader)) +#if (defined(VK_NV_shader_image_footprint)) +void compare_VkPhysicalDeviceShaderImageFootprintFeaturesNV(std::vector & error_list, VkPhysicalDeviceShaderImageFootprintFeaturesNV const& supported, VkPhysicalDeviceShaderImageFootprintFeaturesNV const& requested); +void merge_VkPhysicalDeviceShaderImageFootprintFeaturesNV(VkPhysicalDeviceShaderImageFootprintFeaturesNV & current, VkPhysicalDeviceShaderImageFootprintFeaturesNV const& merge_in); +#endif //(defined(VK_NV_shader_image_footprint)) +#if (defined(VK_NV_scissor_exclusive)) +void compare_VkPhysicalDeviceExclusiveScissorFeaturesNV(std::vector & error_list, VkPhysicalDeviceExclusiveScissorFeaturesNV const& supported, VkPhysicalDeviceExclusiveScissorFeaturesNV const& requested); +void merge_VkPhysicalDeviceExclusiveScissorFeaturesNV(VkPhysicalDeviceExclusiveScissorFeaturesNV & current, VkPhysicalDeviceExclusiveScissorFeaturesNV const& merge_in); +#endif //(defined(VK_NV_scissor_exclusive)) +#if (defined(VK_INTEL_shader_integer_functions2)) +void 
compare_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(std::vector & error_list, VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& supported, VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& requested); +void merge_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL & current, VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& merge_in); +#endif //(defined(VK_INTEL_shader_integer_functions2)) +#if (defined(VK_EXT_fragment_density_map)) +void compare_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapFeaturesEXT const& supported, VkPhysicalDeviceFragmentDensityMapFeaturesEXT const& requested); +void merge_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(VkPhysicalDeviceFragmentDensityMapFeaturesEXT & current, VkPhysicalDeviceFragmentDensityMapFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_fragment_density_map)) +#if (defined(VK_AMD_device_coherent_memory)) +void compare_VkPhysicalDeviceCoherentMemoryFeaturesAMD(std::vector & error_list, VkPhysicalDeviceCoherentMemoryFeaturesAMD const& supported, VkPhysicalDeviceCoherentMemoryFeaturesAMD const& requested); +void merge_VkPhysicalDeviceCoherentMemoryFeaturesAMD(VkPhysicalDeviceCoherentMemoryFeaturesAMD & current, VkPhysicalDeviceCoherentMemoryFeaturesAMD const& merge_in); +#endif //(defined(VK_AMD_device_coherent_memory)) +#if (defined(VK_EXT_shader_image_atomic_int64)) +void compare_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& supported, VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& requested); +void merge_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT & current, VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_image_atomic_int64)) +#if (defined(VK_EXT_memory_priority)) 
+void compare_VkPhysicalDeviceMemoryPriorityFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMemoryPriorityFeaturesEXT const& supported, VkPhysicalDeviceMemoryPriorityFeaturesEXT const& requested); +void merge_VkPhysicalDeviceMemoryPriorityFeaturesEXT(VkPhysicalDeviceMemoryPriorityFeaturesEXT & current, VkPhysicalDeviceMemoryPriorityFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_memory_priority)) +#if (defined(VK_NV_dedicated_allocation_image_aliasing)) +void compare_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(std::vector & error_list, VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& supported, VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& requested); +void merge_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV & current, VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& merge_in); +#endif //(defined(VK_NV_dedicated_allocation_image_aliasing)) +#if (defined(VK_EXT_buffer_device_address)) +void compare_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const& supported, VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const& requested); +void merge_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(VkPhysicalDeviceBufferDeviceAddressFeaturesEXT & current, VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_buffer_device_address)) +#if (defined(VK_EXT_buffer_device_address)) +void compare_VkPhysicalDeviceBufferAddressFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBufferAddressFeaturesEXT const& supported, VkPhysicalDeviceBufferAddressFeaturesEXT const& requested); +void merge_VkPhysicalDeviceBufferAddressFeaturesEXT(VkPhysicalDeviceBufferAddressFeaturesEXT & current, VkPhysicalDeviceBufferAddressFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_buffer_device_address)) +#if 
(defined(VK_NV_cooperative_matrix)) +void compare_VkPhysicalDeviceCooperativeMatrixFeaturesNV(std::vector & error_list, VkPhysicalDeviceCooperativeMatrixFeaturesNV const& supported, VkPhysicalDeviceCooperativeMatrixFeaturesNV const& requested); +void merge_VkPhysicalDeviceCooperativeMatrixFeaturesNV(VkPhysicalDeviceCooperativeMatrixFeaturesNV & current, VkPhysicalDeviceCooperativeMatrixFeaturesNV const& merge_in); +#endif //(defined(VK_NV_cooperative_matrix)) +#if (defined(VK_NV_coverage_reduction_mode)) +void compare_VkPhysicalDeviceCoverageReductionModeFeaturesNV(std::vector & error_list, VkPhysicalDeviceCoverageReductionModeFeaturesNV const& supported, VkPhysicalDeviceCoverageReductionModeFeaturesNV const& requested); +void merge_VkPhysicalDeviceCoverageReductionModeFeaturesNV(VkPhysicalDeviceCoverageReductionModeFeaturesNV & current, VkPhysicalDeviceCoverageReductionModeFeaturesNV const& merge_in); +#endif //(defined(VK_NV_coverage_reduction_mode)) +#if (defined(VK_EXT_fragment_shader_interlock)) +void compare_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const& supported, VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const& requested); +void merge_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT & current, VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_fragment_shader_interlock)) +#if (defined(VK_EXT_ycbcr_image_arrays)) +void compare_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(std::vector & error_list, VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const& supported, VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const& requested); +void merge_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(VkPhysicalDeviceYcbcrImageArraysFeaturesEXT & current, VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_ycbcr_image_arrays)) +#if 
(defined(VK_EXT_provoking_vertex)) +void compare_VkPhysicalDeviceProvokingVertexFeaturesEXT(std::vector & error_list, VkPhysicalDeviceProvokingVertexFeaturesEXT const& supported, VkPhysicalDeviceProvokingVertexFeaturesEXT const& requested); +void merge_VkPhysicalDeviceProvokingVertexFeaturesEXT(VkPhysicalDeviceProvokingVertexFeaturesEXT & current, VkPhysicalDeviceProvokingVertexFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_provoking_vertex)) +#if (defined(VK_EXT_shader_atomic_float)) +void compare_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const& supported, VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const& requested); +void merge_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(VkPhysicalDeviceShaderAtomicFloatFeaturesEXT & current, VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_atomic_float)) +#if (defined(VK_EXT_extended_dynamic_state)) +void compare_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(std::vector & error_list, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const& supported, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const& requested); +void merge_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT & current, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_extended_dynamic_state)) +#if (defined(VK_EXT_map_memory_placed)) +void compare_VkPhysicalDeviceMapMemoryPlacedFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMapMemoryPlacedFeaturesEXT const& supported, VkPhysicalDeviceMapMemoryPlacedFeaturesEXT const& requested); +void merge_VkPhysicalDeviceMapMemoryPlacedFeaturesEXT(VkPhysicalDeviceMapMemoryPlacedFeaturesEXT & current, VkPhysicalDeviceMapMemoryPlacedFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_map_memory_placed)) +#if (defined(VK_EXT_shader_atomic_float2)) +void 
compare_VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT const& supported, VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT const& requested); +void merge_VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT(VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT & current, VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_atomic_float2)) +#if (defined(VK_NV_device_generated_commands)) +void compare_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(std::vector & error_list, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& supported, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& requested); +void merge_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV & current, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& merge_in); +#endif //(defined(VK_NV_device_generated_commands)) +#if (defined(VK_NV_inherited_viewport_scissor)) +void compare_VkPhysicalDeviceInheritedViewportScissorFeaturesNV(std::vector & error_list, VkPhysicalDeviceInheritedViewportScissorFeaturesNV const& supported, VkPhysicalDeviceInheritedViewportScissorFeaturesNV const& requested); +void merge_VkPhysicalDeviceInheritedViewportScissorFeaturesNV(VkPhysicalDeviceInheritedViewportScissorFeaturesNV & current, VkPhysicalDeviceInheritedViewportScissorFeaturesNV const& merge_in); +#endif //(defined(VK_NV_inherited_viewport_scissor)) +#if (defined(VK_EXT_texel_buffer_alignment)) +void compare_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(std::vector & error_list, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const& supported, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const& requested); +void merge_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT & current, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_texel_buffer_alignment)) +#if 
(defined(VK_EXT_depth_bias_control)) +void compare_VkPhysicalDeviceDepthBiasControlFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthBiasControlFeaturesEXT const& supported, VkPhysicalDeviceDepthBiasControlFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDepthBiasControlFeaturesEXT(VkPhysicalDeviceDepthBiasControlFeaturesEXT & current, VkPhysicalDeviceDepthBiasControlFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_depth_bias_control)) +#if (defined(VK_EXT_device_memory_report)) +void compare_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const& supported, VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(VkPhysicalDeviceDeviceMemoryReportFeaturesEXT & current, VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_device_memory_report)) +#if (defined(VK_EXT_custom_border_color)) +void compare_VkPhysicalDeviceCustomBorderColorFeaturesEXT(std::vector & error_list, VkPhysicalDeviceCustomBorderColorFeaturesEXT const& supported, VkPhysicalDeviceCustomBorderColorFeaturesEXT const& requested); +void merge_VkPhysicalDeviceCustomBorderColorFeaturesEXT(VkPhysicalDeviceCustomBorderColorFeaturesEXT & current, VkPhysicalDeviceCustomBorderColorFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_custom_border_color)) +#if (defined(VK_NV_present_barrier)) +void compare_VkPhysicalDevicePresentBarrierFeaturesNV(std::vector & error_list, VkPhysicalDevicePresentBarrierFeaturesNV const& supported, VkPhysicalDevicePresentBarrierFeaturesNV const& requested); +void merge_VkPhysicalDevicePresentBarrierFeaturesNV(VkPhysicalDevicePresentBarrierFeaturesNV & current, VkPhysicalDevicePresentBarrierFeaturesNV const& merge_in); +#endif //(defined(VK_NV_present_barrier)) +#if (defined(VK_NV_device_diagnostics_config)) +void compare_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(std::vector & 
error_list, VkPhysicalDeviceDiagnosticsConfigFeaturesNV const& supported, VkPhysicalDeviceDiagnosticsConfigFeaturesNV const& requested); +void merge_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(VkPhysicalDeviceDiagnosticsConfigFeaturesNV & current, VkPhysicalDeviceDiagnosticsConfigFeaturesNV const& merge_in); +#endif //(defined(VK_NV_device_diagnostics_config)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_cuda_kernel_launch)) +void compare_VkPhysicalDeviceCudaKernelLaunchFeaturesNV(std::vector & error_list, VkPhysicalDeviceCudaKernelLaunchFeaturesNV const& supported, VkPhysicalDeviceCudaKernelLaunchFeaturesNV const& requested); +void merge_VkPhysicalDeviceCudaKernelLaunchFeaturesNV(VkPhysicalDeviceCudaKernelLaunchFeaturesNV & current, VkPhysicalDeviceCudaKernelLaunchFeaturesNV const& merge_in); +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_cuda_kernel_launch)) +#if (defined(VK_QCOM_tile_shading)) +void compare_VkPhysicalDeviceTileShadingFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceTileShadingFeaturesQCOM const& supported, VkPhysicalDeviceTileShadingFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceTileShadingFeaturesQCOM(VkPhysicalDeviceTileShadingFeaturesQCOM & current, VkPhysicalDeviceTileShadingFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_tile_shading)) +#if (defined(VK_EXT_descriptor_buffer)) +void compare_VkPhysicalDeviceDescriptorBufferFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDescriptorBufferFeaturesEXT const& supported, VkPhysicalDeviceDescriptorBufferFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDescriptorBufferFeaturesEXT(VkPhysicalDeviceDescriptorBufferFeaturesEXT & current, VkPhysicalDeviceDescriptorBufferFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_descriptor_buffer)) +#if (defined(VK_EXT_graphics_pipeline_library)) +void compare_VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT(std::vector & error_list, VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT 
const& supported, VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT const& requested); +void merge_VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT(VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT & current, VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_graphics_pipeline_library)) +#if (defined(VK_AMD_shader_early_and_late_fragment_tests)) +void compare_VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD(std::vector & error_list, VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD const& supported, VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD const& requested); +void merge_VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD(VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD & current, VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD const& merge_in); +#endif //(defined(VK_AMD_shader_early_and_late_fragment_tests)) +#if (defined(VK_NV_fragment_shading_rate_enums)) +void compare_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(std::vector & error_list, VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& supported, VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& requested); +void merge_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV & current, VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& merge_in); +#endif //(defined(VK_NV_fragment_shading_rate_enums)) +#if (defined(VK_NV_ray_tracing_motion_blur)) +void compare_VkPhysicalDeviceRayTracingMotionBlurFeaturesNV(std::vector & error_list, VkPhysicalDeviceRayTracingMotionBlurFeaturesNV const& supported, VkPhysicalDeviceRayTracingMotionBlurFeaturesNV const& requested); +void merge_VkPhysicalDeviceRayTracingMotionBlurFeaturesNV(VkPhysicalDeviceRayTracingMotionBlurFeaturesNV & current, VkPhysicalDeviceRayTracingMotionBlurFeaturesNV const& merge_in); +#endif //(defined(VK_NV_ray_tracing_motion_blur)) +#if 
(defined(VK_EXT_ycbcr_2plane_444_formats)) +void compare_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT(std::vector & error_list, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT const& supported, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT const& requested); +void merge_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT(VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT & current, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_ycbcr_2plane_444_formats)) +#if (defined(VK_EXT_fragment_density_map2)) +void compare_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(std::vector & error_list, VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const& supported, VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const& requested); +void merge_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(VkPhysicalDeviceFragmentDensityMap2FeaturesEXT & current, VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_fragment_density_map2)) +#if (defined(VK_EXT_image_compression_control)) +void compare_VkPhysicalDeviceImageCompressionControlFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageCompressionControlFeaturesEXT const& supported, VkPhysicalDeviceImageCompressionControlFeaturesEXT const& requested); +void merge_VkPhysicalDeviceImageCompressionControlFeaturesEXT(VkPhysicalDeviceImageCompressionControlFeaturesEXT & current, VkPhysicalDeviceImageCompressionControlFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_image_compression_control)) +#if (defined(VK_EXT_attachment_feedback_loop_layout)) +void compare_VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT(std::vector & error_list, VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT const& supported, VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT const& requested); +void merge_VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT(VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT & current, 
VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_attachment_feedback_loop_layout)) +#if (defined(VK_EXT_4444_formats)) +void compare_VkPhysicalDevice4444FormatsFeaturesEXT(std::vector & error_list, VkPhysicalDevice4444FormatsFeaturesEXT const& supported, VkPhysicalDevice4444FormatsFeaturesEXT const& requested); +void merge_VkPhysicalDevice4444FormatsFeaturesEXT(VkPhysicalDevice4444FormatsFeaturesEXT & current, VkPhysicalDevice4444FormatsFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_4444_formats)) +#if (defined(VK_EXT_device_fault)) +void compare_VkPhysicalDeviceFaultFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFaultFeaturesEXT const& supported, VkPhysicalDeviceFaultFeaturesEXT const& requested); +void merge_VkPhysicalDeviceFaultFeaturesEXT(VkPhysicalDeviceFaultFeaturesEXT & current, VkPhysicalDeviceFaultFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_device_fault)) +#if (defined(VK_EXT_rasterization_order_attachment_access)) +void compare_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT(std::vector & error_list, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT const& supported, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT const& requested); +void merge_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT(VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT & current, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_rasterization_order_attachment_access)) +#if (defined(VK_ARM_rasterization_order_attachment_access)) +void compare_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM(std::vector & error_list, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM const& supported, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM const& requested); +void 
merge_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM(VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM & current, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_rasterization_order_attachment_access)) +#if (defined(VK_EXT_rgba10x6_formats)) +void compare_VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT(std::vector & error_list, VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT const& supported, VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT const& requested); +void merge_VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT(VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT & current, VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_rgba10x6_formats)) +#if (defined(VK_EXT_mutable_descriptor_type)) +void compare_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT const& supported, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT const& requested); +void merge_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT(VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT & current, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_mutable_descriptor_type)) +#if (defined(VK_VALVE_mutable_descriptor_type)) +void compare_VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE(std::vector & error_list, VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE const& supported, VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE const& requested); +void merge_VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE(VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE & current, VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE const& merge_in); +#endif //(defined(VK_VALVE_mutable_descriptor_type)) +#if (defined(VK_EXT_vertex_input_dynamic_state)) +void compare_VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT(std::vector & error_list, VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT const& supported, 
VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT const& requested); +void merge_VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT(VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT & current, VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_vertex_input_dynamic_state)) +#if (defined(VK_EXT_device_address_binding_report)) +void compare_VkPhysicalDeviceAddressBindingReportFeaturesEXT(std::vector & error_list, VkPhysicalDeviceAddressBindingReportFeaturesEXT const& supported, VkPhysicalDeviceAddressBindingReportFeaturesEXT const& requested); +void merge_VkPhysicalDeviceAddressBindingReportFeaturesEXT(VkPhysicalDeviceAddressBindingReportFeaturesEXT & current, VkPhysicalDeviceAddressBindingReportFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_device_address_binding_report)) +#if (defined(VK_EXT_depth_clip_control)) +void compare_VkPhysicalDeviceDepthClipControlFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthClipControlFeaturesEXT const& supported, VkPhysicalDeviceDepthClipControlFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDepthClipControlFeaturesEXT(VkPhysicalDeviceDepthClipControlFeaturesEXT & current, VkPhysicalDeviceDepthClipControlFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_depth_clip_control)) +#if (defined(VK_EXT_primitive_topology_list_restart)) +void compare_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT(std::vector & error_list, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT const& supported, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT const& requested); +void merge_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT(VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT & current, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_primitive_topology_list_restart)) +#if (defined(VK_HUAWEI_subpass_shading)) +void compare_VkPhysicalDeviceSubpassShadingFeaturesHUAWEI(std::vector & 
error_list, VkPhysicalDeviceSubpassShadingFeaturesHUAWEI const& supported, VkPhysicalDeviceSubpassShadingFeaturesHUAWEI const& requested); +void merge_VkPhysicalDeviceSubpassShadingFeaturesHUAWEI(VkPhysicalDeviceSubpassShadingFeaturesHUAWEI & current, VkPhysicalDeviceSubpassShadingFeaturesHUAWEI const& merge_in); +#endif //(defined(VK_HUAWEI_subpass_shading)) +#if (defined(VK_HUAWEI_invocation_mask)) +void compare_VkPhysicalDeviceInvocationMaskFeaturesHUAWEI(std::vector & error_list, VkPhysicalDeviceInvocationMaskFeaturesHUAWEI const& supported, VkPhysicalDeviceInvocationMaskFeaturesHUAWEI const& requested); +void merge_VkPhysicalDeviceInvocationMaskFeaturesHUAWEI(VkPhysicalDeviceInvocationMaskFeaturesHUAWEI & current, VkPhysicalDeviceInvocationMaskFeaturesHUAWEI const& merge_in); +#endif //(defined(VK_HUAWEI_invocation_mask)) +#if (defined(VK_NV_external_memory_rdma)) +void compare_VkPhysicalDeviceExternalMemoryRDMAFeaturesNV(std::vector & error_list, VkPhysicalDeviceExternalMemoryRDMAFeaturesNV const& supported, VkPhysicalDeviceExternalMemoryRDMAFeaturesNV const& requested); +void merge_VkPhysicalDeviceExternalMemoryRDMAFeaturesNV(VkPhysicalDeviceExternalMemoryRDMAFeaturesNV & current, VkPhysicalDeviceExternalMemoryRDMAFeaturesNV const& merge_in); +#endif //(defined(VK_NV_external_memory_rdma)) +#if (defined(VK_EXT_pipeline_properties)) +void compare_VkPhysicalDevicePipelinePropertiesFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelinePropertiesFeaturesEXT const& supported, VkPhysicalDevicePipelinePropertiesFeaturesEXT const& requested); +void merge_VkPhysicalDevicePipelinePropertiesFeaturesEXT(VkPhysicalDevicePipelinePropertiesFeaturesEXT & current, VkPhysicalDevicePipelinePropertiesFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_pipeline_properties)) +#if (defined(VK_EXT_frame_boundary)) +void compare_VkPhysicalDeviceFrameBoundaryFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFrameBoundaryFeaturesEXT const& supported, 
VkPhysicalDeviceFrameBoundaryFeaturesEXT const& requested); +void merge_VkPhysicalDeviceFrameBoundaryFeaturesEXT(VkPhysicalDeviceFrameBoundaryFeaturesEXT & current, VkPhysicalDeviceFrameBoundaryFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_frame_boundary)) +#if (defined(VK_EXT_multisampled_render_to_single_sampled)) +void compare_VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT const& supported, VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT const& requested); +void merge_VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT(VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT & current, VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_multisampled_render_to_single_sampled)) +#if (defined(VK_EXT_extended_dynamic_state2)) +void compare_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT(std::vector & error_list, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT const& supported, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT const& requested); +void merge_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT(VkPhysicalDeviceExtendedDynamicState2FeaturesEXT & current, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_extended_dynamic_state2)) +#if (defined(VK_EXT_color_write_enable)) +void compare_VkPhysicalDeviceColorWriteEnableFeaturesEXT(std::vector & error_list, VkPhysicalDeviceColorWriteEnableFeaturesEXT const& supported, VkPhysicalDeviceColorWriteEnableFeaturesEXT const& requested); +void merge_VkPhysicalDeviceColorWriteEnableFeaturesEXT(VkPhysicalDeviceColorWriteEnableFeaturesEXT & current, VkPhysicalDeviceColorWriteEnableFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_color_write_enable)) +#if (defined(VK_EXT_primitives_generated_query)) +void compare_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT(std::vector & 
error_list, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT const& supported, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT const& requested); +void merge_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT(VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT & current, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_primitives_generated_query)) +#if (defined(VK_VALVE_video_encode_rgb_conversion)) +void compare_VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE(std::vector & error_list, VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE const& supported, VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE const& requested); +void merge_VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE(VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE & current, VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE const& merge_in); +#endif //(defined(VK_VALVE_video_encode_rgb_conversion)) +#if (defined(VK_EXT_image_view_min_lod)) +void compare_VkPhysicalDeviceImageViewMinLodFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageViewMinLodFeaturesEXT const& supported, VkPhysicalDeviceImageViewMinLodFeaturesEXT const& requested); +void merge_VkPhysicalDeviceImageViewMinLodFeaturesEXT(VkPhysicalDeviceImageViewMinLodFeaturesEXT & current, VkPhysicalDeviceImageViewMinLodFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_image_view_min_lod)) +#if (defined(VK_EXT_multi_draw)) +void compare_VkPhysicalDeviceMultiDrawFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMultiDrawFeaturesEXT const& supported, VkPhysicalDeviceMultiDrawFeaturesEXT const& requested); +void merge_VkPhysicalDeviceMultiDrawFeaturesEXT(VkPhysicalDeviceMultiDrawFeaturesEXT & current, VkPhysicalDeviceMultiDrawFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_multi_draw)) +#if (defined(VK_EXT_image_2d_view_of_3d)) +void compare_VkPhysicalDeviceImage2DViewOf3DFeaturesEXT(std::vector & error_list, 
VkPhysicalDeviceImage2DViewOf3DFeaturesEXT const& supported, VkPhysicalDeviceImage2DViewOf3DFeaturesEXT const& requested); +void merge_VkPhysicalDeviceImage2DViewOf3DFeaturesEXT(VkPhysicalDeviceImage2DViewOf3DFeaturesEXT & current, VkPhysicalDeviceImage2DViewOf3DFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_image_2d_view_of_3d)) +#if (defined(VK_EXT_shader_tile_image)) +void compare_VkPhysicalDeviceShaderTileImageFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderTileImageFeaturesEXT const& supported, VkPhysicalDeviceShaderTileImageFeaturesEXT const& requested); +void merge_VkPhysicalDeviceShaderTileImageFeaturesEXT(VkPhysicalDeviceShaderTileImageFeaturesEXT & current, VkPhysicalDeviceShaderTileImageFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_tile_image)) +#if (defined(VK_EXT_opacity_micromap)) +void compare_VkPhysicalDeviceOpacityMicromapFeaturesEXT(std::vector & error_list, VkPhysicalDeviceOpacityMicromapFeaturesEXT const& supported, VkPhysicalDeviceOpacityMicromapFeaturesEXT const& requested); +void merge_VkPhysicalDeviceOpacityMicromapFeaturesEXT(VkPhysicalDeviceOpacityMicromapFeaturesEXT & current, VkPhysicalDeviceOpacityMicromapFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_opacity_micromap)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_displacement_micromap)) +void compare_VkPhysicalDeviceDisplacementMicromapFeaturesNV(std::vector & error_list, VkPhysicalDeviceDisplacementMicromapFeaturesNV const& supported, VkPhysicalDeviceDisplacementMicromapFeaturesNV const& requested); +void merge_VkPhysicalDeviceDisplacementMicromapFeaturesNV(VkPhysicalDeviceDisplacementMicromapFeaturesNV & current, VkPhysicalDeviceDisplacementMicromapFeaturesNV const& merge_in); +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_displacement_micromap)) +#if (defined(VK_HUAWEI_cluster_culling_shader)) +void compare_VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI(std::vector & error_list, 
VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI const& supported, VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI const& requested); +void merge_VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI(VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI & current, VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI const& merge_in); +#endif //(defined(VK_HUAWEI_cluster_culling_shader)) +#if (defined(VK_EXT_border_color_swizzle)) +void compare_VkPhysicalDeviceBorderColorSwizzleFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBorderColorSwizzleFeaturesEXT const& supported, VkPhysicalDeviceBorderColorSwizzleFeaturesEXT const& requested); +void merge_VkPhysicalDeviceBorderColorSwizzleFeaturesEXT(VkPhysicalDeviceBorderColorSwizzleFeaturesEXT & current, VkPhysicalDeviceBorderColorSwizzleFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_border_color_swizzle)) +#if (defined(VK_EXT_pageable_device_local_memory)) +void compare_VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT(std::vector & error_list, VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT const& supported, VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT const& requested); +void merge_VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT(VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT & current, VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_pageable_device_local_memory)) +#if (defined(VK_ARM_scheduling_controls)) +void compare_VkPhysicalDeviceSchedulingControlsFeaturesARM(std::vector & error_list, VkPhysicalDeviceSchedulingControlsFeaturesARM const& supported, VkPhysicalDeviceSchedulingControlsFeaturesARM const& requested); +void merge_VkPhysicalDeviceSchedulingControlsFeaturesARM(VkPhysicalDeviceSchedulingControlsFeaturesARM & current, VkPhysicalDeviceSchedulingControlsFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_scheduling_controls)) +#if (defined(VK_EXT_image_sliced_view_of_3d)) +void 
compare_VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT const& supported, VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT const& requested); +void merge_VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT(VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT & current, VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_image_sliced_view_of_3d)) +#if (defined(VK_VALVE_descriptor_set_host_mapping)) +void compare_VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE(std::vector & error_list, VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE const& supported, VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE const& requested); +void merge_VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE(VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE & current, VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE const& merge_in); +#endif //(defined(VK_VALVE_descriptor_set_host_mapping)) +#if (defined(VK_EXT_non_seamless_cube_map)) +void compare_VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT(std::vector & error_list, VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT const& supported, VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT const& requested); +void merge_VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT(VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT & current, VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_non_seamless_cube_map)) +#if (defined(VK_ARM_render_pass_striped)) +void compare_VkPhysicalDeviceRenderPassStripedFeaturesARM(std::vector & error_list, VkPhysicalDeviceRenderPassStripedFeaturesARM const& supported, VkPhysicalDeviceRenderPassStripedFeaturesARM const& requested); +void merge_VkPhysicalDeviceRenderPassStripedFeaturesARM(VkPhysicalDeviceRenderPassStripedFeaturesARM & current, VkPhysicalDeviceRenderPassStripedFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_render_pass_striped)) +#if 
(defined(VK_EXT_fragment_density_map_offset)) +void compare_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT const& supported, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT const& requested); +void merge_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT(VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT & current, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_fragment_density_map_offset)) +#if (defined(VK_QCOM_fragment_density_map_offset)) +void compare_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM const& supported, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM(VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM & current, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_fragment_density_map_offset)) +#if (defined(VK_NV_copy_memory_indirect)) +void compare_VkPhysicalDeviceCopyMemoryIndirectFeaturesNV(std::vector & error_list, VkPhysicalDeviceCopyMemoryIndirectFeaturesNV const& supported, VkPhysicalDeviceCopyMemoryIndirectFeaturesNV const& requested); +void merge_VkPhysicalDeviceCopyMemoryIndirectFeaturesNV(VkPhysicalDeviceCopyMemoryIndirectFeaturesNV & current, VkPhysicalDeviceCopyMemoryIndirectFeaturesNV const& merge_in); +#endif //(defined(VK_NV_copy_memory_indirect)) +#if (defined(VK_EXT_memory_decompression)) +void compare_VkPhysicalDeviceMemoryDecompressionFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMemoryDecompressionFeaturesEXT const& supported, VkPhysicalDeviceMemoryDecompressionFeaturesEXT const& requested); +void merge_VkPhysicalDeviceMemoryDecompressionFeaturesEXT(VkPhysicalDeviceMemoryDecompressionFeaturesEXT & current, VkPhysicalDeviceMemoryDecompressionFeaturesEXT const& merge_in); 
+#endif //(defined(VK_EXT_memory_decompression)) +#if (defined(VK_NV_memory_decompression)) +void compare_VkPhysicalDeviceMemoryDecompressionFeaturesNV(std::vector & error_list, VkPhysicalDeviceMemoryDecompressionFeaturesNV const& supported, VkPhysicalDeviceMemoryDecompressionFeaturesNV const& requested); +void merge_VkPhysicalDeviceMemoryDecompressionFeaturesNV(VkPhysicalDeviceMemoryDecompressionFeaturesNV & current, VkPhysicalDeviceMemoryDecompressionFeaturesNV const& merge_in); +#endif //(defined(VK_NV_memory_decompression)) +#if (defined(VK_NV_device_generated_commands_compute)) +void compare_VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV(std::vector & error_list, VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV const& supported, VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV const& requested); +void merge_VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV(VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV & current, VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV const& merge_in); +#endif //(defined(VK_NV_device_generated_commands_compute)) +#if (defined(VK_NV_ray_tracing_linear_swept_spheres)) +void compare_VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV(std::vector & error_list, VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV const& supported, VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV const& requested); +void merge_VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV(VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV & current, VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV const& merge_in); +#endif //(defined(VK_NV_ray_tracing_linear_swept_spheres)) +#if (defined(VK_NV_linear_color_attachment)) +void compare_VkPhysicalDeviceLinearColorAttachmentFeaturesNV(std::vector & error_list, VkPhysicalDeviceLinearColorAttachmentFeaturesNV const& supported, VkPhysicalDeviceLinearColorAttachmentFeaturesNV const& requested); +void 
merge_VkPhysicalDeviceLinearColorAttachmentFeaturesNV(VkPhysicalDeviceLinearColorAttachmentFeaturesNV & current, VkPhysicalDeviceLinearColorAttachmentFeaturesNV const& merge_in); +#endif //(defined(VK_NV_linear_color_attachment)) +#if (defined(VK_EXT_image_compression_control_swapchain)) +void compare_VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT const& supported, VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT const& requested); +void merge_VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT(VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT & current, VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_image_compression_control_swapchain)) +#if (defined(VK_QCOM_image_processing)) +void compare_VkPhysicalDeviceImageProcessingFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceImageProcessingFeaturesQCOM const& supported, VkPhysicalDeviceImageProcessingFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceImageProcessingFeaturesQCOM(VkPhysicalDeviceImageProcessingFeaturesQCOM & current, VkPhysicalDeviceImageProcessingFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_image_processing)) +#if (defined(VK_EXT_nested_command_buffer)) +void compare_VkPhysicalDeviceNestedCommandBufferFeaturesEXT(std::vector & error_list, VkPhysicalDeviceNestedCommandBufferFeaturesEXT const& supported, VkPhysicalDeviceNestedCommandBufferFeaturesEXT const& requested); +void merge_VkPhysicalDeviceNestedCommandBufferFeaturesEXT(VkPhysicalDeviceNestedCommandBufferFeaturesEXT & current, VkPhysicalDeviceNestedCommandBufferFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_nested_command_buffer)) +#if (defined(VK_EXT_extended_dynamic_state3)) +void compare_VkPhysicalDeviceExtendedDynamicState3FeaturesEXT(std::vector & error_list, VkPhysicalDeviceExtendedDynamicState3FeaturesEXT const& supported, 
VkPhysicalDeviceExtendedDynamicState3FeaturesEXT const& requested); +void merge_VkPhysicalDeviceExtendedDynamicState3FeaturesEXT(VkPhysicalDeviceExtendedDynamicState3FeaturesEXT & current, VkPhysicalDeviceExtendedDynamicState3FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_extended_dynamic_state3)) +#if (defined(VK_EXT_subpass_merge_feedback)) +void compare_VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT(std::vector & error_list, VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT const& supported, VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT const& requested); +void merge_VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT(VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT & current, VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_subpass_merge_feedback)) +#if (defined(VK_ARM_tensors)) +void compare_VkPhysicalDeviceTensorFeaturesARM(std::vector & error_list, VkPhysicalDeviceTensorFeaturesARM const& supported, VkPhysicalDeviceTensorFeaturesARM const& requested); +void merge_VkPhysicalDeviceTensorFeaturesARM(VkPhysicalDeviceTensorFeaturesARM & current, VkPhysicalDeviceTensorFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_tensors)) +#if (defined(VK_ARM_tensors)) +void compare_VkPhysicalDeviceDescriptorBufferTensorFeaturesARM(std::vector & error_list, VkPhysicalDeviceDescriptorBufferTensorFeaturesARM const& supported, VkPhysicalDeviceDescriptorBufferTensorFeaturesARM const& requested); +void merge_VkPhysicalDeviceDescriptorBufferTensorFeaturesARM(VkPhysicalDeviceDescriptorBufferTensorFeaturesARM & current, VkPhysicalDeviceDescriptorBufferTensorFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_tensors)) +#if (defined(VK_EXT_shader_module_identifier)) +void compare_VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT const& supported, VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT const& requested); +void 
merge_VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT(VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT & current, VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_module_identifier)) +#if (defined(VK_NV_optical_flow)) +void compare_VkPhysicalDeviceOpticalFlowFeaturesNV(std::vector & error_list, VkPhysicalDeviceOpticalFlowFeaturesNV const& supported, VkPhysicalDeviceOpticalFlowFeaturesNV const& requested); +void merge_VkPhysicalDeviceOpticalFlowFeaturesNV(VkPhysicalDeviceOpticalFlowFeaturesNV & current, VkPhysicalDeviceOpticalFlowFeaturesNV const& merge_in); +#endif //(defined(VK_NV_optical_flow)) +#if (defined(VK_EXT_legacy_dithering)) +void compare_VkPhysicalDeviceLegacyDitheringFeaturesEXT(std::vector & error_list, VkPhysicalDeviceLegacyDitheringFeaturesEXT const& supported, VkPhysicalDeviceLegacyDitheringFeaturesEXT const& requested); +void merge_VkPhysicalDeviceLegacyDitheringFeaturesEXT(VkPhysicalDeviceLegacyDitheringFeaturesEXT & current, VkPhysicalDeviceLegacyDitheringFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_legacy_dithering)) +#if defined(VK_USE_PLATFORM_ANDROID_KHR) && (defined(VK_ANDROID_external_format_resolve)) +void compare_VkPhysicalDeviceExternalFormatResolveFeaturesANDROID(std::vector & error_list, VkPhysicalDeviceExternalFormatResolveFeaturesANDROID const& supported, VkPhysicalDeviceExternalFormatResolveFeaturesANDROID const& requested); +void merge_VkPhysicalDeviceExternalFormatResolveFeaturesANDROID(VkPhysicalDeviceExternalFormatResolveFeaturesANDROID & current, VkPhysicalDeviceExternalFormatResolveFeaturesANDROID const& merge_in); +#endif //defined(VK_USE_PLATFORM_ANDROID_KHR) && (defined(VK_ANDROID_external_format_resolve)) +#if (defined(VK_AMD_anti_lag)) +void compare_VkPhysicalDeviceAntiLagFeaturesAMD(std::vector & error_list, VkPhysicalDeviceAntiLagFeaturesAMD const& supported, VkPhysicalDeviceAntiLagFeaturesAMD const& requested); +void 
merge_VkPhysicalDeviceAntiLagFeaturesAMD(VkPhysicalDeviceAntiLagFeaturesAMD & current, VkPhysicalDeviceAntiLagFeaturesAMD const& merge_in); +#endif //(defined(VK_AMD_anti_lag)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_dense_geometry_format)) +void compare_VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX(std::vector & error_list, VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX const& supported, VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX const& requested); +void merge_VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX(VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX & current, VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX const& merge_in); +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_dense_geometry_format)) +#if (defined(VK_EXT_shader_object)) +void compare_VkPhysicalDeviceShaderObjectFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderObjectFeaturesEXT const& supported, VkPhysicalDeviceShaderObjectFeaturesEXT const& requested); +void merge_VkPhysicalDeviceShaderObjectFeaturesEXT(VkPhysicalDeviceShaderObjectFeaturesEXT & current, VkPhysicalDeviceShaderObjectFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_object)) +#if (defined(VK_QCOM_tile_properties)) +void compare_VkPhysicalDeviceTilePropertiesFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceTilePropertiesFeaturesQCOM const& supported, VkPhysicalDeviceTilePropertiesFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceTilePropertiesFeaturesQCOM(VkPhysicalDeviceTilePropertiesFeaturesQCOM & current, VkPhysicalDeviceTilePropertiesFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_tile_properties)) +#if (defined(VK_SEC_amigo_profiling)) +void compare_VkPhysicalDeviceAmigoProfilingFeaturesSEC(std::vector & error_list, VkPhysicalDeviceAmigoProfilingFeaturesSEC const& supported, VkPhysicalDeviceAmigoProfilingFeaturesSEC const& requested); +void merge_VkPhysicalDeviceAmigoProfilingFeaturesSEC(VkPhysicalDeviceAmigoProfilingFeaturesSEC & 
current, VkPhysicalDeviceAmigoProfilingFeaturesSEC const& merge_in); +#endif //(defined(VK_SEC_amigo_profiling)) +#if (defined(VK_QCOM_multiview_per_view_viewports)) +void compare_VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM const& supported, VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM(VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM & current, VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_multiview_per_view_viewports)) +#if (defined(VK_NV_ray_tracing_invocation_reorder)) +void compare_VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV(std::vector & error_list, VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV const& supported, VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV const& requested); +void merge_VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV(VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV & current, VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV const& merge_in); +#endif //(defined(VK_NV_ray_tracing_invocation_reorder)) +#if (defined(VK_NV_cooperative_vector)) +void compare_VkPhysicalDeviceCooperativeVectorFeaturesNV(std::vector & error_list, VkPhysicalDeviceCooperativeVectorFeaturesNV const& supported, VkPhysicalDeviceCooperativeVectorFeaturesNV const& requested); +void merge_VkPhysicalDeviceCooperativeVectorFeaturesNV(VkPhysicalDeviceCooperativeVectorFeaturesNV & current, VkPhysicalDeviceCooperativeVectorFeaturesNV const& merge_in); +#endif //(defined(VK_NV_cooperative_vector)) +#if (defined(VK_NV_extended_sparse_address_space)) +void compare_VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV(std::vector & error_list, VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV const& supported, VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV const& requested); +void 
merge_VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV(VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV & current, VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV const& merge_in); +#endif //(defined(VK_NV_extended_sparse_address_space)) +#if (defined(VK_EXT_legacy_vertex_attributes)) +void compare_VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT(std::vector & error_list, VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const& supported, VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const& requested); +void merge_VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT(VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT & current, VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_legacy_vertex_attributes)) +#if (defined(VK_ARM_shader_core_builtins)) +void compare_VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM(std::vector & error_list, VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM const& supported, VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM const& requested); +void merge_VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM(VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM & current, VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_shader_core_builtins)) +#if (defined(VK_EXT_pipeline_library_group_handles)) +void compare_VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT const& supported, VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT const& requested); +void merge_VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT(VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT & current, VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_pipeline_library_group_handles)) +#if (defined(VK_EXT_dynamic_rendering_unused_attachments)) +void compare_VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT(std::vector & error_list, 
VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT const& supported, VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT(VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT & current, VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_dynamic_rendering_unused_attachments)) +#if (defined(VK_ARM_data_graph)) +void compare_VkPhysicalDeviceDataGraphFeaturesARM(std::vector & error_list, VkPhysicalDeviceDataGraphFeaturesARM const& supported, VkPhysicalDeviceDataGraphFeaturesARM const& requested); +void merge_VkPhysicalDeviceDataGraphFeaturesARM(VkPhysicalDeviceDataGraphFeaturesARM & current, VkPhysicalDeviceDataGraphFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_data_graph)) +#if (defined(VK_QCOM_multiview_per_view_render_areas)) +void compare_VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM const& supported, VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM(VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM & current, VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_multiview_per_view_render_areas)) +#if (defined(VK_NV_per_stage_descriptor_set)) +void compare_VkPhysicalDevicePerStageDescriptorSetFeaturesNV(std::vector & error_list, VkPhysicalDevicePerStageDescriptorSetFeaturesNV const& supported, VkPhysicalDevicePerStageDescriptorSetFeaturesNV const& requested); +void merge_VkPhysicalDevicePerStageDescriptorSetFeaturesNV(VkPhysicalDevicePerStageDescriptorSetFeaturesNV & current, VkPhysicalDevicePerStageDescriptorSetFeaturesNV const& merge_in); +#endif //(defined(VK_NV_per_stage_descriptor_set)) +#if (defined(VK_QCOM_image_processing2)) +void 
compare_VkPhysicalDeviceImageProcessing2FeaturesQCOM(std::vector & error_list, VkPhysicalDeviceImageProcessing2FeaturesQCOM const& supported, VkPhysicalDeviceImageProcessing2FeaturesQCOM const& requested); +void merge_VkPhysicalDeviceImageProcessing2FeaturesQCOM(VkPhysicalDeviceImageProcessing2FeaturesQCOM & current, VkPhysicalDeviceImageProcessing2FeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_image_processing2)) +#if (defined(VK_QCOM_filter_cubic_weights)) +void compare_VkPhysicalDeviceCubicWeightsFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceCubicWeightsFeaturesQCOM const& supported, VkPhysicalDeviceCubicWeightsFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceCubicWeightsFeaturesQCOM(VkPhysicalDeviceCubicWeightsFeaturesQCOM & current, VkPhysicalDeviceCubicWeightsFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_filter_cubic_weights)) +#if (defined(VK_QCOM_ycbcr_degamma)) +void compare_VkPhysicalDeviceYcbcrDegammaFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceYcbcrDegammaFeaturesQCOM const& supported, VkPhysicalDeviceYcbcrDegammaFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceYcbcrDegammaFeaturesQCOM(VkPhysicalDeviceYcbcrDegammaFeaturesQCOM & current, VkPhysicalDeviceYcbcrDegammaFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_ycbcr_degamma)) +#if (defined(VK_QCOM_filter_cubic_clamp)) +void compare_VkPhysicalDeviceCubicClampFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceCubicClampFeaturesQCOM const& supported, VkPhysicalDeviceCubicClampFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceCubicClampFeaturesQCOM(VkPhysicalDeviceCubicClampFeaturesQCOM & current, VkPhysicalDeviceCubicClampFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_filter_cubic_clamp)) +#if (defined(VK_EXT_attachment_feedback_loop_dynamic_state)) +void compare_VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT(std::vector & error_list, 
VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT const& supported, VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT const& requested); +void merge_VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT(VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT & current, VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_attachment_feedback_loop_dynamic_state)) +#if defined(VK_USE_PLATFORM_SCREEN_QNX) && (defined(VK_QNX_external_memory_screen_buffer)) +void compare_VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX(std::vector & error_list, VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX const& supported, VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX const& requested); +void merge_VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX(VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX & current, VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX const& merge_in); +#endif //defined(VK_USE_PLATFORM_SCREEN_QNX) && (defined(VK_QNX_external_memory_screen_buffer)) +#if (defined(VK_NV_descriptor_pool_overallocation)) +void compare_VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV(std::vector & error_list, VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV const& supported, VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV const& requested); +void merge_VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV(VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV & current, VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV const& merge_in); +#endif //(defined(VK_NV_descriptor_pool_overallocation)) +#if (defined(VK_QCOM_tile_memory_heap)) +void compare_VkPhysicalDeviceTileMemoryHeapFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceTileMemoryHeapFeaturesQCOM const& supported, VkPhysicalDeviceTileMemoryHeapFeaturesQCOM const& requested); +void merge_VkPhysicalDeviceTileMemoryHeapFeaturesQCOM(VkPhysicalDeviceTileMemoryHeapFeaturesQCOM & 
current, VkPhysicalDeviceTileMemoryHeapFeaturesQCOM const& merge_in); +#endif //(defined(VK_QCOM_tile_memory_heap)) +#if (defined(VK_NV_raw_access_chains)) +void compare_VkPhysicalDeviceRawAccessChainsFeaturesNV(std::vector & error_list, VkPhysicalDeviceRawAccessChainsFeaturesNV const& supported, VkPhysicalDeviceRawAccessChainsFeaturesNV const& requested); +void merge_VkPhysicalDeviceRawAccessChainsFeaturesNV(VkPhysicalDeviceRawAccessChainsFeaturesNV & current, VkPhysicalDeviceRawAccessChainsFeaturesNV const& merge_in); +#endif //(defined(VK_NV_raw_access_chains)) +#if (defined(VK_NV_command_buffer_inheritance)) +void compare_VkPhysicalDeviceCommandBufferInheritanceFeaturesNV(std::vector & error_list, VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const& supported, VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const& requested); +void merge_VkPhysicalDeviceCommandBufferInheritanceFeaturesNV(VkPhysicalDeviceCommandBufferInheritanceFeaturesNV & current, VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const& merge_in); +#endif //(defined(VK_NV_command_buffer_inheritance)) +#if (defined(VK_NV_shader_atomic_float16_vector)) +void compare_VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV(std::vector & error_list, VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV const& supported, VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV const& requested); +void merge_VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV(VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV & current, VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV const& merge_in); +#endif //(defined(VK_NV_shader_atomic_float16_vector)) +#if (defined(VK_EXT_shader_replicated_composites)) +void compare_VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const& supported, VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const& requested); +void 
merge_VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT(VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT & current, VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_replicated_composites)) +#if (defined(VK_EXT_shader_float8)) +void compare_VkPhysicalDeviceShaderFloat8FeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderFloat8FeaturesEXT const& supported, VkPhysicalDeviceShaderFloat8FeaturesEXT const& requested); +void merge_VkPhysicalDeviceShaderFloat8FeaturesEXT(VkPhysicalDeviceShaderFloat8FeaturesEXT & current, VkPhysicalDeviceShaderFloat8FeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_float8)) +#if (defined(VK_NV_ray_tracing_validation)) +void compare_VkPhysicalDeviceRayTracingValidationFeaturesNV(std::vector & error_list, VkPhysicalDeviceRayTracingValidationFeaturesNV const& supported, VkPhysicalDeviceRayTracingValidationFeaturesNV const& requested); +void merge_VkPhysicalDeviceRayTracingValidationFeaturesNV(VkPhysicalDeviceRayTracingValidationFeaturesNV & current, VkPhysicalDeviceRayTracingValidationFeaturesNV const& merge_in); +#endif //(defined(VK_NV_ray_tracing_validation)) +#if (defined(VK_NV_cluster_acceleration_structure)) +void compare_VkPhysicalDeviceClusterAccelerationStructureFeaturesNV(std::vector & error_list, VkPhysicalDeviceClusterAccelerationStructureFeaturesNV const& supported, VkPhysicalDeviceClusterAccelerationStructureFeaturesNV const& requested); +void merge_VkPhysicalDeviceClusterAccelerationStructureFeaturesNV(VkPhysicalDeviceClusterAccelerationStructureFeaturesNV & current, VkPhysicalDeviceClusterAccelerationStructureFeaturesNV const& merge_in); +#endif //(defined(VK_NV_cluster_acceleration_structure)) +#if (defined(VK_NV_partitioned_acceleration_structure)) +void compare_VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV(std::vector & error_list, VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV const& supported, 
VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV const& requested); +void merge_VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV(VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV & current, VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV const& merge_in); +#endif //(defined(VK_NV_partitioned_acceleration_structure)) +#if (defined(VK_EXT_device_generated_commands)) +void compare_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT const& supported, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT(VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT & current, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_device_generated_commands)) +#if (defined(VK_MESA_image_alignment_control)) +void compare_VkPhysicalDeviceImageAlignmentControlFeaturesMESA(std::vector & error_list, VkPhysicalDeviceImageAlignmentControlFeaturesMESA const& supported, VkPhysicalDeviceImageAlignmentControlFeaturesMESA const& requested); +void merge_VkPhysicalDeviceImageAlignmentControlFeaturesMESA(VkPhysicalDeviceImageAlignmentControlFeaturesMESA & current, VkPhysicalDeviceImageAlignmentControlFeaturesMESA const& merge_in); +#endif //(defined(VK_MESA_image_alignment_control)) +#if (defined(VK_EXT_depth_clamp_control)) +void compare_VkPhysicalDeviceDepthClampControlFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthClampControlFeaturesEXT const& supported, VkPhysicalDeviceDepthClampControlFeaturesEXT const& requested); +void merge_VkPhysicalDeviceDepthClampControlFeaturesEXT(VkPhysicalDeviceDepthClampControlFeaturesEXT & current, VkPhysicalDeviceDepthClampControlFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_depth_clamp_control)) +#if (defined(VK_HUAWEI_hdr_vivid)) +void compare_VkPhysicalDeviceHdrVividFeaturesHUAWEI(std::vector & error_list, 
VkPhysicalDeviceHdrVividFeaturesHUAWEI const& supported, VkPhysicalDeviceHdrVividFeaturesHUAWEI const& requested); +void merge_VkPhysicalDeviceHdrVividFeaturesHUAWEI(VkPhysicalDeviceHdrVividFeaturesHUAWEI & current, VkPhysicalDeviceHdrVividFeaturesHUAWEI const& merge_in); +#endif //(defined(VK_HUAWEI_hdr_vivid)) +#if (defined(VK_NV_cooperative_matrix2)) +void compare_VkPhysicalDeviceCooperativeMatrix2FeaturesNV(std::vector & error_list, VkPhysicalDeviceCooperativeMatrix2FeaturesNV const& supported, VkPhysicalDeviceCooperativeMatrix2FeaturesNV const& requested); +void merge_VkPhysicalDeviceCooperativeMatrix2FeaturesNV(VkPhysicalDeviceCooperativeMatrix2FeaturesNV & current, VkPhysicalDeviceCooperativeMatrix2FeaturesNV const& merge_in); +#endif //(defined(VK_NV_cooperative_matrix2)) +#if (defined(VK_ARM_pipeline_opacity_micromap)) +void compare_VkPhysicalDevicePipelineOpacityMicromapFeaturesARM(std::vector & error_list, VkPhysicalDevicePipelineOpacityMicromapFeaturesARM const& supported, VkPhysicalDevicePipelineOpacityMicromapFeaturesARM const& requested); +void merge_VkPhysicalDevicePipelineOpacityMicromapFeaturesARM(VkPhysicalDevicePipelineOpacityMicromapFeaturesARM & current, VkPhysicalDevicePipelineOpacityMicromapFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_pipeline_opacity_micromap)) +#if (defined(VK_EXT_vertex_attribute_robustness)) +void compare_VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT(std::vector & error_list, VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT const& supported, VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT const& requested); +void merge_VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT(VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT & current, VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_vertex_attribute_robustness)) +#if (defined(VK_ARM_format_pack)) +void compare_VkPhysicalDeviceFormatPackFeaturesARM(std::vector & error_list, 
VkPhysicalDeviceFormatPackFeaturesARM const& supported, VkPhysicalDeviceFormatPackFeaturesARM const& requested); +void merge_VkPhysicalDeviceFormatPackFeaturesARM(VkPhysicalDeviceFormatPackFeaturesARM & current, VkPhysicalDeviceFormatPackFeaturesARM const& merge_in); +#endif //(defined(VK_ARM_format_pack)) +#if (defined(VK_VALVE_fragment_density_map_layered)) +void compare_VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE const& supported, VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE const& requested); +void merge_VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE(VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE & current, VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE const& merge_in); +#endif //(defined(VK_VALVE_fragment_density_map_layered)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_present_metering)) +void compare_VkPhysicalDevicePresentMeteringFeaturesNV(std::vector & error_list, VkPhysicalDevicePresentMeteringFeaturesNV const& supported, VkPhysicalDevicePresentMeteringFeaturesNV const& requested); +void merge_VkPhysicalDevicePresentMeteringFeaturesNV(VkPhysicalDevicePresentMeteringFeaturesNV & current, VkPhysicalDevicePresentMeteringFeaturesNV const& merge_in); +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_present_metering)) +#if (defined(VK_EXT_zero_initialize_device_memory)) +void compare_VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT(std::vector & error_list, VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT const& supported, VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT const& requested); +void merge_VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT(VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT & current, VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_zero_initialize_device_memory)) +#if (defined(VK_EXT_shader_64bit_indexing)) 
+void compare_VkPhysicalDeviceShader64BitIndexingFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShader64BitIndexingFeaturesEXT const& supported, VkPhysicalDeviceShader64BitIndexingFeaturesEXT const& requested); +void merge_VkPhysicalDeviceShader64BitIndexingFeaturesEXT(VkPhysicalDeviceShader64BitIndexingFeaturesEXT & current, VkPhysicalDeviceShader64BitIndexingFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_64bit_indexing)) +#if (defined(VK_SEC_pipeline_cache_incremental_mode)) +void compare_VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC(std::vector & error_list, VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC const& supported, VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC const& requested); +void merge_VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC(VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC & current, VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC const& merge_in); +#endif //(defined(VK_SEC_pipeline_cache_incremental_mode)) +#if (defined(VK_EXT_shader_uniform_buffer_unsized_array)) +void compare_VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT const& supported, VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT const& requested); +void merge_VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT(VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT & current, VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_shader_uniform_buffer_unsized_array)) +#if (defined(VK_KHR_acceleration_structure)) +void compare_VkPhysicalDeviceAccelerationStructureFeaturesKHR(std::vector & error_list, VkPhysicalDeviceAccelerationStructureFeaturesKHR const& supported, VkPhysicalDeviceAccelerationStructureFeaturesKHR const& requested); +void merge_VkPhysicalDeviceAccelerationStructureFeaturesKHR(VkPhysicalDeviceAccelerationStructureFeaturesKHR & 
current, VkPhysicalDeviceAccelerationStructureFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_acceleration_structure)) +#if (defined(VK_KHR_ray_tracing_pipeline)) +void compare_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayTracingPipelineFeaturesKHR const& supported, VkPhysicalDeviceRayTracingPipelineFeaturesKHR const& requested); +void merge_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(VkPhysicalDeviceRayTracingPipelineFeaturesKHR & current, VkPhysicalDeviceRayTracingPipelineFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_ray_tracing_pipeline)) +#if (defined(VK_KHR_ray_query)) +void compare_VkPhysicalDeviceRayQueryFeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayQueryFeaturesKHR const& supported, VkPhysicalDeviceRayQueryFeaturesKHR const& requested); +void merge_VkPhysicalDeviceRayQueryFeaturesKHR(VkPhysicalDeviceRayQueryFeaturesKHR & current, VkPhysicalDeviceRayQueryFeaturesKHR const& merge_in); +#endif //(defined(VK_KHR_ray_query)) +#if (defined(VK_EXT_mesh_shader)) +void compare_VkPhysicalDeviceMeshShaderFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMeshShaderFeaturesEXT const& supported, VkPhysicalDeviceMeshShaderFeaturesEXT const& requested); +void merge_VkPhysicalDeviceMeshShaderFeaturesEXT(VkPhysicalDeviceMeshShaderFeaturesEXT & current, VkPhysicalDeviceMeshShaderFeaturesEXT const& merge_in); +#endif //(defined(VK_EXT_mesh_shader)) +void compare_feature_struct(VkStructureType sType, std::vector & error_list, const void* supported, const void* requested); +void merge_feature_struct(VkStructureType sType, void* current, const void* merge_in); +} // namespace vkb \ No newline at end of file diff --git a/extern/vk-bootstrap/src/VkBootstrapFeatureChain.inl b/extern/vk-bootstrap/src/VkBootstrapFeatureChain.inl new file mode 100644 index 0000000000..fc8543dfbe --- /dev/null +++ b/extern/vk-bootstrap/src/VkBootstrapFeatureChain.inl @@ -0,0 +1,7592 @@ +/* + * Copyright © 2025 Charles 
Giessen (charles@lunarg.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the “Software”), to deal in the Software without restriction, including without + * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +// This file is a part of VkBootstrap +// https://github.com/charles-lunarg/vk-bootstrap + +#pragma once +#include +#include +#include + +namespace vkb::detail { + +void compare_VkPhysicalDeviceFeatures(std::vector & error_list, VkPhysicalDeviceFeatures const& supported, VkPhysicalDeviceFeatures const& requested) { + if (requested.robustBufferAccess && !supported.robustBufferAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::robustBufferAccess"); + } + if (requested.fullDrawIndexUint32 && !supported.fullDrawIndexUint32) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::fullDrawIndexUint32"); + } + if (requested.imageCubeArray && !supported.imageCubeArray) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::imageCubeArray"); + } + if (requested.independentBlend && !supported.independentBlend) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::independentBlend"); + } + if (requested.geometryShader && !supported.geometryShader) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::geometryShader"); + } + if (requested.tessellationShader && !supported.tessellationShader) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::tessellationShader"); + } + if (requested.sampleRateShading && !supported.sampleRateShading) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sampleRateShading"); + } + if (requested.dualSrcBlend && !supported.dualSrcBlend) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::dualSrcBlend"); + } + if (requested.logicOp && !supported.logicOp) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::logicOp"); + } + if (requested.multiDrawIndirect && !supported.multiDrawIndirect) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::multiDrawIndirect"); + } + if (requested.drawIndirectFirstInstance && !supported.drawIndirectFirstInstance) { + error_list.push_back("Missing feature 
VkPhysicalDeviceFeatures::drawIndirectFirstInstance"); + } + if (requested.depthClamp && !supported.depthClamp) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::depthClamp"); + } + if (requested.depthBiasClamp && !supported.depthBiasClamp) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::depthBiasClamp"); + } + if (requested.fillModeNonSolid && !supported.fillModeNonSolid) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::fillModeNonSolid"); + } + if (requested.depthBounds && !supported.depthBounds) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::depthBounds"); + } + if (requested.wideLines && !supported.wideLines) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::wideLines"); + } + if (requested.largePoints && !supported.largePoints) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::largePoints"); + } + if (requested.alphaToOne && !supported.alphaToOne) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::alphaToOne"); + } + if (requested.multiViewport && !supported.multiViewport) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::multiViewport"); + } + if (requested.samplerAnisotropy && !supported.samplerAnisotropy) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::samplerAnisotropy"); + } + if (requested.textureCompressionETC2 && !supported.textureCompressionETC2) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::textureCompressionETC2"); + } + if (requested.textureCompressionASTC_LDR && !supported.textureCompressionASTC_LDR) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::textureCompressionASTC_LDR"); + } + if (requested.textureCompressionBC && !supported.textureCompressionBC) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::textureCompressionBC"); + } + if (requested.occlusionQueryPrecise && !supported.occlusionQueryPrecise) { + 
error_list.push_back("Missing feature VkPhysicalDeviceFeatures::occlusionQueryPrecise"); + } + if (requested.pipelineStatisticsQuery && !supported.pipelineStatisticsQuery) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::pipelineStatisticsQuery"); + } + if (requested.vertexPipelineStoresAndAtomics && !supported.vertexPipelineStoresAndAtomics) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::vertexPipelineStoresAndAtomics"); + } + if (requested.fragmentStoresAndAtomics && !supported.fragmentStoresAndAtomics) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::fragmentStoresAndAtomics"); + } + if (requested.shaderTessellationAndGeometryPointSize && !supported.shaderTessellationAndGeometryPointSize) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderTessellationAndGeometryPointSize"); + } + if (requested.shaderImageGatherExtended && !supported.shaderImageGatherExtended) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderImageGatherExtended"); + } + if (requested.shaderStorageImageExtendedFormats && !supported.shaderStorageImageExtendedFormats) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderStorageImageExtendedFormats"); + } + if (requested.shaderStorageImageMultisample && !supported.shaderStorageImageMultisample) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderStorageImageMultisample"); + } + if (requested.shaderStorageImageReadWithoutFormat && !supported.shaderStorageImageReadWithoutFormat) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderStorageImageReadWithoutFormat"); + } + if (requested.shaderStorageImageWriteWithoutFormat && !supported.shaderStorageImageWriteWithoutFormat) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderStorageImageWriteWithoutFormat"); + } + if (requested.shaderUniformBufferArrayDynamicIndexing && 
!supported.shaderUniformBufferArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderUniformBufferArrayDynamicIndexing"); + } + if (requested.shaderSampledImageArrayDynamicIndexing && !supported.shaderSampledImageArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderSampledImageArrayDynamicIndexing"); + } + if (requested.shaderStorageBufferArrayDynamicIndexing && !supported.shaderStorageBufferArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderStorageBufferArrayDynamicIndexing"); + } + if (requested.shaderStorageImageArrayDynamicIndexing && !supported.shaderStorageImageArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderStorageImageArrayDynamicIndexing"); + } + if (requested.shaderClipDistance && !supported.shaderClipDistance) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderClipDistance"); + } + if (requested.shaderCullDistance && !supported.shaderCullDistance) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderCullDistance"); + } + if (requested.shaderFloat64 && !supported.shaderFloat64) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderFloat64"); + } + if (requested.shaderInt64 && !supported.shaderInt64) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderInt64"); + } + if (requested.shaderInt16 && !supported.shaderInt16) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderInt16"); + } + if (requested.shaderResourceResidency && !supported.shaderResourceResidency) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderResourceResidency"); + } + if (requested.shaderResourceMinLod && !supported.shaderResourceMinLod) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::shaderResourceMinLod"); + } + if (requested.sparseBinding && 
!supported.sparseBinding) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseBinding"); + } + if (requested.sparseResidencyBuffer && !supported.sparseResidencyBuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidencyBuffer"); + } + if (requested.sparseResidencyImage2D && !supported.sparseResidencyImage2D) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidencyImage2D"); + } + if (requested.sparseResidencyImage3D && !supported.sparseResidencyImage3D) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidencyImage3D"); + } + if (requested.sparseResidency2Samples && !supported.sparseResidency2Samples) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidency2Samples"); + } + if (requested.sparseResidency4Samples && !supported.sparseResidency4Samples) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidency4Samples"); + } + if (requested.sparseResidency8Samples && !supported.sparseResidency8Samples) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidency8Samples"); + } + if (requested.sparseResidency16Samples && !supported.sparseResidency16Samples) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidency16Samples"); + } + if (requested.sparseResidencyAliased && !supported.sparseResidencyAliased) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::sparseResidencyAliased"); + } + if (requested.variableMultisampleRate && !supported.variableMultisampleRate) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::variableMultisampleRate"); + } + if (requested.inheritedQueries && !supported.inheritedQueries) { + error_list.push_back("Missing feature VkPhysicalDeviceFeatures::inheritedQueries"); + } +} +void merge_VkPhysicalDeviceFeatures(VkPhysicalDeviceFeatures & current, VkPhysicalDeviceFeatures const& merge_in) { + 
current.robustBufferAccess = current.robustBufferAccess || merge_in.robustBufferAccess; + current.fullDrawIndexUint32 = current.fullDrawIndexUint32 || merge_in.fullDrawIndexUint32; + current.imageCubeArray = current.imageCubeArray || merge_in.imageCubeArray; + current.independentBlend = current.independentBlend || merge_in.independentBlend; + current.geometryShader = current.geometryShader || merge_in.geometryShader; + current.tessellationShader = current.tessellationShader || merge_in.tessellationShader; + current.sampleRateShading = current.sampleRateShading || merge_in.sampleRateShading; + current.dualSrcBlend = current.dualSrcBlend || merge_in.dualSrcBlend; + current.logicOp = current.logicOp || merge_in.logicOp; + current.multiDrawIndirect = current.multiDrawIndirect || merge_in.multiDrawIndirect; + current.drawIndirectFirstInstance = current.drawIndirectFirstInstance || merge_in.drawIndirectFirstInstance; + current.depthClamp = current.depthClamp || merge_in.depthClamp; + current.depthBiasClamp = current.depthBiasClamp || merge_in.depthBiasClamp; + current.fillModeNonSolid = current.fillModeNonSolid || merge_in.fillModeNonSolid; + current.depthBounds = current.depthBounds || merge_in.depthBounds; + current.wideLines = current.wideLines || merge_in.wideLines; + current.largePoints = current.largePoints || merge_in.largePoints; + current.alphaToOne = current.alphaToOne || merge_in.alphaToOne; + current.multiViewport = current.multiViewport || merge_in.multiViewport; + current.samplerAnisotropy = current.samplerAnisotropy || merge_in.samplerAnisotropy; + current.textureCompressionETC2 = current.textureCompressionETC2 || merge_in.textureCompressionETC2; + current.textureCompressionASTC_LDR = current.textureCompressionASTC_LDR || merge_in.textureCompressionASTC_LDR; + current.textureCompressionBC = current.textureCompressionBC || merge_in.textureCompressionBC; + current.occlusionQueryPrecise = current.occlusionQueryPrecise || merge_in.occlusionQueryPrecise; + 
current.pipelineStatisticsQuery = current.pipelineStatisticsQuery || merge_in.pipelineStatisticsQuery; + current.vertexPipelineStoresAndAtomics = current.vertexPipelineStoresAndAtomics || merge_in.vertexPipelineStoresAndAtomics; + current.fragmentStoresAndAtomics = current.fragmentStoresAndAtomics || merge_in.fragmentStoresAndAtomics; + current.shaderTessellationAndGeometryPointSize = current.shaderTessellationAndGeometryPointSize || merge_in.shaderTessellationAndGeometryPointSize; + current.shaderImageGatherExtended = current.shaderImageGatherExtended || merge_in.shaderImageGatherExtended; + current.shaderStorageImageExtendedFormats = current.shaderStorageImageExtendedFormats || merge_in.shaderStorageImageExtendedFormats; + current.shaderStorageImageMultisample = current.shaderStorageImageMultisample || merge_in.shaderStorageImageMultisample; + current.shaderStorageImageReadWithoutFormat = current.shaderStorageImageReadWithoutFormat || merge_in.shaderStorageImageReadWithoutFormat; + current.shaderStorageImageWriteWithoutFormat = current.shaderStorageImageWriteWithoutFormat || merge_in.shaderStorageImageWriteWithoutFormat; + current.shaderUniformBufferArrayDynamicIndexing = current.shaderUniformBufferArrayDynamicIndexing || merge_in.shaderUniformBufferArrayDynamicIndexing; + current.shaderSampledImageArrayDynamicIndexing = current.shaderSampledImageArrayDynamicIndexing || merge_in.shaderSampledImageArrayDynamicIndexing; + current.shaderStorageBufferArrayDynamicIndexing = current.shaderStorageBufferArrayDynamicIndexing || merge_in.shaderStorageBufferArrayDynamicIndexing; + current.shaderStorageImageArrayDynamicIndexing = current.shaderStorageImageArrayDynamicIndexing || merge_in.shaderStorageImageArrayDynamicIndexing; + current.shaderClipDistance = current.shaderClipDistance || merge_in.shaderClipDistance; + current.shaderCullDistance = current.shaderCullDistance || merge_in.shaderCullDistance; + current.shaderFloat64 = current.shaderFloat64 || 
merge_in.shaderFloat64; + current.shaderInt64 = current.shaderInt64 || merge_in.shaderInt64; + current.shaderInt16 = current.shaderInt16 || merge_in.shaderInt16; + current.shaderResourceResidency = current.shaderResourceResidency || merge_in.shaderResourceResidency; + current.shaderResourceMinLod = current.shaderResourceMinLod || merge_in.shaderResourceMinLod; + current.sparseBinding = current.sparseBinding || merge_in.sparseBinding; + current.sparseResidencyBuffer = current.sparseResidencyBuffer || merge_in.sparseResidencyBuffer; + current.sparseResidencyImage2D = current.sparseResidencyImage2D || merge_in.sparseResidencyImage2D; + current.sparseResidencyImage3D = current.sparseResidencyImage3D || merge_in.sparseResidencyImage3D; + current.sparseResidency2Samples = current.sparseResidency2Samples || merge_in.sparseResidency2Samples; + current.sparseResidency4Samples = current.sparseResidency4Samples || merge_in.sparseResidency4Samples; + current.sparseResidency8Samples = current.sparseResidency8Samples || merge_in.sparseResidency8Samples; + current.sparseResidency16Samples = current.sparseResidency16Samples || merge_in.sparseResidency16Samples; + current.sparseResidencyAliased = current.sparseResidencyAliased || merge_in.sparseResidencyAliased; + current.variableMultisampleRate = current.variableMultisampleRate || merge_in.variableMultisampleRate; + current.inheritedQueries = current.inheritedQueries || merge_in.inheritedQueries; +} +#if (defined(VK_VERSION_1_1)) +void compare_VkPhysicalDeviceProtectedMemoryFeatures(std::vector & error_list, VkPhysicalDeviceProtectedMemoryFeatures const& supported, VkPhysicalDeviceProtectedMemoryFeatures const& requested) { + if (requested.protectedMemory && !supported.protectedMemory) { + error_list.push_back("Missing feature VkPhysicalDeviceProtectedMemoryFeatures::protectedMemory"); + } +} +void merge_VkPhysicalDeviceProtectedMemoryFeatures(VkPhysicalDeviceProtectedMemoryFeatures & current, 
VkPhysicalDeviceProtectedMemoryFeatures const& merge_in) { + current.protectedMemory = current.protectedMemory || merge_in.protectedMemory; +} +#endif //(defined(VK_VERSION_1_1)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) +void compare_VkPhysicalDevice16BitStorageFeatures(std::vector & error_list, VkPhysicalDevice16BitStorageFeatures const& supported, VkPhysicalDevice16BitStorageFeatures const& requested) { + if (requested.storageBuffer16BitAccess && !supported.storageBuffer16BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeatures::storageBuffer16BitAccess"); + } + if (requested.uniformAndStorageBuffer16BitAccess && !supported.uniformAndStorageBuffer16BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeatures::uniformAndStorageBuffer16BitAccess"); + } + if (requested.storagePushConstant16 && !supported.storagePushConstant16) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeatures::storagePushConstant16"); + } + if (requested.storageInputOutput16 && !supported.storageInputOutput16) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeatures::storageInputOutput16"); + } +} +void merge_VkPhysicalDevice16BitStorageFeatures(VkPhysicalDevice16BitStorageFeatures & current, VkPhysicalDevice16BitStorageFeatures const& merge_in) { + current.storageBuffer16BitAccess = current.storageBuffer16BitAccess || merge_in.storageBuffer16BitAccess; + current.uniformAndStorageBuffer16BitAccess = current.uniformAndStorageBuffer16BitAccess || merge_in.uniformAndStorageBuffer16BitAccess; + current.storagePushConstant16 = current.storagePushConstant16 || merge_in.storagePushConstant16; + current.storageInputOutput16 = current.storageInputOutput16 || merge_in.storageInputOutput16; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) +void 
compare_VkPhysicalDevice16BitStorageFeaturesKHR(std::vector & error_list, VkPhysicalDevice16BitStorageFeaturesKHR const& supported, VkPhysicalDevice16BitStorageFeaturesKHR const& requested) { + if (requested.storageBuffer16BitAccess && !supported.storageBuffer16BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeaturesKHR::storageBuffer16BitAccess"); + } + if (requested.uniformAndStorageBuffer16BitAccess && !supported.uniformAndStorageBuffer16BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeaturesKHR::uniformAndStorageBuffer16BitAccess"); + } + if (requested.storagePushConstant16 && !supported.storagePushConstant16) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeaturesKHR::storagePushConstant16"); + } + if (requested.storageInputOutput16 && !supported.storageInputOutput16) { + error_list.push_back("Missing feature VkPhysicalDevice16BitStorageFeaturesKHR::storageInputOutput16"); + } +} +void merge_VkPhysicalDevice16BitStorageFeaturesKHR(VkPhysicalDevice16BitStorageFeaturesKHR & current, VkPhysicalDevice16BitStorageFeaturesKHR const& merge_in) { + current.storageBuffer16BitAccess = current.storageBuffer16BitAccess || merge_in.storageBuffer16BitAccess; + current.uniformAndStorageBuffer16BitAccess = current.uniformAndStorageBuffer16BitAccess || merge_in.uniformAndStorageBuffer16BitAccess; + current.storagePushConstant16 = current.storagePushConstant16 || merge_in.storagePushConstant16; + current.storageInputOutput16 = current.storageInputOutput16 || merge_in.storageInputOutput16; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +void compare_VkPhysicalDeviceVariablePointersFeatures(std::vector & error_list, VkPhysicalDeviceVariablePointersFeatures const& supported, VkPhysicalDeviceVariablePointersFeatures const& requested) { + if (requested.variablePointersStorageBuffer && 
!supported.variablePointersStorageBuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointersFeatures::variablePointersStorageBuffer"); + } + if (requested.variablePointers && !supported.variablePointers) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointersFeatures::variablePointers"); + } +} +void merge_VkPhysicalDeviceVariablePointersFeatures(VkPhysicalDeviceVariablePointersFeatures & current, VkPhysicalDeviceVariablePointersFeatures const& merge_in) { + current.variablePointersStorageBuffer = current.variablePointersStorageBuffer || merge_in.variablePointersStorageBuffer; + current.variablePointers = current.variablePointers || merge_in.variablePointers; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +void compare_VkPhysicalDeviceVariablePointerFeatures(std::vector & error_list, VkPhysicalDeviceVariablePointerFeatures const& supported, VkPhysicalDeviceVariablePointerFeatures const& requested) { + if (requested.variablePointersStorageBuffer && !supported.variablePointersStorageBuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointerFeatures::variablePointersStorageBuffer"); + } + if (requested.variablePointers && !supported.variablePointers) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointerFeatures::variablePointers"); + } +} +void merge_VkPhysicalDeviceVariablePointerFeatures(VkPhysicalDeviceVariablePointerFeatures & current, VkPhysicalDeviceVariablePointerFeatures const& merge_in) { + current.variablePointersStorageBuffer = current.variablePointersStorageBuffer || merge_in.variablePointersStorageBuffer; + current.variablePointers = current.variablePointers || merge_in.variablePointers; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +void 
compare_VkPhysicalDeviceVariablePointerFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVariablePointerFeaturesKHR const& supported, VkPhysicalDeviceVariablePointerFeaturesKHR const& requested) { + if (requested.variablePointersStorageBuffer && !supported.variablePointersStorageBuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointerFeaturesKHR::variablePointersStorageBuffer"); + } + if (requested.variablePointers && !supported.variablePointers) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointerFeaturesKHR::variablePointers"); + } +} +void merge_VkPhysicalDeviceVariablePointerFeaturesKHR(VkPhysicalDeviceVariablePointerFeaturesKHR & current, VkPhysicalDeviceVariablePointerFeaturesKHR const& merge_in) { + current.variablePointersStorageBuffer = current.variablePointersStorageBuffer || merge_in.variablePointersStorageBuffer; + current.variablePointers = current.variablePointers || merge_in.variablePointers; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +void compare_VkPhysicalDeviceVariablePointersFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVariablePointersFeaturesKHR const& supported, VkPhysicalDeviceVariablePointersFeaturesKHR const& requested) { + if (requested.variablePointersStorageBuffer && !supported.variablePointersStorageBuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointersFeaturesKHR::variablePointersStorageBuffer"); + } + if (requested.variablePointers && !supported.variablePointers) { + error_list.push_back("Missing feature VkPhysicalDeviceVariablePointersFeaturesKHR::variablePointers"); + } +} +void merge_VkPhysicalDeviceVariablePointersFeaturesKHR(VkPhysicalDeviceVariablePointersFeaturesKHR & current, VkPhysicalDeviceVariablePointersFeaturesKHR const& merge_in) { + current.variablePointersStorageBuffer = current.variablePointersStorageBuffer || 
merge_in.variablePointersStorageBuffer; + current.variablePointers = current.variablePointers || merge_in.variablePointers; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) +void compare_VkPhysicalDeviceSamplerYcbcrConversionFeatures(std::vector & error_list, VkPhysicalDeviceSamplerYcbcrConversionFeatures const& supported, VkPhysicalDeviceSamplerYcbcrConversionFeatures const& requested) { + if (requested.samplerYcbcrConversion && !supported.samplerYcbcrConversion) { + error_list.push_back("Missing feature VkPhysicalDeviceSamplerYcbcrConversionFeatures::samplerYcbcrConversion"); + } +} +void merge_VkPhysicalDeviceSamplerYcbcrConversionFeatures(VkPhysicalDeviceSamplerYcbcrConversionFeatures & current, VkPhysicalDeviceSamplerYcbcrConversionFeatures const& merge_in) { + current.samplerYcbcrConversion = current.samplerYcbcrConversion || merge_in.samplerYcbcrConversion; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) +void compare_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR(std::vector & error_list, VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR const& supported, VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR const& requested) { + if (requested.samplerYcbcrConversion && !supported.samplerYcbcrConversion) { + error_list.push_back("Missing feature VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR::samplerYcbcrConversion"); + } +} +void merge_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR(VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR & current, VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR const& merge_in) { + current.samplerYcbcrConversion = current.samplerYcbcrConversion || merge_in.samplerYcbcrConversion; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) +#if (defined(VK_VERSION_1_1) || 
defined(VK_KHR_multiview)) +void compare_VkPhysicalDeviceMultiviewFeatures(std::vector & error_list, VkPhysicalDeviceMultiviewFeatures const& supported, VkPhysicalDeviceMultiviewFeatures const& requested) { + if (requested.multiview && !supported.multiview) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiviewFeatures::multiview"); + } + if (requested.multiviewGeometryShader && !supported.multiviewGeometryShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiviewFeatures::multiviewGeometryShader"); + } + if (requested.multiviewTessellationShader && !supported.multiviewTessellationShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiviewFeatures::multiviewTessellationShader"); + } +} +void merge_VkPhysicalDeviceMultiviewFeatures(VkPhysicalDeviceMultiviewFeatures & current, VkPhysicalDeviceMultiviewFeatures const& merge_in) { + current.multiview = current.multiview || merge_in.multiview; + current.multiviewGeometryShader = current.multiviewGeometryShader || merge_in.multiviewGeometryShader; + current.multiviewTessellationShader = current.multiviewTessellationShader || merge_in.multiviewTessellationShader; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_multiview)) +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_multiview)) +void compare_VkPhysicalDeviceMultiviewFeaturesKHR(std::vector & error_list, VkPhysicalDeviceMultiviewFeaturesKHR const& supported, VkPhysicalDeviceMultiviewFeaturesKHR const& requested) { + if (requested.multiview && !supported.multiview) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiviewFeaturesKHR::multiview"); + } + if (requested.multiviewGeometryShader && !supported.multiviewGeometryShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiviewFeaturesKHR::multiviewGeometryShader"); + } + if (requested.multiviewTessellationShader && !supported.multiviewTessellationShader) { + error_list.push_back("Missing feature 
VkPhysicalDeviceMultiviewFeaturesKHR::multiviewTessellationShader"); + } +} +void merge_VkPhysicalDeviceMultiviewFeaturesKHR(VkPhysicalDeviceMultiviewFeaturesKHR & current, VkPhysicalDeviceMultiviewFeaturesKHR const& merge_in) { + current.multiview = current.multiview || merge_in.multiview; + current.multiviewGeometryShader = current.multiviewGeometryShader || merge_in.multiviewGeometryShader; + current.multiviewTessellationShader = current.multiviewTessellationShader || merge_in.multiviewTessellationShader; +} +#endif //(defined(VK_VERSION_1_1) || defined(VK_KHR_multiview)) +#if (defined(VK_VERSION_1_1)) +void compare_VkPhysicalDeviceShaderDrawParametersFeatures(std::vector & error_list, VkPhysicalDeviceShaderDrawParametersFeatures const& supported, VkPhysicalDeviceShaderDrawParametersFeatures const& requested) { + if (requested.shaderDrawParameters && !supported.shaderDrawParameters) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderDrawParametersFeatures::shaderDrawParameters"); + } +} +void merge_VkPhysicalDeviceShaderDrawParametersFeatures(VkPhysicalDeviceShaderDrawParametersFeatures & current, VkPhysicalDeviceShaderDrawParametersFeatures const& merge_in) { + current.shaderDrawParameters = current.shaderDrawParameters || merge_in.shaderDrawParameters; +} +#endif //(defined(VK_VERSION_1_1)) +#if (defined(VK_VERSION_1_1)) +void compare_VkPhysicalDeviceShaderDrawParameterFeatures(std::vector & error_list, VkPhysicalDeviceShaderDrawParameterFeatures const& supported, VkPhysicalDeviceShaderDrawParameterFeatures const& requested) { + if (requested.shaderDrawParameters && !supported.shaderDrawParameters) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderDrawParameterFeatures::shaderDrawParameters"); + } +} +void merge_VkPhysicalDeviceShaderDrawParameterFeatures(VkPhysicalDeviceShaderDrawParameterFeatures & current, VkPhysicalDeviceShaderDrawParameterFeatures const& merge_in) { + current.shaderDrawParameters = current.shaderDrawParameters 
|| merge_in.shaderDrawParameters; +} +#endif //(defined(VK_VERSION_1_1)) +#if (defined(VK_VERSION_1_2)) +void compare_VkPhysicalDeviceVulkan11Features(std::vector & error_list, VkPhysicalDeviceVulkan11Features const& supported, VkPhysicalDeviceVulkan11Features const& requested) { + if (requested.storageBuffer16BitAccess && !supported.storageBuffer16BitAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::storageBuffer16BitAccess"); + } + if (requested.uniformAndStorageBuffer16BitAccess && !supported.uniformAndStorageBuffer16BitAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::uniformAndStorageBuffer16BitAccess"); + } + if (requested.storagePushConstant16 && !supported.storagePushConstant16) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::storagePushConstant16"); + } + if (requested.storageInputOutput16 && !supported.storageInputOutput16) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::storageInputOutput16"); + } + if (requested.multiview && !supported.multiview) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::multiview"); + } + if (requested.multiviewGeometryShader && !supported.multiviewGeometryShader) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::multiviewGeometryShader"); + } + if (requested.multiviewTessellationShader && !supported.multiviewTessellationShader) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::multiviewTessellationShader"); + } + if (requested.variablePointersStorageBuffer && !supported.variablePointersStorageBuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::variablePointersStorageBuffer"); + } + if (requested.variablePointers && !supported.variablePointers) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::variablePointers"); + } + if (requested.protectedMemory && 
!supported.protectedMemory) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::protectedMemory"); + } + if (requested.samplerYcbcrConversion && !supported.samplerYcbcrConversion) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::samplerYcbcrConversion"); + } + if (requested.shaderDrawParameters && !supported.shaderDrawParameters) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan11Features::shaderDrawParameters"); + } +} +void merge_VkPhysicalDeviceVulkan11Features(VkPhysicalDeviceVulkan11Features & current, VkPhysicalDeviceVulkan11Features const& merge_in) { + current.storageBuffer16BitAccess = current.storageBuffer16BitAccess || merge_in.storageBuffer16BitAccess; + current.uniformAndStorageBuffer16BitAccess = current.uniformAndStorageBuffer16BitAccess || merge_in.uniformAndStorageBuffer16BitAccess; + current.storagePushConstant16 = current.storagePushConstant16 || merge_in.storagePushConstant16; + current.storageInputOutput16 = current.storageInputOutput16 || merge_in.storageInputOutput16; + current.multiview = current.multiview || merge_in.multiview; + current.multiviewGeometryShader = current.multiviewGeometryShader || merge_in.multiviewGeometryShader; + current.multiviewTessellationShader = current.multiviewTessellationShader || merge_in.multiviewTessellationShader; + current.variablePointersStorageBuffer = current.variablePointersStorageBuffer || merge_in.variablePointersStorageBuffer; + current.variablePointers = current.variablePointers || merge_in.variablePointers; + current.protectedMemory = current.protectedMemory || merge_in.protectedMemory; + current.samplerYcbcrConversion = current.samplerYcbcrConversion || merge_in.samplerYcbcrConversion; + current.shaderDrawParameters = current.shaderDrawParameters || merge_in.shaderDrawParameters; +} +#endif //(defined(VK_VERSION_1_2)) +#if (defined(VK_VERSION_1_2)) +void compare_VkPhysicalDeviceVulkan12Features(std::vector & error_list, 
VkPhysicalDeviceVulkan12Features const& supported, VkPhysicalDeviceVulkan12Features const& requested) {
    // One check per VkBool32 member: flag anything requested but unsupported.
    if (requested.samplerMirrorClampToEdge && !supported.samplerMirrorClampToEdge) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::samplerMirrorClampToEdge");
    }
    if (requested.drawIndirectCount && !supported.drawIndirectCount) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::drawIndirectCount");
    }
    if (requested.storageBuffer8BitAccess && !supported.storageBuffer8BitAccess) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::storageBuffer8BitAccess");
    }
    if (requested.uniformAndStorageBuffer8BitAccess && !supported.uniformAndStorageBuffer8BitAccess) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::uniformAndStorageBuffer8BitAccess");
    }
    if (requested.storagePushConstant8 && !supported.storagePushConstant8) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::storagePushConstant8");
    }
    if (requested.shaderBufferInt64Atomics && !supported.shaderBufferInt64Atomics) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderBufferInt64Atomics");
    }
    if (requested.shaderSharedInt64Atomics && !supported.shaderSharedInt64Atomics) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderSharedInt64Atomics");
    }
    if (requested.shaderFloat16 && !supported.shaderFloat16) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderFloat16");
    }
    if (requested.shaderInt8 && !supported.shaderInt8) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderInt8");
    }
    if (requested.descriptorIndexing && !supported.descriptorIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorIndexing");
    }
    if (requested.shaderInputAttachmentArrayDynamicIndexing && !supported.shaderInputAttachmentArrayDynamicIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderInputAttachmentArrayDynamicIndexing");
    }
    if (requested.shaderUniformTexelBufferArrayDynamicIndexing && !supported.shaderUniformTexelBufferArrayDynamicIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderUniformTexelBufferArrayDynamicIndexing");
    }
    if (requested.shaderStorageTexelBufferArrayDynamicIndexing && !supported.shaderStorageTexelBufferArrayDynamicIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderStorageTexelBufferArrayDynamicIndexing");
    }
    if (requested.shaderUniformBufferArrayNonUniformIndexing && !supported.shaderUniformBufferArrayNonUniformIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderUniformBufferArrayNonUniformIndexing");
    }
    if (requested.shaderSampledImageArrayNonUniformIndexing && !supported.shaderSampledImageArrayNonUniformIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderSampledImageArrayNonUniformIndexing");
    }
    if (requested.shaderStorageBufferArrayNonUniformIndexing && !supported.shaderStorageBufferArrayNonUniformIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderStorageBufferArrayNonUniformIndexing");
    }
    if (requested.shaderStorageImageArrayNonUniformIndexing && !supported.shaderStorageImageArrayNonUniformIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderStorageImageArrayNonUniformIndexing");
    }
    if (requested.shaderInputAttachmentArrayNonUniformIndexing && !supported.shaderInputAttachmentArrayNonUniformIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderInputAttachmentArrayNonUniformIndexing");
    }
    if (requested.shaderUniformTexelBufferArrayNonUniformIndexing && !supported.shaderUniformTexelBufferArrayNonUniformIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderUniformTexelBufferArrayNonUniformIndexing");
    }
    if (requested.shaderStorageTexelBufferArrayNonUniformIndexing && !supported.shaderStorageTexelBufferArrayNonUniformIndexing) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderStorageTexelBufferArrayNonUniformIndexing");
    }
    if (requested.descriptorBindingUniformBufferUpdateAfterBind && !supported.descriptorBindingUniformBufferUpdateAfterBind) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingUniformBufferUpdateAfterBind");
    }
    if (requested.descriptorBindingSampledImageUpdateAfterBind && !supported.descriptorBindingSampledImageUpdateAfterBind) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingSampledImageUpdateAfterBind");
    }
    if (requested.descriptorBindingStorageImageUpdateAfterBind && !supported.descriptorBindingStorageImageUpdateAfterBind) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingStorageImageUpdateAfterBind");
    }
    if (requested.descriptorBindingStorageBufferUpdateAfterBind && !supported.descriptorBindingStorageBufferUpdateAfterBind) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingStorageBufferUpdateAfterBind");
    }
    if (requested.descriptorBindingUniformTexelBufferUpdateAfterBind && !supported.descriptorBindingUniformTexelBufferUpdateAfterBind) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingUniformTexelBufferUpdateAfterBind");
    }
    if (requested.descriptorBindingStorageTexelBufferUpdateAfterBind && !supported.descriptorBindingStorageTexelBufferUpdateAfterBind) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingStorageTexelBufferUpdateAfterBind");
    }
    if (requested.descriptorBindingUpdateUnusedWhilePending && !supported.descriptorBindingUpdateUnusedWhilePending) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingUpdateUnusedWhilePending");
    }
    if (requested.descriptorBindingPartiallyBound && !supported.descriptorBindingPartiallyBound) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingPartiallyBound");
    }
    if (requested.descriptorBindingVariableDescriptorCount && !supported.descriptorBindingVariableDescriptorCount) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::descriptorBindingVariableDescriptorCount");
    }
    if (requested.runtimeDescriptorArray && !supported.runtimeDescriptorArray) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::runtimeDescriptorArray");
    }
    if (requested.samplerFilterMinmax && !supported.samplerFilterMinmax) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::samplerFilterMinmax");
    }
    if (requested.scalarBlockLayout && !supported.scalarBlockLayout) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::scalarBlockLayout");
    }
    if (requested.imagelessFramebuffer && !supported.imagelessFramebuffer) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::imagelessFramebuffer");
    }
    if (requested.uniformBufferStandardLayout && !supported.uniformBufferStandardLayout) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::uniformBufferStandardLayout");
    }
    if (requested.shaderSubgroupExtendedTypes && !supported.shaderSubgroupExtendedTypes) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderSubgroupExtendedTypes");
    }
    if (requested.separateDepthStencilLayouts && !supported.separateDepthStencilLayouts) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::separateDepthStencilLayouts");
    }
    if (requested.hostQueryReset && !supported.hostQueryReset) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::hostQueryReset");
    }
    if (requested.timelineSemaphore && !supported.timelineSemaphore) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::timelineSemaphore");
    }
    if (requested.bufferDeviceAddress && !supported.bufferDeviceAddress) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::bufferDeviceAddress");
    }
    if (requested.bufferDeviceAddressCaptureReplay && !supported.bufferDeviceAddressCaptureReplay) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::bufferDeviceAddressCaptureReplay");
    }
    if (requested.bufferDeviceAddressMultiDevice && !supported.bufferDeviceAddressMultiDevice) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::bufferDeviceAddressMultiDevice");
    }
    if (requested.vulkanMemoryModel && !supported.vulkanMemoryModel) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::vulkanMemoryModel");
    }
    if (requested.vulkanMemoryModelDeviceScope && !supported.vulkanMemoryModelDeviceScope) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::vulkanMemoryModelDeviceScope");
    }
    if (requested.vulkanMemoryModelAvailabilityVisibilityChains && !supported.vulkanMemoryModelAvailabilityVisibilityChains) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::vulkanMemoryModelAvailabilityVisibilityChains");
    }
    if (requested.shaderOutputViewportIndex && !supported.shaderOutputViewportIndex) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderOutputViewportIndex");
    }
    if (requested.shaderOutputLayer && !supported.shaderOutputLayer) {
        error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::shaderOutputLayer");
    }
    if (requested.subgroupBroadcastDynamicId && !supported.subgroupBroadcastDynamicId) {
error_list.push_back("Missing feature VkPhysicalDeviceVulkan12Features::subgroupBroadcastDynamicId"); + } +} +void merge_VkPhysicalDeviceVulkan12Features(VkPhysicalDeviceVulkan12Features & current, VkPhysicalDeviceVulkan12Features const& merge_in) { + current.samplerMirrorClampToEdge = current.samplerMirrorClampToEdge || merge_in.samplerMirrorClampToEdge; + current.drawIndirectCount = current.drawIndirectCount || merge_in.drawIndirectCount; + current.storageBuffer8BitAccess = current.storageBuffer8BitAccess || merge_in.storageBuffer8BitAccess; + current.uniformAndStorageBuffer8BitAccess = current.uniformAndStorageBuffer8BitAccess || merge_in.uniformAndStorageBuffer8BitAccess; + current.storagePushConstant8 = current.storagePushConstant8 || merge_in.storagePushConstant8; + current.shaderBufferInt64Atomics = current.shaderBufferInt64Atomics || merge_in.shaderBufferInt64Atomics; + current.shaderSharedInt64Atomics = current.shaderSharedInt64Atomics || merge_in.shaderSharedInt64Atomics; + current.shaderFloat16 = current.shaderFloat16 || merge_in.shaderFloat16; + current.shaderInt8 = current.shaderInt8 || merge_in.shaderInt8; + current.descriptorIndexing = current.descriptorIndexing || merge_in.descriptorIndexing; + current.shaderInputAttachmentArrayDynamicIndexing = current.shaderInputAttachmentArrayDynamicIndexing || merge_in.shaderInputAttachmentArrayDynamicIndexing; + current.shaderUniformTexelBufferArrayDynamicIndexing = current.shaderUniformTexelBufferArrayDynamicIndexing || merge_in.shaderUniformTexelBufferArrayDynamicIndexing; + current.shaderStorageTexelBufferArrayDynamicIndexing = current.shaderStorageTexelBufferArrayDynamicIndexing || merge_in.shaderStorageTexelBufferArrayDynamicIndexing; + current.shaderUniformBufferArrayNonUniformIndexing = current.shaderUniformBufferArrayNonUniformIndexing || merge_in.shaderUniformBufferArrayNonUniformIndexing; + current.shaderSampledImageArrayNonUniformIndexing = current.shaderSampledImageArrayNonUniformIndexing || 
merge_in.shaderSampledImageArrayNonUniformIndexing; + current.shaderStorageBufferArrayNonUniformIndexing = current.shaderStorageBufferArrayNonUniformIndexing || merge_in.shaderStorageBufferArrayNonUniformIndexing; + current.shaderStorageImageArrayNonUniformIndexing = current.shaderStorageImageArrayNonUniformIndexing || merge_in.shaderStorageImageArrayNonUniformIndexing; + current.shaderInputAttachmentArrayNonUniformIndexing = current.shaderInputAttachmentArrayNonUniformIndexing || merge_in.shaderInputAttachmentArrayNonUniformIndexing; + current.shaderUniformTexelBufferArrayNonUniformIndexing = current.shaderUniformTexelBufferArrayNonUniformIndexing || merge_in.shaderUniformTexelBufferArrayNonUniformIndexing; + current.shaderStorageTexelBufferArrayNonUniformIndexing = current.shaderStorageTexelBufferArrayNonUniformIndexing || merge_in.shaderStorageTexelBufferArrayNonUniformIndexing; + current.descriptorBindingUniformBufferUpdateAfterBind = current.descriptorBindingUniformBufferUpdateAfterBind || merge_in.descriptorBindingUniformBufferUpdateAfterBind; + current.descriptorBindingSampledImageUpdateAfterBind = current.descriptorBindingSampledImageUpdateAfterBind || merge_in.descriptorBindingSampledImageUpdateAfterBind; + current.descriptorBindingStorageImageUpdateAfterBind = current.descriptorBindingStorageImageUpdateAfterBind || merge_in.descriptorBindingStorageImageUpdateAfterBind; + current.descriptorBindingStorageBufferUpdateAfterBind = current.descriptorBindingStorageBufferUpdateAfterBind || merge_in.descriptorBindingStorageBufferUpdateAfterBind; + current.descriptorBindingUniformTexelBufferUpdateAfterBind = current.descriptorBindingUniformTexelBufferUpdateAfterBind || merge_in.descriptorBindingUniformTexelBufferUpdateAfterBind; + current.descriptorBindingStorageTexelBufferUpdateAfterBind = current.descriptorBindingStorageTexelBufferUpdateAfterBind || merge_in.descriptorBindingStorageTexelBufferUpdateAfterBind; + current.descriptorBindingUpdateUnusedWhilePending = 
current.descriptorBindingUpdateUnusedWhilePending || merge_in.descriptorBindingUpdateUnusedWhilePending; + current.descriptorBindingPartiallyBound = current.descriptorBindingPartiallyBound || merge_in.descriptorBindingPartiallyBound; + current.descriptorBindingVariableDescriptorCount = current.descriptorBindingVariableDescriptorCount || merge_in.descriptorBindingVariableDescriptorCount; + current.runtimeDescriptorArray = current.runtimeDescriptorArray || merge_in.runtimeDescriptorArray; + current.samplerFilterMinmax = current.samplerFilterMinmax || merge_in.samplerFilterMinmax; + current.scalarBlockLayout = current.scalarBlockLayout || merge_in.scalarBlockLayout; + current.imagelessFramebuffer = current.imagelessFramebuffer || merge_in.imagelessFramebuffer; + current.uniformBufferStandardLayout = current.uniformBufferStandardLayout || merge_in.uniformBufferStandardLayout; + current.shaderSubgroupExtendedTypes = current.shaderSubgroupExtendedTypes || merge_in.shaderSubgroupExtendedTypes; + current.separateDepthStencilLayouts = current.separateDepthStencilLayouts || merge_in.separateDepthStencilLayouts; + current.hostQueryReset = current.hostQueryReset || merge_in.hostQueryReset; + current.timelineSemaphore = current.timelineSemaphore || merge_in.timelineSemaphore; + current.bufferDeviceAddress = current.bufferDeviceAddress || merge_in.bufferDeviceAddress; + current.bufferDeviceAddressCaptureReplay = current.bufferDeviceAddressCaptureReplay || merge_in.bufferDeviceAddressCaptureReplay; + current.bufferDeviceAddressMultiDevice = current.bufferDeviceAddressMultiDevice || merge_in.bufferDeviceAddressMultiDevice; + current.vulkanMemoryModel = current.vulkanMemoryModel || merge_in.vulkanMemoryModel; + current.vulkanMemoryModelDeviceScope = current.vulkanMemoryModelDeviceScope || merge_in.vulkanMemoryModelDeviceScope; + current.vulkanMemoryModelAvailabilityVisibilityChains = current.vulkanMemoryModelAvailabilityVisibilityChains || 
merge_in.vulkanMemoryModelAvailabilityVisibilityChains; + current.shaderOutputViewportIndex = current.shaderOutputViewportIndex || merge_in.shaderOutputViewportIndex; + current.shaderOutputLayer = current.shaderOutputLayer || merge_in.shaderOutputLayer; + current.subgroupBroadcastDynamicId = current.subgroupBroadcastDynamicId || merge_in.subgroupBroadcastDynamicId; +} +#endif //(defined(VK_VERSION_1_2)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model)) +void compare_VkPhysicalDeviceVulkanMemoryModelFeatures(std::vector & error_list, VkPhysicalDeviceVulkanMemoryModelFeatures const& supported, VkPhysicalDeviceVulkanMemoryModelFeatures const& requested) { + if (requested.vulkanMemoryModel && !supported.vulkanMemoryModel) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkanMemoryModelFeatures::vulkanMemoryModel"); + } + if (requested.vulkanMemoryModelDeviceScope && !supported.vulkanMemoryModelDeviceScope) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkanMemoryModelFeatures::vulkanMemoryModelDeviceScope"); + } + if (requested.vulkanMemoryModelAvailabilityVisibilityChains && !supported.vulkanMemoryModelAvailabilityVisibilityChains) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkanMemoryModelFeatures::vulkanMemoryModelAvailabilityVisibilityChains"); + } +} +void merge_VkPhysicalDeviceVulkanMemoryModelFeatures(VkPhysicalDeviceVulkanMemoryModelFeatures & current, VkPhysicalDeviceVulkanMemoryModelFeatures const& merge_in) { + current.vulkanMemoryModel = current.vulkanMemoryModel || merge_in.vulkanMemoryModel; + current.vulkanMemoryModelDeviceScope = current.vulkanMemoryModelDeviceScope || merge_in.vulkanMemoryModelDeviceScope; + current.vulkanMemoryModelAvailabilityVisibilityChains = current.vulkanMemoryModelAvailabilityVisibilityChains || merge_in.vulkanMemoryModelAvailabilityVisibilityChains; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model)) +#if (defined(VK_VERSION_1_2) || 
defined(VK_KHR_vulkan_memory_model)) +void compare_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVulkanMemoryModelFeaturesKHR const& supported, VkPhysicalDeviceVulkanMemoryModelFeaturesKHR const& requested) { + if (requested.vulkanMemoryModel && !supported.vulkanMemoryModel) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkanMemoryModelFeaturesKHR::vulkanMemoryModel"); + } + if (requested.vulkanMemoryModelDeviceScope && !supported.vulkanMemoryModelDeviceScope) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkanMemoryModelFeaturesKHR::vulkanMemoryModelDeviceScope"); + } + if (requested.vulkanMemoryModelAvailabilityVisibilityChains && !supported.vulkanMemoryModelAvailabilityVisibilityChains) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkanMemoryModelFeaturesKHR::vulkanMemoryModelAvailabilityVisibilityChains"); + } +} +void merge_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR(VkPhysicalDeviceVulkanMemoryModelFeaturesKHR & current, VkPhysicalDeviceVulkanMemoryModelFeaturesKHR const& merge_in) { + current.vulkanMemoryModel = current.vulkanMemoryModel || merge_in.vulkanMemoryModel; + current.vulkanMemoryModelDeviceScope = current.vulkanMemoryModelDeviceScope || merge_in.vulkanMemoryModelDeviceScope; + current.vulkanMemoryModelAvailabilityVisibilityChains = current.vulkanMemoryModelAvailabilityVisibilityChains || merge_in.vulkanMemoryModelAvailabilityVisibilityChains; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model)) +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) +void compare_VkPhysicalDeviceHostQueryResetFeatures(std::vector & error_list, VkPhysicalDeviceHostQueryResetFeatures const& supported, VkPhysicalDeviceHostQueryResetFeatures const& requested) { + if (requested.hostQueryReset && !supported.hostQueryReset) { + error_list.push_back("Missing feature VkPhysicalDeviceHostQueryResetFeatures::hostQueryReset"); + } +} +void 
merge_VkPhysicalDeviceHostQueryResetFeatures(VkPhysicalDeviceHostQueryResetFeatures & current, VkPhysicalDeviceHostQueryResetFeatures const& merge_in) { + current.hostQueryReset = current.hostQueryReset || merge_in.hostQueryReset; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) +void compare_VkPhysicalDeviceHostQueryResetFeaturesEXT(std::vector & error_list, VkPhysicalDeviceHostQueryResetFeaturesEXT const& supported, VkPhysicalDeviceHostQueryResetFeaturesEXT const& requested) { + if (requested.hostQueryReset && !supported.hostQueryReset) { + error_list.push_back("Missing feature VkPhysicalDeviceHostQueryResetFeaturesEXT::hostQueryReset"); + } +} +void merge_VkPhysicalDeviceHostQueryResetFeaturesEXT(VkPhysicalDeviceHostQueryResetFeaturesEXT & current, VkPhysicalDeviceHostQueryResetFeaturesEXT const& merge_in) { + current.hostQueryReset = current.hostQueryReset || merge_in.hostQueryReset; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) +void compare_VkPhysicalDeviceTimelineSemaphoreFeatures(std::vector & error_list, VkPhysicalDeviceTimelineSemaphoreFeatures const& supported, VkPhysicalDeviceTimelineSemaphoreFeatures const& requested) { + if (requested.timelineSemaphore && !supported.timelineSemaphore) { + error_list.push_back("Missing feature VkPhysicalDeviceTimelineSemaphoreFeatures::timelineSemaphore"); + } +} +void merge_VkPhysicalDeviceTimelineSemaphoreFeatures(VkPhysicalDeviceTimelineSemaphoreFeatures & current, VkPhysicalDeviceTimelineSemaphoreFeatures const& merge_in) { + current.timelineSemaphore = current.timelineSemaphore || merge_in.timelineSemaphore; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) +void compare_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR(std::vector & 
error_list, VkPhysicalDeviceTimelineSemaphoreFeaturesKHR const& supported, VkPhysicalDeviceTimelineSemaphoreFeaturesKHR const& requested) { + if (requested.timelineSemaphore && !supported.timelineSemaphore) { + error_list.push_back("Missing feature VkPhysicalDeviceTimelineSemaphoreFeaturesKHR::timelineSemaphore"); + } +} +void merge_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR(VkPhysicalDeviceTimelineSemaphoreFeaturesKHR & current, VkPhysicalDeviceTimelineSemaphoreFeaturesKHR const& merge_in) { + current.timelineSemaphore = current.timelineSemaphore || merge_in.timelineSemaphore; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) +void compare_VkPhysicalDeviceBufferDeviceAddressFeatures(std::vector & error_list, VkPhysicalDeviceBufferDeviceAddressFeatures const& supported, VkPhysicalDeviceBufferDeviceAddressFeatures const& requested) { + if (requested.bufferDeviceAddress && !supported.bufferDeviceAddress) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress"); + } + if (requested.bufferDeviceAddressCaptureReplay && !supported.bufferDeviceAddressCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddressCaptureReplay"); + } + if (requested.bufferDeviceAddressMultiDevice && !supported.bufferDeviceAddressMultiDevice) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddressMultiDevice"); + } +} +void merge_VkPhysicalDeviceBufferDeviceAddressFeatures(VkPhysicalDeviceBufferDeviceAddressFeatures & current, VkPhysicalDeviceBufferDeviceAddressFeatures const& merge_in) { + current.bufferDeviceAddress = current.bufferDeviceAddress || merge_in.bufferDeviceAddress; + current.bufferDeviceAddressCaptureReplay = current.bufferDeviceAddressCaptureReplay || merge_in.bufferDeviceAddressCaptureReplay; + 
current.bufferDeviceAddressMultiDevice = current.bufferDeviceAddressMultiDevice || merge_in.bufferDeviceAddressMultiDevice; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) +void compare_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR(std::vector & error_list, VkPhysicalDeviceBufferDeviceAddressFeaturesKHR const& supported, VkPhysicalDeviceBufferDeviceAddressFeaturesKHR const& requested) { + if (requested.bufferDeviceAddress && !supported.bufferDeviceAddress) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeaturesKHR::bufferDeviceAddress"); + } + if (requested.bufferDeviceAddressCaptureReplay && !supported.bufferDeviceAddressCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeaturesKHR::bufferDeviceAddressCaptureReplay"); + } + if (requested.bufferDeviceAddressMultiDevice && !supported.bufferDeviceAddressMultiDevice) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeaturesKHR::bufferDeviceAddressMultiDevice"); + } +} +void merge_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR(VkPhysicalDeviceBufferDeviceAddressFeaturesKHR & current, VkPhysicalDeviceBufferDeviceAddressFeaturesKHR const& merge_in) { + current.bufferDeviceAddress = current.bufferDeviceAddress || merge_in.bufferDeviceAddress; + current.bufferDeviceAddressCaptureReplay = current.bufferDeviceAddressCaptureReplay || merge_in.bufferDeviceAddressCaptureReplay; + current.bufferDeviceAddressMultiDevice = current.bufferDeviceAddressMultiDevice || merge_in.bufferDeviceAddressMultiDevice; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) +void compare_VkPhysicalDevice8BitStorageFeatures(std::vector & error_list, VkPhysicalDevice8BitStorageFeatures const& supported, VkPhysicalDevice8BitStorageFeatures const& requested) { + 
if (requested.storageBuffer8BitAccess && !supported.storageBuffer8BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice8BitStorageFeatures::storageBuffer8BitAccess"); + } + if (requested.uniformAndStorageBuffer8BitAccess && !supported.uniformAndStorageBuffer8BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice8BitStorageFeatures::uniformAndStorageBuffer8BitAccess"); + } + if (requested.storagePushConstant8 && !supported.storagePushConstant8) { + error_list.push_back("Missing feature VkPhysicalDevice8BitStorageFeatures::storagePushConstant8"); + } +} +void merge_VkPhysicalDevice8BitStorageFeatures(VkPhysicalDevice8BitStorageFeatures & current, VkPhysicalDevice8BitStorageFeatures const& merge_in) { + current.storageBuffer8BitAccess = current.storageBuffer8BitAccess || merge_in.storageBuffer8BitAccess; + current.uniformAndStorageBuffer8BitAccess = current.uniformAndStorageBuffer8BitAccess || merge_in.uniformAndStorageBuffer8BitAccess; + current.storagePushConstant8 = current.storagePushConstant8 || merge_in.storagePushConstant8; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) +void compare_VkPhysicalDevice8BitStorageFeaturesKHR(std::vector & error_list, VkPhysicalDevice8BitStorageFeaturesKHR const& supported, VkPhysicalDevice8BitStorageFeaturesKHR const& requested) { + if (requested.storageBuffer8BitAccess && !supported.storageBuffer8BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice8BitStorageFeaturesKHR::storageBuffer8BitAccess"); + } + if (requested.uniformAndStorageBuffer8BitAccess && !supported.uniformAndStorageBuffer8BitAccess) { + error_list.push_back("Missing feature VkPhysicalDevice8BitStorageFeaturesKHR::uniformAndStorageBuffer8BitAccess"); + } + if (requested.storagePushConstant8 && !supported.storagePushConstant8) { + error_list.push_back("Missing feature VkPhysicalDevice8BitStorageFeaturesKHR::storagePushConstant8"); + 
} +} +void merge_VkPhysicalDevice8BitStorageFeaturesKHR(VkPhysicalDevice8BitStorageFeaturesKHR & current, VkPhysicalDevice8BitStorageFeaturesKHR const& merge_in) { + current.storageBuffer8BitAccess = current.storageBuffer8BitAccess || merge_in.storageBuffer8BitAccess; + current.uniformAndStorageBuffer8BitAccess = current.uniformAndStorageBuffer8BitAccess || merge_in.uniformAndStorageBuffer8BitAccess; + current.storagePushConstant8 = current.storagePushConstant8 || merge_in.storagePushConstant8; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) +void compare_VkPhysicalDeviceShaderAtomicInt64Features(std::vector & error_list, VkPhysicalDeviceShaderAtomicInt64Features const& supported, VkPhysicalDeviceShaderAtomicInt64Features const& requested) { + if (requested.shaderBufferInt64Atomics && !supported.shaderBufferInt64Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicInt64Features::shaderBufferInt64Atomics"); + } + if (requested.shaderSharedInt64Atomics && !supported.shaderSharedInt64Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicInt64Features::shaderSharedInt64Atomics"); + } +} +void merge_VkPhysicalDeviceShaderAtomicInt64Features(VkPhysicalDeviceShaderAtomicInt64Features & current, VkPhysicalDeviceShaderAtomicInt64Features const& merge_in) { + current.shaderBufferInt64Atomics = current.shaderBufferInt64Atomics || merge_in.shaderBufferInt64Atomics; + current.shaderSharedInt64Atomics = current.shaderSharedInt64Atomics || merge_in.shaderSharedInt64Atomics; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) +void compare_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderAtomicInt64FeaturesKHR const& supported, VkPhysicalDeviceShaderAtomicInt64FeaturesKHR const& requested) { + if 
(requested.shaderBufferInt64Atomics && !supported.shaderBufferInt64Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicInt64FeaturesKHR::shaderBufferInt64Atomics"); + } + if (requested.shaderSharedInt64Atomics && !supported.shaderSharedInt64Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicInt64FeaturesKHR::shaderSharedInt64Atomics"); + } +} +void merge_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR(VkPhysicalDeviceShaderAtomicInt64FeaturesKHR & current, VkPhysicalDeviceShaderAtomicInt64FeaturesKHR const& merge_in) { + current.shaderBufferInt64Atomics = current.shaderBufferInt64Atomics || merge_in.shaderBufferInt64Atomics; + current.shaderSharedInt64Atomics = current.shaderSharedInt64Atomics || merge_in.shaderSharedInt64Atomics; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) +void compare_VkPhysicalDeviceShaderFloat16Int8Features(std::vector & error_list, VkPhysicalDeviceShaderFloat16Int8Features const& supported, VkPhysicalDeviceShaderFloat16Int8Features const& requested) { + if (requested.shaderFloat16 && !supported.shaderFloat16) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloat16Int8Features::shaderFloat16"); + } + if (requested.shaderInt8 && !supported.shaderInt8) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloat16Int8Features::shaderInt8"); + } +} +void merge_VkPhysicalDeviceShaderFloat16Int8Features(VkPhysicalDeviceShaderFloat16Int8Features & current, VkPhysicalDeviceShaderFloat16Int8Features const& merge_in) { + current.shaderFloat16 = current.shaderFloat16 || merge_in.shaderFloat16; + current.shaderInt8 = current.shaderInt8 || merge_in.shaderInt8; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) +void compare_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR(std::vector & 
error_list, VkPhysicalDeviceShaderFloat16Int8FeaturesKHR const& supported, VkPhysicalDeviceShaderFloat16Int8FeaturesKHR const& requested) { + if (requested.shaderFloat16 && !supported.shaderFloat16) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloat16Int8FeaturesKHR::shaderFloat16"); + } + if (requested.shaderInt8 && !supported.shaderInt8) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloat16Int8FeaturesKHR::shaderInt8"); + } +} +void merge_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR(VkPhysicalDeviceShaderFloat16Int8FeaturesKHR & current, VkPhysicalDeviceShaderFloat16Int8FeaturesKHR const& merge_in) { + current.shaderFloat16 = current.shaderFloat16 || merge_in.shaderFloat16; + current.shaderInt8 = current.shaderInt8 || merge_in.shaderInt8; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) +void compare_VkPhysicalDeviceFloat16Int8FeaturesKHR(std::vector & error_list, VkPhysicalDeviceFloat16Int8FeaturesKHR const& supported, VkPhysicalDeviceFloat16Int8FeaturesKHR const& requested) { + if (requested.shaderFloat16 && !supported.shaderFloat16) { + error_list.push_back("Missing feature VkPhysicalDeviceFloat16Int8FeaturesKHR::shaderFloat16"); + } + if (requested.shaderInt8 && !supported.shaderInt8) { + error_list.push_back("Missing feature VkPhysicalDeviceFloat16Int8FeaturesKHR::shaderInt8"); + } +} +void merge_VkPhysicalDeviceFloat16Int8FeaturesKHR(VkPhysicalDeviceFloat16Int8FeaturesKHR & current, VkPhysicalDeviceFloat16Int8FeaturesKHR const& merge_in) { + current.shaderFloat16 = current.shaderFloat16 || merge_in.shaderFloat16; + current.shaderInt8 = current.shaderInt8 || merge_in.shaderInt8; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing)) +void compare_VkPhysicalDeviceDescriptorIndexingFeatures(std::vector & error_list, 
VkPhysicalDeviceDescriptorIndexingFeatures const& supported, VkPhysicalDeviceDescriptorIndexingFeatures const& requested) { + if (requested.shaderInputAttachmentArrayDynamicIndexing && !supported.shaderInputAttachmentArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderInputAttachmentArrayDynamicIndexing"); + } + if (requested.shaderUniformTexelBufferArrayDynamicIndexing && !supported.shaderUniformTexelBufferArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderUniformTexelBufferArrayDynamicIndexing"); + } + if (requested.shaderStorageTexelBufferArrayDynamicIndexing && !supported.shaderStorageTexelBufferArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderStorageTexelBufferArrayDynamicIndexing"); + } + if (requested.shaderUniformBufferArrayNonUniformIndexing && !supported.shaderUniformBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderUniformBufferArrayNonUniformIndexing"); + } + if (requested.shaderSampledImageArrayNonUniformIndexing && !supported.shaderSampledImageArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderSampledImageArrayNonUniformIndexing"); + } + if (requested.shaderStorageBufferArrayNonUniformIndexing && !supported.shaderStorageBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderStorageBufferArrayNonUniformIndexing"); + } + if (requested.shaderStorageImageArrayNonUniformIndexing && !supported.shaderStorageImageArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderStorageImageArrayNonUniformIndexing"); + } + if (requested.shaderInputAttachmentArrayNonUniformIndexing && 
!supported.shaderInputAttachmentArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderInputAttachmentArrayNonUniformIndexing"); + } + if (requested.shaderUniformTexelBufferArrayNonUniformIndexing && !supported.shaderUniformTexelBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderUniformTexelBufferArrayNonUniformIndexing"); + } + if (requested.shaderStorageTexelBufferArrayNonUniformIndexing && !supported.shaderStorageTexelBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::shaderStorageTexelBufferArrayNonUniformIndexing"); + } + if (requested.descriptorBindingUniformBufferUpdateAfterBind && !supported.descriptorBindingUniformBufferUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingUniformBufferUpdateAfterBind"); + } + if (requested.descriptorBindingSampledImageUpdateAfterBind && !supported.descriptorBindingSampledImageUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingSampledImageUpdateAfterBind"); + } + if (requested.descriptorBindingStorageImageUpdateAfterBind && !supported.descriptorBindingStorageImageUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingStorageImageUpdateAfterBind"); + } + if (requested.descriptorBindingStorageBufferUpdateAfterBind && !supported.descriptorBindingStorageBufferUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingStorageBufferUpdateAfterBind"); + } + if (requested.descriptorBindingUniformTexelBufferUpdateAfterBind && !supported.descriptorBindingUniformTexelBufferUpdateAfterBind) { + error_list.push_back("Missing feature 
VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingUniformTexelBufferUpdateAfterBind"); + } + if (requested.descriptorBindingStorageTexelBufferUpdateAfterBind && !supported.descriptorBindingStorageTexelBufferUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingStorageTexelBufferUpdateAfterBind"); + } + if (requested.descriptorBindingUpdateUnusedWhilePending && !supported.descriptorBindingUpdateUnusedWhilePending) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingUpdateUnusedWhilePending"); + } + if (requested.descriptorBindingPartiallyBound && !supported.descriptorBindingPartiallyBound) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingPartiallyBound"); + } + if (requested.descriptorBindingVariableDescriptorCount && !supported.descriptorBindingVariableDescriptorCount) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::descriptorBindingVariableDescriptorCount"); + } + if (requested.runtimeDescriptorArray && !supported.runtimeDescriptorArray) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeatures::runtimeDescriptorArray"); + } +} +void merge_VkPhysicalDeviceDescriptorIndexingFeatures(VkPhysicalDeviceDescriptorIndexingFeatures & current, VkPhysicalDeviceDescriptorIndexingFeatures const& merge_in) { + current.shaderInputAttachmentArrayDynamicIndexing = current.shaderInputAttachmentArrayDynamicIndexing || merge_in.shaderInputAttachmentArrayDynamicIndexing; + current.shaderUniformTexelBufferArrayDynamicIndexing = current.shaderUniformTexelBufferArrayDynamicIndexing || merge_in.shaderUniformTexelBufferArrayDynamicIndexing; + current.shaderStorageTexelBufferArrayDynamicIndexing = current.shaderStorageTexelBufferArrayDynamicIndexing || merge_in.shaderStorageTexelBufferArrayDynamicIndexing; + 
current.shaderUniformBufferArrayNonUniformIndexing = current.shaderUniformBufferArrayNonUniformIndexing || merge_in.shaderUniformBufferArrayNonUniformIndexing; + current.shaderSampledImageArrayNonUniformIndexing = current.shaderSampledImageArrayNonUniformIndexing || merge_in.shaderSampledImageArrayNonUniformIndexing; + current.shaderStorageBufferArrayNonUniformIndexing = current.shaderStorageBufferArrayNonUniformIndexing || merge_in.shaderStorageBufferArrayNonUniformIndexing; + current.shaderStorageImageArrayNonUniformIndexing = current.shaderStorageImageArrayNonUniformIndexing || merge_in.shaderStorageImageArrayNonUniformIndexing; + current.shaderInputAttachmentArrayNonUniformIndexing = current.shaderInputAttachmentArrayNonUniformIndexing || merge_in.shaderInputAttachmentArrayNonUniformIndexing; + current.shaderUniformTexelBufferArrayNonUniformIndexing = current.shaderUniformTexelBufferArrayNonUniformIndexing || merge_in.shaderUniformTexelBufferArrayNonUniformIndexing; + current.shaderStorageTexelBufferArrayNonUniformIndexing = current.shaderStorageTexelBufferArrayNonUniformIndexing || merge_in.shaderStorageTexelBufferArrayNonUniformIndexing; + current.descriptorBindingUniformBufferUpdateAfterBind = current.descriptorBindingUniformBufferUpdateAfterBind || merge_in.descriptorBindingUniformBufferUpdateAfterBind; + current.descriptorBindingSampledImageUpdateAfterBind = current.descriptorBindingSampledImageUpdateAfterBind || merge_in.descriptorBindingSampledImageUpdateAfterBind; + current.descriptorBindingStorageImageUpdateAfterBind = current.descriptorBindingStorageImageUpdateAfterBind || merge_in.descriptorBindingStorageImageUpdateAfterBind; + current.descriptorBindingStorageBufferUpdateAfterBind = current.descriptorBindingStorageBufferUpdateAfterBind || merge_in.descriptorBindingStorageBufferUpdateAfterBind; + current.descriptorBindingUniformTexelBufferUpdateAfterBind = current.descriptorBindingUniformTexelBufferUpdateAfterBind || 
merge_in.descriptorBindingUniformTexelBufferUpdateAfterBind; + current.descriptorBindingStorageTexelBufferUpdateAfterBind = current.descriptorBindingStorageTexelBufferUpdateAfterBind || merge_in.descriptorBindingStorageTexelBufferUpdateAfterBind; + current.descriptorBindingUpdateUnusedWhilePending = current.descriptorBindingUpdateUnusedWhilePending || merge_in.descriptorBindingUpdateUnusedWhilePending; + current.descriptorBindingPartiallyBound = current.descriptorBindingPartiallyBound || merge_in.descriptorBindingPartiallyBound; + current.descriptorBindingVariableDescriptorCount = current.descriptorBindingVariableDescriptorCount || merge_in.descriptorBindingVariableDescriptorCount; + current.runtimeDescriptorArray = current.runtimeDescriptorArray || merge_in.runtimeDescriptorArray; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing)) +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing)) +void compare_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDescriptorIndexingFeaturesEXT const& supported, VkPhysicalDeviceDescriptorIndexingFeaturesEXT const& requested) { + if (requested.shaderInputAttachmentArrayDynamicIndexing && !supported.shaderInputAttachmentArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderInputAttachmentArrayDynamicIndexing"); + } + if (requested.shaderUniformTexelBufferArrayDynamicIndexing && !supported.shaderUniformTexelBufferArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderUniformTexelBufferArrayDynamicIndexing"); + } + if (requested.shaderStorageTexelBufferArrayDynamicIndexing && !supported.shaderStorageTexelBufferArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageTexelBufferArrayDynamicIndexing"); + } + if (requested.shaderUniformBufferArrayNonUniformIndexing && 
!supported.shaderUniformBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderUniformBufferArrayNonUniformIndexing"); + } + if (requested.shaderSampledImageArrayNonUniformIndexing && !supported.shaderSampledImageArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderSampledImageArrayNonUniformIndexing"); + } + if (requested.shaderStorageBufferArrayNonUniformIndexing && !supported.shaderStorageBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageBufferArrayNonUniformIndexing"); + } + if (requested.shaderStorageImageArrayNonUniformIndexing && !supported.shaderStorageImageArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageImageArrayNonUniformIndexing"); + } + if (requested.shaderInputAttachmentArrayNonUniformIndexing && !supported.shaderInputAttachmentArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderInputAttachmentArrayNonUniformIndexing"); + } + if (requested.shaderUniformTexelBufferArrayNonUniformIndexing && !supported.shaderUniformTexelBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderUniformTexelBufferArrayNonUniformIndexing"); + } + if (requested.shaderStorageTexelBufferArrayNonUniformIndexing && !supported.shaderStorageTexelBufferArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageTexelBufferArrayNonUniformIndexing"); + } + if (requested.descriptorBindingUniformBufferUpdateAfterBind && !supported.descriptorBindingUniformBufferUpdateAfterBind) { + error_list.push_back("Missing feature 
VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingUniformBufferUpdateAfterBind"); + } + if (requested.descriptorBindingSampledImageUpdateAfterBind && !supported.descriptorBindingSampledImageUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingSampledImageUpdateAfterBind"); + } + if (requested.descriptorBindingStorageImageUpdateAfterBind && !supported.descriptorBindingStorageImageUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingStorageImageUpdateAfterBind"); + } + if (requested.descriptorBindingStorageBufferUpdateAfterBind && !supported.descriptorBindingStorageBufferUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingStorageBufferUpdateAfterBind"); + } + if (requested.descriptorBindingUniformTexelBufferUpdateAfterBind && !supported.descriptorBindingUniformTexelBufferUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingUniformTexelBufferUpdateAfterBind"); + } + if (requested.descriptorBindingStorageTexelBufferUpdateAfterBind && !supported.descriptorBindingStorageTexelBufferUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingStorageTexelBufferUpdateAfterBind"); + } + if (requested.descriptorBindingUpdateUnusedWhilePending && !supported.descriptorBindingUpdateUnusedWhilePending) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingUpdateUnusedWhilePending"); + } + if (requested.descriptorBindingPartiallyBound && !supported.descriptorBindingPartiallyBound) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingPartiallyBound"); + } + if (requested.descriptorBindingVariableDescriptorCount && 
!supported.descriptorBindingVariableDescriptorCount) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::descriptorBindingVariableDescriptorCount"); + } + if (requested.runtimeDescriptorArray && !supported.runtimeDescriptorArray) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorIndexingFeaturesEXT::runtimeDescriptorArray"); + } +} +void merge_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(VkPhysicalDeviceDescriptorIndexingFeaturesEXT & current, VkPhysicalDeviceDescriptorIndexingFeaturesEXT const& merge_in) { + current.shaderInputAttachmentArrayDynamicIndexing = current.shaderInputAttachmentArrayDynamicIndexing || merge_in.shaderInputAttachmentArrayDynamicIndexing; + current.shaderUniformTexelBufferArrayDynamicIndexing = current.shaderUniformTexelBufferArrayDynamicIndexing || merge_in.shaderUniformTexelBufferArrayDynamicIndexing; + current.shaderStorageTexelBufferArrayDynamicIndexing = current.shaderStorageTexelBufferArrayDynamicIndexing || merge_in.shaderStorageTexelBufferArrayDynamicIndexing; + current.shaderUniformBufferArrayNonUniformIndexing = current.shaderUniformBufferArrayNonUniformIndexing || merge_in.shaderUniformBufferArrayNonUniformIndexing; + current.shaderSampledImageArrayNonUniformIndexing = current.shaderSampledImageArrayNonUniformIndexing || merge_in.shaderSampledImageArrayNonUniformIndexing; + current.shaderStorageBufferArrayNonUniformIndexing = current.shaderStorageBufferArrayNonUniformIndexing || merge_in.shaderStorageBufferArrayNonUniformIndexing; + current.shaderStorageImageArrayNonUniformIndexing = current.shaderStorageImageArrayNonUniformIndexing || merge_in.shaderStorageImageArrayNonUniformIndexing; + current.shaderInputAttachmentArrayNonUniformIndexing = current.shaderInputAttachmentArrayNonUniformIndexing || merge_in.shaderInputAttachmentArrayNonUniformIndexing; + current.shaderUniformTexelBufferArrayNonUniformIndexing = current.shaderUniformTexelBufferArrayNonUniformIndexing || 
merge_in.shaderUniformTexelBufferArrayNonUniformIndexing; + current.shaderStorageTexelBufferArrayNonUniformIndexing = current.shaderStorageTexelBufferArrayNonUniformIndexing || merge_in.shaderStorageTexelBufferArrayNonUniformIndexing; + current.descriptorBindingUniformBufferUpdateAfterBind = current.descriptorBindingUniformBufferUpdateAfterBind || merge_in.descriptorBindingUniformBufferUpdateAfterBind; + current.descriptorBindingSampledImageUpdateAfterBind = current.descriptorBindingSampledImageUpdateAfterBind || merge_in.descriptorBindingSampledImageUpdateAfterBind; + current.descriptorBindingStorageImageUpdateAfterBind = current.descriptorBindingStorageImageUpdateAfterBind || merge_in.descriptorBindingStorageImageUpdateAfterBind; + current.descriptorBindingStorageBufferUpdateAfterBind = current.descriptorBindingStorageBufferUpdateAfterBind || merge_in.descriptorBindingStorageBufferUpdateAfterBind; + current.descriptorBindingUniformTexelBufferUpdateAfterBind = current.descriptorBindingUniformTexelBufferUpdateAfterBind || merge_in.descriptorBindingUniformTexelBufferUpdateAfterBind; + current.descriptorBindingStorageTexelBufferUpdateAfterBind = current.descriptorBindingStorageTexelBufferUpdateAfterBind || merge_in.descriptorBindingStorageTexelBufferUpdateAfterBind; + current.descriptorBindingUpdateUnusedWhilePending = current.descriptorBindingUpdateUnusedWhilePending || merge_in.descriptorBindingUpdateUnusedWhilePending; + current.descriptorBindingPartiallyBound = current.descriptorBindingPartiallyBound || merge_in.descriptorBindingPartiallyBound; + current.descriptorBindingVariableDescriptorCount = current.descriptorBindingVariableDescriptorCount || merge_in.descriptorBindingVariableDescriptorCount; + current.runtimeDescriptorArray = current.runtimeDescriptorArray || merge_in.runtimeDescriptorArray; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing)) +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) +void 
compare_VkPhysicalDeviceScalarBlockLayoutFeatures(std::vector & error_list, VkPhysicalDeviceScalarBlockLayoutFeatures const& supported, VkPhysicalDeviceScalarBlockLayoutFeatures const& requested) { + if (requested.scalarBlockLayout && !supported.scalarBlockLayout) { + error_list.push_back("Missing feature VkPhysicalDeviceScalarBlockLayoutFeatures::scalarBlockLayout"); + } +} +void merge_VkPhysicalDeviceScalarBlockLayoutFeatures(VkPhysicalDeviceScalarBlockLayoutFeatures & current, VkPhysicalDeviceScalarBlockLayoutFeatures const& merge_in) { + current.scalarBlockLayout = current.scalarBlockLayout || merge_in.scalarBlockLayout; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) +void compare_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT(std::vector & error_list, VkPhysicalDeviceScalarBlockLayoutFeaturesEXT const& supported, VkPhysicalDeviceScalarBlockLayoutFeaturesEXT const& requested) { + if (requested.scalarBlockLayout && !supported.scalarBlockLayout) { + error_list.push_back("Missing feature VkPhysicalDeviceScalarBlockLayoutFeaturesEXT::scalarBlockLayout"); + } +} +void merge_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT(VkPhysicalDeviceScalarBlockLayoutFeaturesEXT & current, VkPhysicalDeviceScalarBlockLayoutFeaturesEXT const& merge_in) { + current.scalarBlockLayout = current.scalarBlockLayout || merge_in.scalarBlockLayout; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout)) +void compare_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(std::vector & error_list, VkPhysicalDeviceUniformBufferStandardLayoutFeatures const& supported, VkPhysicalDeviceUniformBufferStandardLayoutFeatures const& requested) { + if (requested.uniformBufferStandardLayout && !supported.uniformBufferStandardLayout) { + error_list.push_back("Missing feature 
VkPhysicalDeviceUniformBufferStandardLayoutFeatures::uniformBufferStandardLayout"); + } +} +void merge_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(VkPhysicalDeviceUniformBufferStandardLayoutFeatures & current, VkPhysicalDeviceUniformBufferStandardLayoutFeatures const& merge_in) { + current.uniformBufferStandardLayout = current.uniformBufferStandardLayout || merge_in.uniformBufferStandardLayout; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout)) +void compare_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR(std::vector & error_list, VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR const& supported, VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR const& requested) { + if (requested.uniformBufferStandardLayout && !supported.uniformBufferStandardLayout) { + error_list.push_back("Missing feature VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR::uniformBufferStandardLayout"); + } +} +void merge_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR(VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR & current, VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR const& merge_in) { + current.uniformBufferStandardLayout = current.uniformBufferStandardLayout || merge_in.uniformBufferStandardLayout; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types)) +void compare_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(std::vector & error_list, VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const& supported, VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const& requested) { + if (requested.shaderSubgroupExtendedTypes && !supported.shaderSubgroupExtendedTypes) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures::shaderSubgroupExtendedTypes"); + } +} +void 
merge_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures & current, VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const& merge_in) { + current.shaderSubgroupExtendedTypes = current.shaderSubgroupExtendedTypes || merge_in.shaderSubgroupExtendedTypes; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types)) +void compare_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR const& supported, VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR const& requested) { + if (requested.shaderSubgroupExtendedTypes && !supported.shaderSubgroupExtendedTypes) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR::shaderSubgroupExtendedTypes"); + } +} +void merge_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR(VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR & current, VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR const& merge_in) { + current.shaderSubgroupExtendedTypes = current.shaderSubgroupExtendedTypes || merge_in.shaderSubgroupExtendedTypes; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) +void compare_VkPhysicalDeviceImagelessFramebufferFeatures(std::vector & error_list, VkPhysicalDeviceImagelessFramebufferFeatures const& supported, VkPhysicalDeviceImagelessFramebufferFeatures const& requested) { + if (requested.imagelessFramebuffer && !supported.imagelessFramebuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceImagelessFramebufferFeatures::imagelessFramebuffer"); + } +} +void merge_VkPhysicalDeviceImagelessFramebufferFeatures(VkPhysicalDeviceImagelessFramebufferFeatures & current, VkPhysicalDeviceImagelessFramebufferFeatures const& 
merge_in) { + current.imagelessFramebuffer = current.imagelessFramebuffer || merge_in.imagelessFramebuffer; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) +void compare_VkPhysicalDeviceImagelessFramebufferFeaturesKHR(std::vector & error_list, VkPhysicalDeviceImagelessFramebufferFeaturesKHR const& supported, VkPhysicalDeviceImagelessFramebufferFeaturesKHR const& requested) { + if (requested.imagelessFramebuffer && !supported.imagelessFramebuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceImagelessFramebufferFeaturesKHR::imagelessFramebuffer"); + } +} +void merge_VkPhysicalDeviceImagelessFramebufferFeaturesKHR(VkPhysicalDeviceImagelessFramebufferFeaturesKHR & current, VkPhysicalDeviceImagelessFramebufferFeaturesKHR const& merge_in) { + current.imagelessFramebuffer = current.imagelessFramebuffer || merge_in.imagelessFramebuffer; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) +void compare_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(std::vector & error_list, VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const& supported, VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const& requested) { + if (requested.separateDepthStencilLayouts && !supported.separateDepthStencilLayouts) { + error_list.push_back("Missing feature VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures::separateDepthStencilLayouts"); + } +} +void merge_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures & current, VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const& merge_in) { + current.separateDepthStencilLayouts = current.separateDepthStencilLayouts || merge_in.separateDepthStencilLayouts; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) +#if 
(defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) +void compare_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(std::vector & error_list, VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR const& supported, VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR const& requested) { + if (requested.separateDepthStencilLayouts && !supported.separateDepthStencilLayouts) { + error_list.push_back("Missing feature VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR::separateDepthStencilLayouts"); + } +} +void merge_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR & current, VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR const& merge_in) { + current.separateDepthStencilLayouts = current.separateDepthStencilLayouts || merge_in.separateDepthStencilLayouts; +} +#endif //(defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) +#if (defined(VK_VERSION_1_3)) +void compare_VkPhysicalDeviceVulkan13Features(std::vector & error_list, VkPhysicalDeviceVulkan13Features const& supported, VkPhysicalDeviceVulkan13Features const& requested) { + if (requested.robustImageAccess && !supported.robustImageAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::robustImageAccess"); + } + if (requested.inlineUniformBlock && !supported.inlineUniformBlock) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::inlineUniformBlock"); + } + if (requested.descriptorBindingInlineUniformBlockUpdateAfterBind && !supported.descriptorBindingInlineUniformBlockUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::descriptorBindingInlineUniformBlockUpdateAfterBind"); + } + if (requested.pipelineCreationCacheControl && !supported.pipelineCreationCacheControl) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::pipelineCreationCacheControl"); + } + if (requested.privateData 
&& !supported.privateData) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::privateData"); + } + if (requested.shaderDemoteToHelperInvocation && !supported.shaderDemoteToHelperInvocation) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::shaderDemoteToHelperInvocation"); + } + if (requested.shaderTerminateInvocation && !supported.shaderTerminateInvocation) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::shaderTerminateInvocation"); + } + if (requested.subgroupSizeControl && !supported.subgroupSizeControl) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::subgroupSizeControl"); + } + if (requested.computeFullSubgroups && !supported.computeFullSubgroups) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::computeFullSubgroups"); + } + if (requested.synchronization2 && !supported.synchronization2) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::synchronization2"); + } + if (requested.textureCompressionASTC_HDR && !supported.textureCompressionASTC_HDR) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::textureCompressionASTC_HDR"); + } + if (requested.shaderZeroInitializeWorkgroupMemory && !supported.shaderZeroInitializeWorkgroupMemory) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::shaderZeroInitializeWorkgroupMemory"); + } + if (requested.dynamicRendering && !supported.dynamicRendering) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::dynamicRendering"); + } + if (requested.shaderIntegerDotProduct && !supported.shaderIntegerDotProduct) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::shaderIntegerDotProduct"); + } + if (requested.maintenance4 && !supported.maintenance4) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan13Features::maintenance4"); + } +} +void 
merge_VkPhysicalDeviceVulkan13Features(VkPhysicalDeviceVulkan13Features & current, VkPhysicalDeviceVulkan13Features const& merge_in) { + current.robustImageAccess = current.robustImageAccess || merge_in.robustImageAccess; + current.inlineUniformBlock = current.inlineUniformBlock || merge_in.inlineUniformBlock; + current.descriptorBindingInlineUniformBlockUpdateAfterBind = current.descriptorBindingInlineUniformBlockUpdateAfterBind || merge_in.descriptorBindingInlineUniformBlockUpdateAfterBind; + current.pipelineCreationCacheControl = current.pipelineCreationCacheControl || merge_in.pipelineCreationCacheControl; + current.privateData = current.privateData || merge_in.privateData; + current.shaderDemoteToHelperInvocation = current.shaderDemoteToHelperInvocation || merge_in.shaderDemoteToHelperInvocation; + current.shaderTerminateInvocation = current.shaderTerminateInvocation || merge_in.shaderTerminateInvocation; + current.subgroupSizeControl = current.subgroupSizeControl || merge_in.subgroupSizeControl; + current.computeFullSubgroups = current.computeFullSubgroups || merge_in.computeFullSubgroups; + current.synchronization2 = current.synchronization2 || merge_in.synchronization2; + current.textureCompressionASTC_HDR = current.textureCompressionASTC_HDR || merge_in.textureCompressionASTC_HDR; + current.shaderZeroInitializeWorkgroupMemory = current.shaderZeroInitializeWorkgroupMemory || merge_in.shaderZeroInitializeWorkgroupMemory; + current.dynamicRendering = current.dynamicRendering || merge_in.dynamicRendering; + current.shaderIntegerDotProduct = current.shaderIntegerDotProduct || merge_in.shaderIntegerDotProduct; + current.maintenance4 = current.maintenance4 || merge_in.maintenance4; +} +#endif //(defined(VK_VERSION_1_3)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) +void compare_VkPhysicalDevicePrivateDataFeatures(std::vector & error_list, VkPhysicalDevicePrivateDataFeatures const& supported, VkPhysicalDevicePrivateDataFeatures const& requested) 
{ + if (requested.privateData && !supported.privateData) { + error_list.push_back("Missing feature VkPhysicalDevicePrivateDataFeatures::privateData"); + } +} +void merge_VkPhysicalDevicePrivateDataFeatures(VkPhysicalDevicePrivateDataFeatures & current, VkPhysicalDevicePrivateDataFeatures const& merge_in) { + current.privateData = current.privateData || merge_in.privateData; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) +void compare_VkPhysicalDevicePrivateDataFeaturesEXT(std::vector & error_list, VkPhysicalDevicePrivateDataFeaturesEXT const& supported, VkPhysicalDevicePrivateDataFeaturesEXT const& requested) { + if (requested.privateData && !supported.privateData) { + error_list.push_back("Missing feature VkPhysicalDevicePrivateDataFeaturesEXT::privateData"); + } +} +void merge_VkPhysicalDevicePrivateDataFeaturesEXT(VkPhysicalDevicePrivateDataFeaturesEXT & current, VkPhysicalDevicePrivateDataFeaturesEXT const& merge_in) { + current.privateData = current.privateData || merge_in.privateData; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) +void compare_VkPhysicalDeviceSynchronization2Features(std::vector & error_list, VkPhysicalDeviceSynchronization2Features const& supported, VkPhysicalDeviceSynchronization2Features const& requested) { + if (requested.synchronization2 && !supported.synchronization2) { + error_list.push_back("Missing feature VkPhysicalDeviceSynchronization2Features::synchronization2"); + } +} +void merge_VkPhysicalDeviceSynchronization2Features(VkPhysicalDeviceSynchronization2Features & current, VkPhysicalDeviceSynchronization2Features const& merge_in) { + current.synchronization2 = current.synchronization2 || merge_in.synchronization2; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) 
+void compare_VkPhysicalDeviceSynchronization2FeaturesKHR(std::vector & error_list, VkPhysicalDeviceSynchronization2FeaturesKHR const& supported, VkPhysicalDeviceSynchronization2FeaturesKHR const& requested) { + if (requested.synchronization2 && !supported.synchronization2) { + error_list.push_back("Missing feature VkPhysicalDeviceSynchronization2FeaturesKHR::synchronization2"); + } +} +void merge_VkPhysicalDeviceSynchronization2FeaturesKHR(VkPhysicalDeviceSynchronization2FeaturesKHR & current, VkPhysicalDeviceSynchronization2FeaturesKHR const& merge_in) { + current.synchronization2 = current.synchronization2 || merge_in.synchronization2; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) +void compare_VkPhysicalDeviceTextureCompressionASTCHDRFeatures(std::vector & error_list, VkPhysicalDeviceTextureCompressionASTCHDRFeatures const& supported, VkPhysicalDeviceTextureCompressionASTCHDRFeatures const& requested) { + if (requested.textureCompressionASTC_HDR && !supported.textureCompressionASTC_HDR) { + error_list.push_back("Missing feature VkPhysicalDeviceTextureCompressionASTCHDRFeatures::textureCompressionASTC_HDR"); + } +} +void merge_VkPhysicalDeviceTextureCompressionASTCHDRFeatures(VkPhysicalDeviceTextureCompressionASTCHDRFeatures & current, VkPhysicalDeviceTextureCompressionASTCHDRFeatures const& merge_in) { + current.textureCompressionASTC_HDR = current.textureCompressionASTC_HDR || merge_in.textureCompressionASTC_HDR; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) +void compare_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(std::vector & error_list, VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& supported, VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& requested) { + if (requested.textureCompressionASTC_HDR && 
!supported.textureCompressionASTC_HDR) { + error_list.push_back("Missing feature VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT::textureCompressionASTC_HDR"); + } +} +void merge_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT & current, VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& merge_in) { + current.textureCompressionASTC_HDR = current.textureCompressionASTC_HDR || merge_in.textureCompressionASTC_HDR; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4)) +void compare_VkPhysicalDeviceMaintenance4Features(std::vector & error_list, VkPhysicalDeviceMaintenance4Features const& supported, VkPhysicalDeviceMaintenance4Features const& requested) { + if (requested.maintenance4 && !supported.maintenance4) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance4Features::maintenance4"); + } +} +void merge_VkPhysicalDeviceMaintenance4Features(VkPhysicalDeviceMaintenance4Features & current, VkPhysicalDeviceMaintenance4Features const& merge_in) { + current.maintenance4 = current.maintenance4 || merge_in.maintenance4; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4)) +void compare_VkPhysicalDeviceMaintenance4FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance4FeaturesKHR const& supported, VkPhysicalDeviceMaintenance4FeaturesKHR const& requested) { + if (requested.maintenance4 && !supported.maintenance4) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance4FeaturesKHR::maintenance4"); + } +} +void merge_VkPhysicalDeviceMaintenance4FeaturesKHR(VkPhysicalDeviceMaintenance4FeaturesKHR & current, VkPhysicalDeviceMaintenance4FeaturesKHR const& merge_in) { + current.maintenance4 = current.maintenance4 || merge_in.maintenance4; +} +#endif //(defined(VK_VERSION_1_3) || 
defined(VK_KHR_maintenance4)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) +void compare_VkPhysicalDeviceShaderTerminateInvocationFeatures(std::vector & error_list, VkPhysicalDeviceShaderTerminateInvocationFeatures const& supported, VkPhysicalDeviceShaderTerminateInvocationFeatures const& requested) { + if (requested.shaderTerminateInvocation && !supported.shaderTerminateInvocation) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderTerminateInvocationFeatures::shaderTerminateInvocation"); + } +} +void merge_VkPhysicalDeviceShaderTerminateInvocationFeatures(VkPhysicalDeviceShaderTerminateInvocationFeatures & current, VkPhysicalDeviceShaderTerminateInvocationFeatures const& merge_in) { + current.shaderTerminateInvocation = current.shaderTerminateInvocation || merge_in.shaderTerminateInvocation; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) +void compare_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const& supported, VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const& requested) { + if (requested.shaderTerminateInvocation && !supported.shaderTerminateInvocation) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR::shaderTerminateInvocation"); + } +} +void merge_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR & current, VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const& merge_in) { + current.shaderTerminateInvocation = current.shaderTerminateInvocation || merge_in.shaderTerminateInvocation; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) +void 
compare_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures(std::vector & error_list, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures const& supported, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures const& requested) { + if (requested.shaderDemoteToHelperInvocation && !supported.shaderDemoteToHelperInvocation) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures::shaderDemoteToHelperInvocation"); + } +} +void merge_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures(VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures & current, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures const& merge_in) { + current.shaderDemoteToHelperInvocation = current.shaderDemoteToHelperInvocation || merge_in.shaderDemoteToHelperInvocation; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) +void compare_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& supported, VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& requested) { + if (requested.shaderDemoteToHelperInvocation && !supported.shaderDemoteToHelperInvocation) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT::shaderDemoteToHelperInvocation"); + } +} +void merge_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT & current, VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& merge_in) { + current.shaderDemoteToHelperInvocation = current.shaderDemoteToHelperInvocation || merge_in.shaderDemoteToHelperInvocation; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control)) +void 
compare_VkPhysicalDevicePipelineCreationCacheControlFeatures(std::vector & error_list, VkPhysicalDevicePipelineCreationCacheControlFeatures const& supported, VkPhysicalDevicePipelineCreationCacheControlFeatures const& requested) { + if (requested.pipelineCreationCacheControl && !supported.pipelineCreationCacheControl) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineCreationCacheControlFeatures::pipelineCreationCacheControl"); + } +} +void merge_VkPhysicalDevicePipelineCreationCacheControlFeatures(VkPhysicalDevicePipelineCreationCacheControlFeatures & current, VkPhysicalDevicePipelineCreationCacheControlFeatures const& merge_in) { + current.pipelineCreationCacheControl = current.pipelineCreationCacheControl || merge_in.pipelineCreationCacheControl; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control)) +void compare_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const& supported, VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const& requested) { + if (requested.pipelineCreationCacheControl && !supported.pipelineCreationCacheControl) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT::pipelineCreationCacheControl"); + } +} +void merge_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT & current, VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const& merge_in) { + current.pipelineCreationCacheControl = current.pipelineCreationCacheControl || merge_in.pipelineCreationCacheControl; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory)) +void 
compare_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures(std::vector & error_list, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures const& supported, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures const& requested) { + if (requested.shaderZeroInitializeWorkgroupMemory && !supported.shaderZeroInitializeWorkgroupMemory) { + error_list.push_back("Missing feature VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures::shaderZeroInitializeWorkgroupMemory"); + } +} +void merge_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures & current, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures const& merge_in) { + current.shaderZeroInitializeWorkgroupMemory = current.shaderZeroInitializeWorkgroupMemory || merge_in.shaderZeroInitializeWorkgroupMemory; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory)) +void compare_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR(std::vector & error_list, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR const& supported, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR const& requested) { + if (requested.shaderZeroInitializeWorkgroupMemory && !supported.shaderZeroInitializeWorkgroupMemory) { + error_list.push_back("Missing feature VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR::shaderZeroInitializeWorkgroupMemory"); + } +} +void merge_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR & current, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR const& merge_in) { + current.shaderZeroInitializeWorkgroupMemory = current.shaderZeroInitializeWorkgroupMemory || merge_in.shaderZeroInitializeWorkgroupMemory; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory)) +#if (defined(VK_VERSION_1_3) || 
defined(VK_EXT_image_robustness)) +void compare_VkPhysicalDeviceImageRobustnessFeatures(std::vector & error_list, VkPhysicalDeviceImageRobustnessFeatures const& supported, VkPhysicalDeviceImageRobustnessFeatures const& requested) { + if (requested.robustImageAccess && !supported.robustImageAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceImageRobustnessFeatures::robustImageAccess"); + } +} +void merge_VkPhysicalDeviceImageRobustnessFeatures(VkPhysicalDeviceImageRobustnessFeatures & current, VkPhysicalDeviceImageRobustnessFeatures const& merge_in) { + current.robustImageAccess = current.robustImageAccess || merge_in.robustImageAccess; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness)) +void compare_VkPhysicalDeviceImageRobustnessFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageRobustnessFeaturesEXT const& supported, VkPhysicalDeviceImageRobustnessFeaturesEXT const& requested) { + if (requested.robustImageAccess && !supported.robustImageAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceImageRobustnessFeaturesEXT::robustImageAccess"); + } +} +void merge_VkPhysicalDeviceImageRobustnessFeaturesEXT(VkPhysicalDeviceImageRobustnessFeaturesEXT & current, VkPhysicalDeviceImageRobustnessFeaturesEXT const& merge_in) { + current.robustImageAccess = current.robustImageAccess || merge_in.robustImageAccess; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) +void compare_VkPhysicalDeviceSubgroupSizeControlFeatures(std::vector & error_list, VkPhysicalDeviceSubgroupSizeControlFeatures const& supported, VkPhysicalDeviceSubgroupSizeControlFeatures const& requested) { + if (requested.subgroupSizeControl && !supported.subgroupSizeControl) { + error_list.push_back("Missing feature VkPhysicalDeviceSubgroupSizeControlFeatures::subgroupSizeControl"); + } + if 
(requested.computeFullSubgroups && !supported.computeFullSubgroups) { + error_list.push_back("Missing feature VkPhysicalDeviceSubgroupSizeControlFeatures::computeFullSubgroups"); + } +} +void merge_VkPhysicalDeviceSubgroupSizeControlFeatures(VkPhysicalDeviceSubgroupSizeControlFeatures & current, VkPhysicalDeviceSubgroupSizeControlFeatures const& merge_in) { + current.subgroupSizeControl = current.subgroupSizeControl || merge_in.subgroupSizeControl; + current.computeFullSubgroups = current.computeFullSubgroups || merge_in.computeFullSubgroups; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) +void compare_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(std::vector & error_list, VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const& supported, VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const& requested) { + if (requested.subgroupSizeControl && !supported.subgroupSizeControl) { + error_list.push_back("Missing feature VkPhysicalDeviceSubgroupSizeControlFeaturesEXT::subgroupSizeControl"); + } + if (requested.computeFullSubgroups && !supported.computeFullSubgroups) { + error_list.push_back("Missing feature VkPhysicalDeviceSubgroupSizeControlFeaturesEXT::computeFullSubgroups"); + } +} +void merge_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(VkPhysicalDeviceSubgroupSizeControlFeaturesEXT & current, VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const& merge_in) { + current.subgroupSizeControl = current.subgroupSizeControl || merge_in.subgroupSizeControl; + current.computeFullSubgroups = current.computeFullSubgroups || merge_in.computeFullSubgroups; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) +void compare_VkPhysicalDeviceInlineUniformBlockFeatures(std::vector & error_list, VkPhysicalDeviceInlineUniformBlockFeatures const& supported, 
VkPhysicalDeviceInlineUniformBlockFeatures const& requested) { + if (requested.inlineUniformBlock && !supported.inlineUniformBlock) { + error_list.push_back("Missing feature VkPhysicalDeviceInlineUniformBlockFeatures::inlineUniformBlock"); + } + if (requested.descriptorBindingInlineUniformBlockUpdateAfterBind && !supported.descriptorBindingInlineUniformBlockUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceInlineUniformBlockFeatures::descriptorBindingInlineUniformBlockUpdateAfterBind"); + } +} +void merge_VkPhysicalDeviceInlineUniformBlockFeatures(VkPhysicalDeviceInlineUniformBlockFeatures & current, VkPhysicalDeviceInlineUniformBlockFeatures const& merge_in) { + current.inlineUniformBlock = current.inlineUniformBlock || merge_in.inlineUniformBlock; + current.descriptorBindingInlineUniformBlockUpdateAfterBind = current.descriptorBindingInlineUniformBlockUpdateAfterBind || merge_in.descriptorBindingInlineUniformBlockUpdateAfterBind; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) +void compare_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(std::vector & error_list, VkPhysicalDeviceInlineUniformBlockFeaturesEXT const& supported, VkPhysicalDeviceInlineUniformBlockFeaturesEXT const& requested) { + if (requested.inlineUniformBlock && !supported.inlineUniformBlock) { + error_list.push_back("Missing feature VkPhysicalDeviceInlineUniformBlockFeaturesEXT::inlineUniformBlock"); + } + if (requested.descriptorBindingInlineUniformBlockUpdateAfterBind && !supported.descriptorBindingInlineUniformBlockUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceInlineUniformBlockFeaturesEXT::descriptorBindingInlineUniformBlockUpdateAfterBind"); + } +} +void merge_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(VkPhysicalDeviceInlineUniformBlockFeaturesEXT & current, VkPhysicalDeviceInlineUniformBlockFeaturesEXT const& merge_in) { + 
current.inlineUniformBlock = current.inlineUniformBlock || merge_in.inlineUniformBlock; + current.descriptorBindingInlineUniformBlockUpdateAfterBind = current.descriptorBindingInlineUniformBlockUpdateAfterBind || merge_in.descriptorBindingInlineUniformBlockUpdateAfterBind; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) +void compare_VkPhysicalDeviceShaderIntegerDotProductFeatures(std::vector & error_list, VkPhysicalDeviceShaderIntegerDotProductFeatures const& supported, VkPhysicalDeviceShaderIntegerDotProductFeatures const& requested) { + if (requested.shaderIntegerDotProduct && !supported.shaderIntegerDotProduct) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderIntegerDotProductFeatures::shaderIntegerDotProduct"); + } +} +void merge_VkPhysicalDeviceShaderIntegerDotProductFeatures(VkPhysicalDeviceShaderIntegerDotProductFeatures & current, VkPhysicalDeviceShaderIntegerDotProductFeatures const& merge_in) { + current.shaderIntegerDotProduct = current.shaderIntegerDotProduct || merge_in.shaderIntegerDotProduct; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) +void compare_VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR const& supported, VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR const& requested) { + if (requested.shaderIntegerDotProduct && !supported.shaderIntegerDotProduct) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR::shaderIntegerDotProduct"); + } +} +void merge_VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR(VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR & current, VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR const& merge_in) { + current.shaderIntegerDotProduct = 
current.shaderIntegerDotProduct || merge_in.shaderIntegerDotProduct; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) +void compare_VkPhysicalDeviceDynamicRenderingFeatures(std::vector & error_list, VkPhysicalDeviceDynamicRenderingFeatures const& supported, VkPhysicalDeviceDynamicRenderingFeatures const& requested) { + if (requested.dynamicRendering && !supported.dynamicRendering) { + error_list.push_back("Missing feature VkPhysicalDeviceDynamicRenderingFeatures::dynamicRendering"); + } +} +void merge_VkPhysicalDeviceDynamicRenderingFeatures(VkPhysicalDeviceDynamicRenderingFeatures & current, VkPhysicalDeviceDynamicRenderingFeatures const& merge_in) { + current.dynamicRendering = current.dynamicRendering || merge_in.dynamicRendering; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) +void compare_VkPhysicalDeviceDynamicRenderingFeaturesKHR(std::vector & error_list, VkPhysicalDeviceDynamicRenderingFeaturesKHR const& supported, VkPhysicalDeviceDynamicRenderingFeaturesKHR const& requested) { + if (requested.dynamicRendering && !supported.dynamicRendering) { + error_list.push_back("Missing feature VkPhysicalDeviceDynamicRenderingFeaturesKHR::dynamicRendering"); + } +} +void merge_VkPhysicalDeviceDynamicRenderingFeaturesKHR(VkPhysicalDeviceDynamicRenderingFeaturesKHR & current, VkPhysicalDeviceDynamicRenderingFeaturesKHR const& merge_in) { + current.dynamicRendering = current.dynamicRendering || merge_in.dynamicRendering; +} +#endif //(defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) +#if (defined(VK_VERSION_1_4)) +void compare_VkPhysicalDeviceVulkan14Features(std::vector & error_list, VkPhysicalDeviceVulkan14Features const& supported, VkPhysicalDeviceVulkan14Features const& requested) { + if (requested.globalPriorityQuery && !supported.globalPriorityQuery) { + 
error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::globalPriorityQuery"); + } + if (requested.shaderSubgroupRotate && !supported.shaderSubgroupRotate) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::shaderSubgroupRotate"); + } + if (requested.shaderSubgroupRotateClustered && !supported.shaderSubgroupRotateClustered) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::shaderSubgroupRotateClustered"); + } + if (requested.shaderFloatControls2 && !supported.shaderFloatControls2) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::shaderFloatControls2"); + } + if (requested.shaderExpectAssume && !supported.shaderExpectAssume) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::shaderExpectAssume"); + } + if (requested.rectangularLines && !supported.rectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::rectangularLines"); + } + if (requested.bresenhamLines && !supported.bresenhamLines) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::bresenhamLines"); + } + if (requested.smoothLines && !supported.smoothLines) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::smoothLines"); + } + if (requested.stippledRectangularLines && !supported.stippledRectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::stippledRectangularLines"); + } + if (requested.stippledBresenhamLines && !supported.stippledBresenhamLines) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::stippledBresenhamLines"); + } + if (requested.stippledSmoothLines && !supported.stippledSmoothLines) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::stippledSmoothLines"); + } + if (requested.vertexAttributeInstanceRateDivisor && !supported.vertexAttributeInstanceRateDivisor) { + error_list.push_back("Missing feature 
VkPhysicalDeviceVulkan14Features::vertexAttributeInstanceRateDivisor"); + } + if (requested.vertexAttributeInstanceRateZeroDivisor && !supported.vertexAttributeInstanceRateZeroDivisor) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::vertexAttributeInstanceRateZeroDivisor"); + } + if (requested.indexTypeUint8 && !supported.indexTypeUint8) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::indexTypeUint8"); + } + if (requested.dynamicRenderingLocalRead && !supported.dynamicRenderingLocalRead) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::dynamicRenderingLocalRead"); + } + if (requested.maintenance5 && !supported.maintenance5) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::maintenance5"); + } + if (requested.maintenance6 && !supported.maintenance6) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::maintenance6"); + } + if (requested.pipelineProtectedAccess && !supported.pipelineProtectedAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::pipelineProtectedAccess"); + } + if (requested.pipelineRobustness && !supported.pipelineRobustness) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::pipelineRobustness"); + } + if (requested.hostImageCopy && !supported.hostImageCopy) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::hostImageCopy"); + } + if (requested.pushDescriptor && !supported.pushDescriptor) { + error_list.push_back("Missing feature VkPhysicalDeviceVulkan14Features::pushDescriptor"); + } +} +void merge_VkPhysicalDeviceVulkan14Features(VkPhysicalDeviceVulkan14Features & current, VkPhysicalDeviceVulkan14Features const& merge_in) { + current.globalPriorityQuery = current.globalPriorityQuery || merge_in.globalPriorityQuery; + current.shaderSubgroupRotate = current.shaderSubgroupRotate || merge_in.shaderSubgroupRotate; + 
current.shaderSubgroupRotateClustered = current.shaderSubgroupRotateClustered || merge_in.shaderSubgroupRotateClustered; + current.shaderFloatControls2 = current.shaderFloatControls2 || merge_in.shaderFloatControls2; + current.shaderExpectAssume = current.shaderExpectAssume || merge_in.shaderExpectAssume; + current.rectangularLines = current.rectangularLines || merge_in.rectangularLines; + current.bresenhamLines = current.bresenhamLines || merge_in.bresenhamLines; + current.smoothLines = current.smoothLines || merge_in.smoothLines; + current.stippledRectangularLines = current.stippledRectangularLines || merge_in.stippledRectangularLines; + current.stippledBresenhamLines = current.stippledBresenhamLines || merge_in.stippledBresenhamLines; + current.stippledSmoothLines = current.stippledSmoothLines || merge_in.stippledSmoothLines; + current.vertexAttributeInstanceRateDivisor = current.vertexAttributeInstanceRateDivisor || merge_in.vertexAttributeInstanceRateDivisor; + current.vertexAttributeInstanceRateZeroDivisor = current.vertexAttributeInstanceRateZeroDivisor || merge_in.vertexAttributeInstanceRateZeroDivisor; + current.indexTypeUint8 = current.indexTypeUint8 || merge_in.indexTypeUint8; + current.dynamicRenderingLocalRead = current.dynamicRenderingLocalRead || merge_in.dynamicRenderingLocalRead; + current.maintenance5 = current.maintenance5 || merge_in.maintenance5; + current.maintenance6 = current.maintenance6 || merge_in.maintenance6; + current.pipelineProtectedAccess = current.pipelineProtectedAccess || merge_in.pipelineProtectedAccess; + current.pipelineRobustness = current.pipelineRobustness || merge_in.pipelineRobustness; + current.hostImageCopy = current.hostImageCopy || merge_in.hostImageCopy; + current.pushDescriptor = current.pushDescriptor || merge_in.pushDescriptor; +} +#endif //(defined(VK_VERSION_1_4)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority) || defined(VK_EXT_global_priority_query)) +void 
compare_VkPhysicalDeviceGlobalPriorityQueryFeatures(std::vector & error_list, VkPhysicalDeviceGlobalPriorityQueryFeatures const& supported, VkPhysicalDeviceGlobalPriorityQueryFeatures const& requested) { + if (requested.globalPriorityQuery && !supported.globalPriorityQuery) { + error_list.push_back("Missing feature VkPhysicalDeviceGlobalPriorityQueryFeatures::globalPriorityQuery"); + } +} +void merge_VkPhysicalDeviceGlobalPriorityQueryFeatures(VkPhysicalDeviceGlobalPriorityQueryFeatures & current, VkPhysicalDeviceGlobalPriorityQueryFeatures const& merge_in) { + current.globalPriorityQuery = current.globalPriorityQuery || merge_in.globalPriorityQuery; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority) || defined(VK_EXT_global_priority_query)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority)) +void compare_VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR(std::vector & error_list, VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const& supported, VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const& requested) { + if (requested.globalPriorityQuery && !supported.globalPriorityQuery) { + error_list.push_back("Missing feature VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR::globalPriorityQuery"); + } +} +void merge_VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR(VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR & current, VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const& merge_in) { + current.globalPriorityQuery = current.globalPriorityQuery || merge_in.globalPriorityQuery; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_global_priority_query)) +void compare_VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT(std::vector & error_list, VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT const& supported, VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT const& requested) { + if (requested.globalPriorityQuery && !supported.globalPriorityQuery) { + error_list.push_back("Missing 
feature VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT::globalPriorityQuery"); + } +} +void merge_VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT(VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT & current, VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT const& merge_in) { + current.globalPriorityQuery = current.globalPriorityQuery || merge_in.globalPriorityQuery; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_global_priority_query)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8) || defined(VK_EXT_index_type_uint8)) +void compare_VkPhysicalDeviceIndexTypeUint8Features(std::vector & error_list, VkPhysicalDeviceIndexTypeUint8Features const& supported, VkPhysicalDeviceIndexTypeUint8Features const& requested) { + if (requested.indexTypeUint8 && !supported.indexTypeUint8) { + error_list.push_back("Missing feature VkPhysicalDeviceIndexTypeUint8Features::indexTypeUint8"); + } +} +void merge_VkPhysicalDeviceIndexTypeUint8Features(VkPhysicalDeviceIndexTypeUint8Features & current, VkPhysicalDeviceIndexTypeUint8Features const& merge_in) { + current.indexTypeUint8 = current.indexTypeUint8 || merge_in.indexTypeUint8; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8) || defined(VK_EXT_index_type_uint8)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8)) +void compare_VkPhysicalDeviceIndexTypeUint8FeaturesKHR(std::vector & error_list, VkPhysicalDeviceIndexTypeUint8FeaturesKHR const& supported, VkPhysicalDeviceIndexTypeUint8FeaturesKHR const& requested) { + if (requested.indexTypeUint8 && !supported.indexTypeUint8) { + error_list.push_back("Missing feature VkPhysicalDeviceIndexTypeUint8FeaturesKHR::indexTypeUint8"); + } +} +void merge_VkPhysicalDeviceIndexTypeUint8FeaturesKHR(VkPhysicalDeviceIndexTypeUint8FeaturesKHR & current, VkPhysicalDeviceIndexTypeUint8FeaturesKHR const& merge_in) { + current.indexTypeUint8 = current.indexTypeUint8 || merge_in.indexTypeUint8; +} +#endif //(defined(VK_VERSION_1_4) || 
defined(VK_KHR_index_type_uint8)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_index_type_uint8)) +void compare_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(std::vector & error_list, VkPhysicalDeviceIndexTypeUint8FeaturesEXT const& supported, VkPhysicalDeviceIndexTypeUint8FeaturesEXT const& requested) { + if (requested.indexTypeUint8 && !supported.indexTypeUint8) { + error_list.push_back("Missing feature VkPhysicalDeviceIndexTypeUint8FeaturesEXT::indexTypeUint8"); + } +} +void merge_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(VkPhysicalDeviceIndexTypeUint8FeaturesEXT & current, VkPhysicalDeviceIndexTypeUint8FeaturesEXT const& merge_in) { + current.indexTypeUint8 = current.indexTypeUint8 || merge_in.indexTypeUint8; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_index_type_uint8)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +void compare_VkPhysicalDeviceMaintenance5Features(std::vector & error_list, VkPhysicalDeviceMaintenance5Features const& supported, VkPhysicalDeviceMaintenance5Features const& requested) { + if (requested.maintenance5 && !supported.maintenance5) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance5Features::maintenance5"); + } +} +void merge_VkPhysicalDeviceMaintenance5Features(VkPhysicalDeviceMaintenance5Features & current, VkPhysicalDeviceMaintenance5Features const& merge_in) { + current.maintenance5 = current.maintenance5 || merge_in.maintenance5; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +void compare_VkPhysicalDeviceMaintenance5FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance5FeaturesKHR const& supported, VkPhysicalDeviceMaintenance5FeaturesKHR const& requested) { + if (requested.maintenance5 && !supported.maintenance5) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance5FeaturesKHR::maintenance5"); + } +} +void 
merge_VkPhysicalDeviceMaintenance5FeaturesKHR(VkPhysicalDeviceMaintenance5FeaturesKHR & current, VkPhysicalDeviceMaintenance5FeaturesKHR const& merge_in) { + current.maintenance5 = current.maintenance5 || merge_in.maintenance5; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +void compare_VkPhysicalDeviceMaintenance6Features(std::vector & error_list, VkPhysicalDeviceMaintenance6Features const& supported, VkPhysicalDeviceMaintenance6Features const& requested) { + if (requested.maintenance6 && !supported.maintenance6) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance6Features::maintenance6"); + } +} +void merge_VkPhysicalDeviceMaintenance6Features(VkPhysicalDeviceMaintenance6Features & current, VkPhysicalDeviceMaintenance6Features const& merge_in) { + current.maintenance6 = current.maintenance6 || merge_in.maintenance6; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +void compare_VkPhysicalDeviceMaintenance6FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance6FeaturesKHR const& supported, VkPhysicalDeviceMaintenance6FeaturesKHR const& requested) { + if (requested.maintenance6 && !supported.maintenance6) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance6FeaturesKHR::maintenance6"); + } +} +void merge_VkPhysicalDeviceMaintenance6FeaturesKHR(VkPhysicalDeviceMaintenance6FeaturesKHR & current, VkPhysicalDeviceMaintenance6FeaturesKHR const& merge_in) { + current.maintenance6 = current.maintenance6 || merge_in.maintenance6; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +void compare_VkPhysicalDeviceHostImageCopyFeatures(std::vector & error_list, VkPhysicalDeviceHostImageCopyFeatures const& supported, VkPhysicalDeviceHostImageCopyFeatures const& requested) { + if 
(requested.hostImageCopy && !supported.hostImageCopy) { + error_list.push_back("Missing feature VkPhysicalDeviceHostImageCopyFeatures::hostImageCopy"); + } +} +void merge_VkPhysicalDeviceHostImageCopyFeatures(VkPhysicalDeviceHostImageCopyFeatures & current, VkPhysicalDeviceHostImageCopyFeatures const& merge_in) { + current.hostImageCopy = current.hostImageCopy || merge_in.hostImageCopy; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +void compare_VkPhysicalDeviceHostImageCopyFeaturesEXT(std::vector & error_list, VkPhysicalDeviceHostImageCopyFeaturesEXT const& supported, VkPhysicalDeviceHostImageCopyFeaturesEXT const& requested) { + if (requested.hostImageCopy && !supported.hostImageCopy) { + error_list.push_back("Missing feature VkPhysicalDeviceHostImageCopyFeaturesEXT::hostImageCopy"); + } +} +void merge_VkPhysicalDeviceHostImageCopyFeaturesEXT(VkPhysicalDeviceHostImageCopyFeaturesEXT & current, VkPhysicalDeviceHostImageCopyFeaturesEXT const& merge_in) { + current.hostImageCopy = current.hostImageCopy || merge_in.hostImageCopy; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +void compare_VkPhysicalDeviceShaderSubgroupRotateFeatures(std::vector & error_list, VkPhysicalDeviceShaderSubgroupRotateFeatures const& supported, VkPhysicalDeviceShaderSubgroupRotateFeatures const& requested) { + if (requested.shaderSubgroupRotate && !supported.shaderSubgroupRotate) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSubgroupRotateFeatures::shaderSubgroupRotate"); + } + if (requested.shaderSubgroupRotateClustered && !supported.shaderSubgroupRotateClustered) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSubgroupRotateFeatures::shaderSubgroupRotateClustered"); + } +} +void 
merge_VkPhysicalDeviceShaderSubgroupRotateFeatures(VkPhysicalDeviceShaderSubgroupRotateFeatures & current, VkPhysicalDeviceShaderSubgroupRotateFeatures const& merge_in) { + current.shaderSubgroupRotate = current.shaderSubgroupRotate || merge_in.shaderSubgroupRotate; + current.shaderSubgroupRotateClustered = current.shaderSubgroupRotateClustered || merge_in.shaderSubgroupRotateClustered; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +void compare_VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const& supported, VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const& requested) { + if (requested.shaderSubgroupRotate && !supported.shaderSubgroupRotate) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR::shaderSubgroupRotate"); + } + if (requested.shaderSubgroupRotateClustered && !supported.shaderSubgroupRotateClustered) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR::shaderSubgroupRotateClustered"); + } +} +void merge_VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR(VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR & current, VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const& merge_in) { + current.shaderSubgroupRotate = current.shaderSubgroupRotate || merge_in.shaderSubgroupRotate; + current.shaderSubgroupRotateClustered = current.shaderSubgroupRotateClustered || merge_in.shaderSubgroupRotateClustered; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +void compare_VkPhysicalDeviceShaderFloatControls2Features(std::vector & error_list, VkPhysicalDeviceShaderFloatControls2Features const& supported, VkPhysicalDeviceShaderFloatControls2Features const& requested) { + if (requested.shaderFloatControls2 && 
!supported.shaderFloatControls2) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloatControls2Features::shaderFloatControls2"); + } +} +void merge_VkPhysicalDeviceShaderFloatControls2Features(VkPhysicalDeviceShaderFloatControls2Features & current, VkPhysicalDeviceShaderFloatControls2Features const& merge_in) { + current.shaderFloatControls2 = current.shaderFloatControls2 || merge_in.shaderFloatControls2; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +void compare_VkPhysicalDeviceShaderFloatControls2FeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderFloatControls2FeaturesKHR const& supported, VkPhysicalDeviceShaderFloatControls2FeaturesKHR const& requested) { + if (requested.shaderFloatControls2 && !supported.shaderFloatControls2) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloatControls2FeaturesKHR::shaderFloatControls2"); + } +} +void merge_VkPhysicalDeviceShaderFloatControls2FeaturesKHR(VkPhysicalDeviceShaderFloatControls2FeaturesKHR & current, VkPhysicalDeviceShaderFloatControls2FeaturesKHR const& merge_in) { + current.shaderFloatControls2 = current.shaderFloatControls2 || merge_in.shaderFloatControls2; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +void compare_VkPhysicalDeviceShaderExpectAssumeFeatures(std::vector & error_list, VkPhysicalDeviceShaderExpectAssumeFeatures const& supported, VkPhysicalDeviceShaderExpectAssumeFeatures const& requested) { + if (requested.shaderExpectAssume && !supported.shaderExpectAssume) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderExpectAssumeFeatures::shaderExpectAssume"); + } +} +void merge_VkPhysicalDeviceShaderExpectAssumeFeatures(VkPhysicalDeviceShaderExpectAssumeFeatures & current, VkPhysicalDeviceShaderExpectAssumeFeatures const& merge_in) { + 
current.shaderExpectAssume = current.shaderExpectAssume || merge_in.shaderExpectAssume; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +void compare_VkPhysicalDeviceShaderExpectAssumeFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderExpectAssumeFeaturesKHR const& supported, VkPhysicalDeviceShaderExpectAssumeFeaturesKHR const& requested) { + if (requested.shaderExpectAssume && !supported.shaderExpectAssume) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderExpectAssumeFeaturesKHR::shaderExpectAssume"); + } +} +void merge_VkPhysicalDeviceShaderExpectAssumeFeaturesKHR(VkPhysicalDeviceShaderExpectAssumeFeaturesKHR & current, VkPhysicalDeviceShaderExpectAssumeFeaturesKHR const& merge_in) { + current.shaderExpectAssume = current.shaderExpectAssume || merge_in.shaderExpectAssume; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +void compare_VkPhysicalDevicePipelineProtectedAccessFeatures(std::vector & error_list, VkPhysicalDevicePipelineProtectedAccessFeatures const& supported, VkPhysicalDevicePipelineProtectedAccessFeatures const& requested) { + if (requested.pipelineProtectedAccess && !supported.pipelineProtectedAccess) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineProtectedAccessFeatures::pipelineProtectedAccess"); + } +} +void merge_VkPhysicalDevicePipelineProtectedAccessFeatures(VkPhysicalDevicePipelineProtectedAccessFeatures & current, VkPhysicalDevicePipelineProtectedAccessFeatures const& merge_in) { + current.pipelineProtectedAccess = current.pipelineProtectedAccess || merge_in.pipelineProtectedAccess; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +void 
compare_VkPhysicalDevicePipelineProtectedAccessFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelineProtectedAccessFeaturesEXT const& supported, VkPhysicalDevicePipelineProtectedAccessFeaturesEXT const& requested) { + if (requested.pipelineProtectedAccess && !supported.pipelineProtectedAccess) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineProtectedAccessFeaturesEXT::pipelineProtectedAccess"); + } +} +void merge_VkPhysicalDevicePipelineProtectedAccessFeaturesEXT(VkPhysicalDevicePipelineProtectedAccessFeaturesEXT & current, VkPhysicalDevicePipelineProtectedAccessFeaturesEXT const& merge_in) { + current.pipelineProtectedAccess = current.pipelineProtectedAccess || merge_in.pipelineProtectedAccess; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +void compare_VkPhysicalDevicePipelineRobustnessFeatures(std::vector & error_list, VkPhysicalDevicePipelineRobustnessFeatures const& supported, VkPhysicalDevicePipelineRobustnessFeatures const& requested) { + if (requested.pipelineRobustness && !supported.pipelineRobustness) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineRobustnessFeatures::pipelineRobustness"); + } +} +void merge_VkPhysicalDevicePipelineRobustnessFeatures(VkPhysicalDevicePipelineRobustnessFeatures & current, VkPhysicalDevicePipelineRobustnessFeatures const& merge_in) { + current.pipelineRobustness = current.pipelineRobustness || merge_in.pipelineRobustness; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +void compare_VkPhysicalDevicePipelineRobustnessFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelineRobustnessFeaturesEXT const& supported, VkPhysicalDevicePipelineRobustnessFeaturesEXT const& requested) { + if (requested.pipelineRobustness && !supported.pipelineRobustness) { + 
error_list.push_back("Missing feature VkPhysicalDevicePipelineRobustnessFeaturesEXT::pipelineRobustness"); + } +} +void merge_VkPhysicalDevicePipelineRobustnessFeaturesEXT(VkPhysicalDevicePipelineRobustnessFeaturesEXT & current, VkPhysicalDevicePipelineRobustnessFeaturesEXT const& merge_in) { + current.pipelineRobustness = current.pipelineRobustness || merge_in.pipelineRobustness; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization) || defined(VK_EXT_line_rasterization)) +void compare_VkPhysicalDeviceLineRasterizationFeatures(std::vector & error_list, VkPhysicalDeviceLineRasterizationFeatures const& supported, VkPhysicalDeviceLineRasterizationFeatures const& requested) { + if (requested.rectangularLines && !supported.rectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeatures::rectangularLines"); + } + if (requested.bresenhamLines && !supported.bresenhamLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeatures::bresenhamLines"); + } + if (requested.smoothLines && !supported.smoothLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeatures::smoothLines"); + } + if (requested.stippledRectangularLines && !supported.stippledRectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeatures::stippledRectangularLines"); + } + if (requested.stippledBresenhamLines && !supported.stippledBresenhamLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeatures::stippledBresenhamLines"); + } + if (requested.stippledSmoothLines && !supported.stippledSmoothLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeatures::stippledSmoothLines"); + } +} +void merge_VkPhysicalDeviceLineRasterizationFeatures(VkPhysicalDeviceLineRasterizationFeatures & current, VkPhysicalDeviceLineRasterizationFeatures 
const& merge_in) { + current.rectangularLines = current.rectangularLines || merge_in.rectangularLines; + current.bresenhamLines = current.bresenhamLines || merge_in.bresenhamLines; + current.smoothLines = current.smoothLines || merge_in.smoothLines; + current.stippledRectangularLines = current.stippledRectangularLines || merge_in.stippledRectangularLines; + current.stippledBresenhamLines = current.stippledBresenhamLines || merge_in.stippledBresenhamLines; + current.stippledSmoothLines = current.stippledSmoothLines || merge_in.stippledSmoothLines; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization) || defined(VK_EXT_line_rasterization)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization)) +void compare_VkPhysicalDeviceLineRasterizationFeaturesKHR(std::vector & error_list, VkPhysicalDeviceLineRasterizationFeaturesKHR const& supported, VkPhysicalDeviceLineRasterizationFeaturesKHR const& requested) { + if (requested.rectangularLines && !supported.rectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesKHR::rectangularLines"); + } + if (requested.bresenhamLines && !supported.bresenhamLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesKHR::bresenhamLines"); + } + if (requested.smoothLines && !supported.smoothLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesKHR::smoothLines"); + } + if (requested.stippledRectangularLines && !supported.stippledRectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesKHR::stippledRectangularLines"); + } + if (requested.stippledBresenhamLines && !supported.stippledBresenhamLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesKHR::stippledBresenhamLines"); + } + if (requested.stippledSmoothLines && !supported.stippledSmoothLines) { + error_list.push_back("Missing feature 
VkPhysicalDeviceLineRasterizationFeaturesKHR::stippledSmoothLines"); + } +} +void merge_VkPhysicalDeviceLineRasterizationFeaturesKHR(VkPhysicalDeviceLineRasterizationFeaturesKHR & current, VkPhysicalDeviceLineRasterizationFeaturesKHR const& merge_in) { + current.rectangularLines = current.rectangularLines || merge_in.rectangularLines; + current.bresenhamLines = current.bresenhamLines || merge_in.bresenhamLines; + current.smoothLines = current.smoothLines || merge_in.smoothLines; + current.stippledRectangularLines = current.stippledRectangularLines || merge_in.stippledRectangularLines; + current.stippledBresenhamLines = current.stippledBresenhamLines || merge_in.stippledBresenhamLines; + current.stippledSmoothLines = current.stippledSmoothLines || merge_in.stippledSmoothLines; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_line_rasterization)) +void compare_VkPhysicalDeviceLineRasterizationFeaturesEXT(std::vector & error_list, VkPhysicalDeviceLineRasterizationFeaturesEXT const& supported, VkPhysicalDeviceLineRasterizationFeaturesEXT const& requested) { + if (requested.rectangularLines && !supported.rectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesEXT::rectangularLines"); + } + if (requested.bresenhamLines && !supported.bresenhamLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesEXT::bresenhamLines"); + } + if (requested.smoothLines && !supported.smoothLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesEXT::smoothLines"); + } + if (requested.stippledRectangularLines && !supported.stippledRectangularLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesEXT::stippledRectangularLines"); + } + if (requested.stippledBresenhamLines && !supported.stippledBresenhamLines) { + error_list.push_back("Missing feature 
VkPhysicalDeviceLineRasterizationFeaturesEXT::stippledBresenhamLines"); + } + if (requested.stippledSmoothLines && !supported.stippledSmoothLines) { + error_list.push_back("Missing feature VkPhysicalDeviceLineRasterizationFeaturesEXT::stippledSmoothLines"); + } +} +void merge_VkPhysicalDeviceLineRasterizationFeaturesEXT(VkPhysicalDeviceLineRasterizationFeaturesEXT & current, VkPhysicalDeviceLineRasterizationFeaturesEXT const& merge_in) { + current.rectangularLines = current.rectangularLines || merge_in.rectangularLines; + current.bresenhamLines = current.bresenhamLines || merge_in.bresenhamLines; + current.smoothLines = current.smoothLines || merge_in.smoothLines; + current.stippledRectangularLines = current.stippledRectangularLines || merge_in.stippledRectangularLines; + current.stippledBresenhamLines = current.stippledBresenhamLines || merge_in.stippledBresenhamLines; + current.stippledSmoothLines = current.stippledSmoothLines || merge_in.stippledSmoothLines; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_line_rasterization)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor) || defined(VK_EXT_vertex_attribute_divisor)) +void compare_VkPhysicalDeviceVertexAttributeDivisorFeatures(std::vector & error_list, VkPhysicalDeviceVertexAttributeDivisorFeatures const& supported, VkPhysicalDeviceVertexAttributeDivisorFeatures const& requested) { + if (requested.vertexAttributeInstanceRateDivisor && !supported.vertexAttributeInstanceRateDivisor) { + error_list.push_back("Missing feature VkPhysicalDeviceVertexAttributeDivisorFeatures::vertexAttributeInstanceRateDivisor"); + } + if (requested.vertexAttributeInstanceRateZeroDivisor && !supported.vertexAttributeInstanceRateZeroDivisor) { + error_list.push_back("Missing feature VkPhysicalDeviceVertexAttributeDivisorFeatures::vertexAttributeInstanceRateZeroDivisor"); + } +} +void merge_VkPhysicalDeviceVertexAttributeDivisorFeatures(VkPhysicalDeviceVertexAttributeDivisorFeatures & current, 
VkPhysicalDeviceVertexAttributeDivisorFeatures const& merge_in) { + current.vertexAttributeInstanceRateDivisor = current.vertexAttributeInstanceRateDivisor || merge_in.vertexAttributeInstanceRateDivisor; + current.vertexAttributeInstanceRateZeroDivisor = current.vertexAttributeInstanceRateZeroDivisor || merge_in.vertexAttributeInstanceRateZeroDivisor; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor) || defined(VK_EXT_vertex_attribute_divisor)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor)) +void compare_VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const& supported, VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const& requested) { + if (requested.vertexAttributeInstanceRateDivisor && !supported.vertexAttributeInstanceRateDivisor) { + error_list.push_back("Missing feature VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR::vertexAttributeInstanceRateDivisor"); + } + if (requested.vertexAttributeInstanceRateZeroDivisor && !supported.vertexAttributeInstanceRateZeroDivisor) { + error_list.push_back("Missing feature VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR::vertexAttributeInstanceRateZeroDivisor"); + } +} +void merge_VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR(VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR & current, VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const& merge_in) { + current.vertexAttributeInstanceRateDivisor = current.vertexAttributeInstanceRateDivisor || merge_in.vertexAttributeInstanceRateDivisor; + current.vertexAttributeInstanceRateZeroDivisor = current.vertexAttributeInstanceRateZeroDivisor || merge_in.vertexAttributeInstanceRateZeroDivisor; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor)) +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_vertex_attribute_divisor)) +void compare_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(std::vector & error_list, 
VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const& supported, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const& requested) { + if (requested.vertexAttributeInstanceRateDivisor && !supported.vertexAttributeInstanceRateDivisor) { + error_list.push_back("Missing feature VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT::vertexAttributeInstanceRateDivisor"); + } + if (requested.vertexAttributeInstanceRateZeroDivisor && !supported.vertexAttributeInstanceRateZeroDivisor) { + error_list.push_back("Missing feature VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT::vertexAttributeInstanceRateZeroDivisor"); + } +} +void merge_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT & current, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const& merge_in) { + current.vertexAttributeInstanceRateDivisor = current.vertexAttributeInstanceRateDivisor || merge_in.vertexAttributeInstanceRateDivisor; + current.vertexAttributeInstanceRateZeroDivisor = current.vertexAttributeInstanceRateZeroDivisor || merge_in.vertexAttributeInstanceRateZeroDivisor; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_EXT_vertex_attribute_divisor)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +void compare_VkPhysicalDeviceDynamicRenderingLocalReadFeatures(std::vector & error_list, VkPhysicalDeviceDynamicRenderingLocalReadFeatures const& supported, VkPhysicalDeviceDynamicRenderingLocalReadFeatures const& requested) { + if (requested.dynamicRenderingLocalRead && !supported.dynamicRenderingLocalRead) { + error_list.push_back("Missing feature VkPhysicalDeviceDynamicRenderingLocalReadFeatures::dynamicRenderingLocalRead"); + } +} +void merge_VkPhysicalDeviceDynamicRenderingLocalReadFeatures(VkPhysicalDeviceDynamicRenderingLocalReadFeatures & current, VkPhysicalDeviceDynamicRenderingLocalReadFeatures const& merge_in) { + current.dynamicRenderingLocalRead = current.dynamicRenderingLocalRead || 
merge_in.dynamicRenderingLocalRead; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +void compare_VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR(std::vector & error_list, VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const& supported, VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const& requested) { + if (requested.dynamicRenderingLocalRead && !supported.dynamicRenderingLocalRead) { + error_list.push_back("Missing feature VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR::dynamicRenderingLocalRead"); + } +} +void merge_VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR(VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR & current, VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const& merge_in) { + current.dynamicRenderingLocalRead = current.dynamicRenderingLocalRead || merge_in.dynamicRenderingLocalRead; +} +#endif //(defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) +#if (defined(VK_KHR_performance_query)) +void compare_VkPhysicalDevicePerformanceQueryFeaturesKHR(std::vector & error_list, VkPhysicalDevicePerformanceQueryFeaturesKHR const& supported, VkPhysicalDevicePerformanceQueryFeaturesKHR const& requested) { + if (requested.performanceCounterQueryPools && !supported.performanceCounterQueryPools) { + error_list.push_back("Missing feature VkPhysicalDevicePerformanceQueryFeaturesKHR::performanceCounterQueryPools"); + } + if (requested.performanceCounterMultipleQueryPools && !supported.performanceCounterMultipleQueryPools) { + error_list.push_back("Missing feature VkPhysicalDevicePerformanceQueryFeaturesKHR::performanceCounterMultipleQueryPools"); + } +} +void merge_VkPhysicalDevicePerformanceQueryFeaturesKHR(VkPhysicalDevicePerformanceQueryFeaturesKHR & current, VkPhysicalDevicePerformanceQueryFeaturesKHR const& merge_in) { + current.performanceCounterQueryPools = 
current.performanceCounterQueryPools || merge_in.performanceCounterQueryPools; + current.performanceCounterMultipleQueryPools = current.performanceCounterMultipleQueryPools || merge_in.performanceCounterMultipleQueryPools; +} +#endif //(defined(VK_KHR_performance_query)) +#if (defined(VK_KHR_shader_bfloat16)) +void compare_VkPhysicalDeviceShaderBfloat16FeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderBfloat16FeaturesKHR const& supported, VkPhysicalDeviceShaderBfloat16FeaturesKHR const& requested) { + if (requested.shaderBFloat16Type && !supported.shaderBFloat16Type) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderBfloat16FeaturesKHR::shaderBFloat16Type"); + } + if (requested.shaderBFloat16DotProduct && !supported.shaderBFloat16DotProduct) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderBfloat16FeaturesKHR::shaderBFloat16DotProduct"); + } + if (requested.shaderBFloat16CooperativeMatrix && !supported.shaderBFloat16CooperativeMatrix) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderBfloat16FeaturesKHR::shaderBFloat16CooperativeMatrix"); + } +} +void merge_VkPhysicalDeviceShaderBfloat16FeaturesKHR(VkPhysicalDeviceShaderBfloat16FeaturesKHR & current, VkPhysicalDeviceShaderBfloat16FeaturesKHR const& merge_in) { + current.shaderBFloat16Type = current.shaderBFloat16Type || merge_in.shaderBFloat16Type; + current.shaderBFloat16DotProduct = current.shaderBFloat16DotProduct || merge_in.shaderBFloat16DotProduct; + current.shaderBFloat16CooperativeMatrix = current.shaderBFloat16CooperativeMatrix || merge_in.shaderBFloat16CooperativeMatrix; +} +#endif //(defined(VK_KHR_shader_bfloat16)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_KHR_portability_subset)) +void compare_VkPhysicalDevicePortabilitySubsetFeaturesKHR(std::vector & error_list, VkPhysicalDevicePortabilitySubsetFeaturesKHR const& supported, VkPhysicalDevicePortabilitySubsetFeaturesKHR const& requested) { + if (requested.constantAlphaColorBlendFactors && 
!supported.constantAlphaColorBlendFactors) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::constantAlphaColorBlendFactors"); + } + if (requested.events && !supported.events) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::events"); + } + if (requested.imageViewFormatReinterpretation && !supported.imageViewFormatReinterpretation) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::imageViewFormatReinterpretation"); + } + if (requested.imageViewFormatSwizzle && !supported.imageViewFormatSwizzle) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::imageViewFormatSwizzle"); + } + if (requested.imageView2DOn3DImage && !supported.imageView2DOn3DImage) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::imageView2DOn3DImage"); + } + if (requested.multisampleArrayImage && !supported.multisampleArrayImage) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::multisampleArrayImage"); + } + if (requested.mutableComparisonSamplers && !supported.mutableComparisonSamplers) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::mutableComparisonSamplers"); + } + if (requested.pointPolygons && !supported.pointPolygons) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::pointPolygons"); + } + if (requested.samplerMipLodBias && !supported.samplerMipLodBias) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::samplerMipLodBias"); + } + if (requested.separateStencilMaskRef && !supported.separateStencilMaskRef) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::separateStencilMaskRef"); + } + if (requested.shaderSampleRateInterpolationFunctions && !supported.shaderSampleRateInterpolationFunctions) { + 
error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::shaderSampleRateInterpolationFunctions"); + } + if (requested.tessellationIsolines && !supported.tessellationIsolines) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::tessellationIsolines"); + } + if (requested.tessellationPointMode && !supported.tessellationPointMode) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::tessellationPointMode"); + } + if (requested.triangleFans && !supported.triangleFans) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::triangleFans"); + } + if (requested.vertexAttributeAccessBeyondStride && !supported.vertexAttributeAccessBeyondStride) { + error_list.push_back("Missing feature VkPhysicalDevicePortabilitySubsetFeaturesKHR::vertexAttributeAccessBeyondStride"); + } +} +void merge_VkPhysicalDevicePortabilitySubsetFeaturesKHR(VkPhysicalDevicePortabilitySubsetFeaturesKHR & current, VkPhysicalDevicePortabilitySubsetFeaturesKHR const& merge_in) { + current.constantAlphaColorBlendFactors = current.constantAlphaColorBlendFactors || merge_in.constantAlphaColorBlendFactors; + current.events = current.events || merge_in.events; + current.imageViewFormatReinterpretation = current.imageViewFormatReinterpretation || merge_in.imageViewFormatReinterpretation; + current.imageViewFormatSwizzle = current.imageViewFormatSwizzle || merge_in.imageViewFormatSwizzle; + current.imageView2DOn3DImage = current.imageView2DOn3DImage || merge_in.imageView2DOn3DImage; + current.multisampleArrayImage = current.multisampleArrayImage || merge_in.multisampleArrayImage; + current.mutableComparisonSamplers = current.mutableComparisonSamplers || merge_in.mutableComparisonSamplers; + current.pointPolygons = current.pointPolygons || merge_in.pointPolygons; + current.samplerMipLodBias = current.samplerMipLodBias || merge_in.samplerMipLodBias; + current.separateStencilMaskRef = 
current.separateStencilMaskRef || merge_in.separateStencilMaskRef; + current.shaderSampleRateInterpolationFunctions = current.shaderSampleRateInterpolationFunctions || merge_in.shaderSampleRateInterpolationFunctions; + current.tessellationIsolines = current.tessellationIsolines || merge_in.tessellationIsolines; + current.tessellationPointMode = current.tessellationPointMode || merge_in.tessellationPointMode; + current.triangleFans = current.triangleFans || merge_in.triangleFans; + current.vertexAttributeAccessBeyondStride = current.vertexAttributeAccessBeyondStride || merge_in.vertexAttributeAccessBeyondStride; +} +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_KHR_portability_subset)) +#if (defined(VK_KHR_shader_clock)) +void compare_VkPhysicalDeviceShaderClockFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderClockFeaturesKHR const& supported, VkPhysicalDeviceShaderClockFeaturesKHR const& requested) { + if (requested.shaderSubgroupClock && !supported.shaderSubgroupClock) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderClockFeaturesKHR::shaderSubgroupClock"); + } + if (requested.shaderDeviceClock && !supported.shaderDeviceClock) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderClockFeaturesKHR::shaderDeviceClock"); + } +} +void merge_VkPhysicalDeviceShaderClockFeaturesKHR(VkPhysicalDeviceShaderClockFeaturesKHR & current, VkPhysicalDeviceShaderClockFeaturesKHR const& merge_in) { + current.shaderSubgroupClock = current.shaderSubgroupClock || merge_in.shaderSubgroupClock; + current.shaderDeviceClock = current.shaderDeviceClock || merge_in.shaderDeviceClock; +} +#endif //(defined(VK_KHR_shader_clock)) +#if (defined(VK_KHR_fragment_shading_rate)) +void compare_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(std::vector & error_list, VkPhysicalDeviceFragmentShadingRateFeaturesKHR const& supported, VkPhysicalDeviceFragmentShadingRateFeaturesKHR const& requested) { + if (requested.pipelineFragmentShadingRate && 
!supported.pipelineFragmentShadingRate) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentShadingRateFeaturesKHR::pipelineFragmentShadingRate"); + } + if (requested.primitiveFragmentShadingRate && !supported.primitiveFragmentShadingRate) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentShadingRateFeaturesKHR::primitiveFragmentShadingRate"); + } + if (requested.attachmentFragmentShadingRate && !supported.attachmentFragmentShadingRate) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentShadingRateFeaturesKHR::attachmentFragmentShadingRate"); + } +} +void merge_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(VkPhysicalDeviceFragmentShadingRateFeaturesKHR & current, VkPhysicalDeviceFragmentShadingRateFeaturesKHR const& merge_in) { + current.pipelineFragmentShadingRate = current.pipelineFragmentShadingRate || merge_in.pipelineFragmentShadingRate; + current.primitiveFragmentShadingRate = current.primitiveFragmentShadingRate || merge_in.primitiveFragmentShadingRate; + current.attachmentFragmentShadingRate = current.attachmentFragmentShadingRate || merge_in.attachmentFragmentShadingRate; +} +#endif //(defined(VK_KHR_fragment_shading_rate)) +#if (defined(VK_KHR_shader_quad_control)) +void compare_VkPhysicalDeviceShaderQuadControlFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderQuadControlFeaturesKHR const& supported, VkPhysicalDeviceShaderQuadControlFeaturesKHR const& requested) { + if (requested.shaderQuadControl && !supported.shaderQuadControl) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderQuadControlFeaturesKHR::shaderQuadControl"); + } +} +void merge_VkPhysicalDeviceShaderQuadControlFeaturesKHR(VkPhysicalDeviceShaderQuadControlFeaturesKHR & current, VkPhysicalDeviceShaderQuadControlFeaturesKHR const& merge_in) { + current.shaderQuadControl = current.shaderQuadControl || merge_in.shaderQuadControl; +} +#endif //(defined(VK_KHR_shader_quad_control)) +#if (defined(VK_KHR_present_wait)) +void 
compare_VkPhysicalDevicePresentWaitFeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentWaitFeaturesKHR const& supported, VkPhysicalDevicePresentWaitFeaturesKHR const& requested) { + if (requested.presentWait && !supported.presentWait) { + error_list.push_back("Missing feature VkPhysicalDevicePresentWaitFeaturesKHR::presentWait"); + } +} +void merge_VkPhysicalDevicePresentWaitFeaturesKHR(VkPhysicalDevicePresentWaitFeaturesKHR & current, VkPhysicalDevicePresentWaitFeaturesKHR const& merge_in) { + current.presentWait = current.presentWait || merge_in.presentWait; +} +#endif //(defined(VK_KHR_present_wait)) +#if (defined(VK_KHR_pipeline_executable_properties)) +void compare_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(std::vector & error_list, VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& supported, VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& requested) { + if (requested.pipelineExecutableInfo && !supported.pipelineExecutableInfo) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR::pipelineExecutableInfo"); + } +} +void merge_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR & current, VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& merge_in) { + current.pipelineExecutableInfo = current.pipelineExecutableInfo || merge_in.pipelineExecutableInfo; +} +#endif //(defined(VK_KHR_pipeline_executable_properties)) +#if (defined(VK_KHR_present_id)) +void compare_VkPhysicalDevicePresentIdFeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentIdFeaturesKHR const& supported, VkPhysicalDevicePresentIdFeaturesKHR const& requested) { + if (requested.presentId && !supported.presentId) { + error_list.push_back("Missing feature VkPhysicalDevicePresentIdFeaturesKHR::presentId"); + } +} +void merge_VkPhysicalDevicePresentIdFeaturesKHR(VkPhysicalDevicePresentIdFeaturesKHR & current, 
VkPhysicalDevicePresentIdFeaturesKHR const& merge_in) { + current.presentId = current.presentId || merge_in.presentId; +} +#endif //(defined(VK_KHR_present_id)) +#if (defined(VK_KHR_fragment_shader_barycentric)) +void compare_VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR(std::vector & error_list, VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR const& supported, VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR const& requested) { + if (requested.fragmentShaderBarycentric && !supported.fragmentShaderBarycentric) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR::fragmentShaderBarycentric"); + } +} +void merge_VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR(VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR & current, VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR const& merge_in) { + current.fragmentShaderBarycentric = current.fragmentShaderBarycentric || merge_in.fragmentShaderBarycentric; +} +#endif //(defined(VK_KHR_fragment_shader_barycentric)) +#if (defined(VK_NV_fragment_shader_barycentric)) +void compare_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(std::vector & error_list, VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const& supported, VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const& requested) { + if (requested.fragmentShaderBarycentric && !supported.fragmentShaderBarycentric) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV::fragmentShaderBarycentric"); + } +} +void merge_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV & current, VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const& merge_in) { + current.fragmentShaderBarycentric = current.fragmentShaderBarycentric || merge_in.fragmentShaderBarycentric; +} +#endif //(defined(VK_NV_fragment_shader_barycentric)) +#if (defined(VK_KHR_shader_subgroup_uniform_control_flow)) +void 
compare_VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR const& supported, VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR const& requested) { + if (requested.shaderSubgroupUniformControlFlow && !supported.shaderSubgroupUniformControlFlow) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR::shaderSubgroupUniformControlFlow"); + } +} +void merge_VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR(VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR & current, VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR const& merge_in) { + current.shaderSubgroupUniformControlFlow = current.shaderSubgroupUniformControlFlow || merge_in.shaderSubgroupUniformControlFlow; +} +#endif //(defined(VK_KHR_shader_subgroup_uniform_control_flow)) +#if (defined(VK_KHR_workgroup_memory_explicit_layout)) +void compare_VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR(std::vector & error_list, VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR const& supported, VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR const& requested) { + if (requested.workgroupMemoryExplicitLayout && !supported.workgroupMemoryExplicitLayout) { + error_list.push_back("Missing feature VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR::workgroupMemoryExplicitLayout"); + } + if (requested.workgroupMemoryExplicitLayoutScalarBlockLayout && !supported.workgroupMemoryExplicitLayoutScalarBlockLayout) { + error_list.push_back("Missing feature VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR::workgroupMemoryExplicitLayoutScalarBlockLayout"); + } + if (requested.workgroupMemoryExplicitLayout8BitAccess && !supported.workgroupMemoryExplicitLayout8BitAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR::workgroupMemoryExplicitLayout8BitAccess"); + } + if 
(requested.workgroupMemoryExplicitLayout16BitAccess && !supported.workgroupMemoryExplicitLayout16BitAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR::workgroupMemoryExplicitLayout16BitAccess"); + } +} +void merge_VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR(VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR & current, VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR const& merge_in) { + current.workgroupMemoryExplicitLayout = current.workgroupMemoryExplicitLayout || merge_in.workgroupMemoryExplicitLayout; + current.workgroupMemoryExplicitLayoutScalarBlockLayout = current.workgroupMemoryExplicitLayoutScalarBlockLayout || merge_in.workgroupMemoryExplicitLayoutScalarBlockLayout; + current.workgroupMemoryExplicitLayout8BitAccess = current.workgroupMemoryExplicitLayout8BitAccess || merge_in.workgroupMemoryExplicitLayout8BitAccess; + current.workgroupMemoryExplicitLayout16BitAccess = current.workgroupMemoryExplicitLayout16BitAccess || merge_in.workgroupMemoryExplicitLayout16BitAccess; +} +#endif //(defined(VK_KHR_workgroup_memory_explicit_layout)) +#if (defined(VK_KHR_ray_tracing_maintenance1)) +void compare_VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR const& supported, VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR const& requested) { + if (requested.rayTracingMaintenance1 && !supported.rayTracingMaintenance1) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR::rayTracingMaintenance1"); + } + if (requested.rayTracingPipelineTraceRaysIndirect2 && !supported.rayTracingPipelineTraceRaysIndirect2) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR::rayTracingPipelineTraceRaysIndirect2"); + } +} +void merge_VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR(VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR & current, 
VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR const& merge_in) { + current.rayTracingMaintenance1 = current.rayTracingMaintenance1 || merge_in.rayTracingMaintenance1; + current.rayTracingPipelineTraceRaysIndirect2 = current.rayTracingPipelineTraceRaysIndirect2 || merge_in.rayTracingPipelineTraceRaysIndirect2; +} +#endif //(defined(VK_KHR_ray_tracing_maintenance1)) +#if (defined(VK_KHR_shader_untyped_pointers)) +void compare_VkPhysicalDeviceShaderUntypedPointersFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderUntypedPointersFeaturesKHR const& supported, VkPhysicalDeviceShaderUntypedPointersFeaturesKHR const& requested) { + if (requested.shaderUntypedPointers && !supported.shaderUntypedPointers) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderUntypedPointersFeaturesKHR::shaderUntypedPointers"); + } +} +void merge_VkPhysicalDeviceShaderUntypedPointersFeaturesKHR(VkPhysicalDeviceShaderUntypedPointersFeaturesKHR & current, VkPhysicalDeviceShaderUntypedPointersFeaturesKHR const& merge_in) { + current.shaderUntypedPointers = current.shaderUntypedPointers || merge_in.shaderUntypedPointers; +} +#endif //(defined(VK_KHR_shader_untyped_pointers)) +#if (defined(VK_KHR_shader_maximal_reconvergence)) +void compare_VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR const& supported, VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR const& requested) { + if (requested.shaderMaximalReconvergence && !supported.shaderMaximalReconvergence) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR::shaderMaximalReconvergence"); + } +} +void merge_VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR(VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR & current, VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR const& merge_in) { + current.shaderMaximalReconvergence = current.shaderMaximalReconvergence || 
merge_in.shaderMaximalReconvergence; +} +#endif //(defined(VK_KHR_shader_maximal_reconvergence)) +#if (defined(VK_KHR_present_id2)) +void compare_VkPhysicalDevicePresentId2FeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentId2FeaturesKHR const& supported, VkPhysicalDevicePresentId2FeaturesKHR const& requested) { + if (requested.presentId2 && !supported.presentId2) { + error_list.push_back("Missing feature VkPhysicalDevicePresentId2FeaturesKHR::presentId2"); + } +} +void merge_VkPhysicalDevicePresentId2FeaturesKHR(VkPhysicalDevicePresentId2FeaturesKHR & current, VkPhysicalDevicePresentId2FeaturesKHR const& merge_in) { + current.presentId2 = current.presentId2 || merge_in.presentId2; +} +#endif //(defined(VK_KHR_present_id2)) +#if (defined(VK_KHR_present_wait2)) +void compare_VkPhysicalDevicePresentWait2FeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentWait2FeaturesKHR const& supported, VkPhysicalDevicePresentWait2FeaturesKHR const& requested) { + if (requested.presentWait2 && !supported.presentWait2) { + error_list.push_back("Missing feature VkPhysicalDevicePresentWait2FeaturesKHR::presentWait2"); + } +} +void merge_VkPhysicalDevicePresentWait2FeaturesKHR(VkPhysicalDevicePresentWait2FeaturesKHR & current, VkPhysicalDevicePresentWait2FeaturesKHR const& merge_in) { + current.presentWait2 = current.presentWait2 || merge_in.presentWait2; +} +#endif //(defined(VK_KHR_present_wait2)) +#if (defined(VK_KHR_ray_tracing_position_fetch)) +void compare_VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR const& supported, VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR const& requested) { + if (requested.rayTracingPositionFetch && !supported.rayTracingPositionFetch) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR::rayTracingPositionFetch"); + } +} +void 
merge_VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR(VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR & current, VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR const& merge_in) { + current.rayTracingPositionFetch = current.rayTracingPositionFetch || merge_in.rayTracingPositionFetch; +} +#endif //(defined(VK_KHR_ray_tracing_position_fetch)) +#if (defined(VK_KHR_pipeline_binary)) +void compare_VkPhysicalDevicePipelineBinaryFeaturesKHR(std::vector & error_list, VkPhysicalDevicePipelineBinaryFeaturesKHR const& supported, VkPhysicalDevicePipelineBinaryFeaturesKHR const& requested) { + if (requested.pipelineBinaries && !supported.pipelineBinaries) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineBinaryFeaturesKHR::pipelineBinaries"); + } +} +void merge_VkPhysicalDevicePipelineBinaryFeaturesKHR(VkPhysicalDevicePipelineBinaryFeaturesKHR & current, VkPhysicalDevicePipelineBinaryFeaturesKHR const& merge_in) { + current.pipelineBinaries = current.pipelineBinaries || merge_in.pipelineBinaries; +} +#endif //(defined(VK_KHR_pipeline_binary)) +#if (defined(VK_KHR_swapchain_maintenance1)) +void compare_VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR const& supported, VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR const& requested) { + if (requested.swapchainMaintenance1 && !supported.swapchainMaintenance1) { + error_list.push_back("Missing feature VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR::swapchainMaintenance1"); + } +} +void merge_VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR(VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR & current, VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR const& merge_in) { + current.swapchainMaintenance1 = current.swapchainMaintenance1 || merge_in.swapchainMaintenance1; +} +#endif //(defined(VK_KHR_swapchain_maintenance1)) +#if (defined(VK_EXT_swapchain_maintenance1)) +void 
compare_VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT(std::vector & error_list, VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT const& supported, VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT const& requested) { + if (requested.swapchainMaintenance1 && !supported.swapchainMaintenance1) { + error_list.push_back("Missing feature VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT::swapchainMaintenance1"); + } +} +void merge_VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT(VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT & current, VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT const& merge_in) { + current.swapchainMaintenance1 = current.swapchainMaintenance1 || merge_in.swapchainMaintenance1; +} +#endif //(defined(VK_EXT_swapchain_maintenance1)) +#if (defined(VK_KHR_cooperative_matrix)) +void compare_VkPhysicalDeviceCooperativeMatrixFeaturesKHR(std::vector & error_list, VkPhysicalDeviceCooperativeMatrixFeaturesKHR const& supported, VkPhysicalDeviceCooperativeMatrixFeaturesKHR const& requested) { + if (requested.cooperativeMatrix && !supported.cooperativeMatrix) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrixFeaturesKHR::cooperativeMatrix"); + } + if (requested.cooperativeMatrixRobustBufferAccess && !supported.cooperativeMatrixRobustBufferAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrixFeaturesKHR::cooperativeMatrixRobustBufferAccess"); + } +} +void merge_VkPhysicalDeviceCooperativeMatrixFeaturesKHR(VkPhysicalDeviceCooperativeMatrixFeaturesKHR & current, VkPhysicalDeviceCooperativeMatrixFeaturesKHR const& merge_in) { + current.cooperativeMatrix = current.cooperativeMatrix || merge_in.cooperativeMatrix; + current.cooperativeMatrixRobustBufferAccess = current.cooperativeMatrixRobustBufferAccess || merge_in.cooperativeMatrixRobustBufferAccess; +} +#endif //(defined(VK_KHR_cooperative_matrix)) +#if (defined(VK_KHR_compute_shader_derivatives)) +void 
compare_VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR(std::vector & error_list, VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const& supported, VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const& requested) { + if (requested.computeDerivativeGroupQuads && !supported.computeDerivativeGroupQuads) { + error_list.push_back("Missing feature VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR::computeDerivativeGroupQuads"); + } + if (requested.computeDerivativeGroupLinear && !supported.computeDerivativeGroupLinear) { + error_list.push_back("Missing feature VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR::computeDerivativeGroupLinear"); + } +} +void merge_VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR(VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR & current, VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const& merge_in) { + current.computeDerivativeGroupQuads = current.computeDerivativeGroupQuads || merge_in.computeDerivativeGroupQuads; + current.computeDerivativeGroupLinear = current.computeDerivativeGroupLinear || merge_in.computeDerivativeGroupLinear; +} +#endif //(defined(VK_KHR_compute_shader_derivatives)) +#if (defined(VK_NV_compute_shader_derivatives)) +void compare_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(std::vector & error_list, VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const& supported, VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const& requested) { + if (requested.computeDerivativeGroupQuads && !supported.computeDerivativeGroupQuads) { + error_list.push_back("Missing feature VkPhysicalDeviceComputeShaderDerivativesFeaturesNV::computeDerivativeGroupQuads"); + } + if (requested.computeDerivativeGroupLinear && !supported.computeDerivativeGroupLinear) { + error_list.push_back("Missing feature VkPhysicalDeviceComputeShaderDerivativesFeaturesNV::computeDerivativeGroupLinear"); + } +} +void merge_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(VkPhysicalDeviceComputeShaderDerivativesFeaturesNV & 
current, VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const& merge_in) { + current.computeDerivativeGroupQuads = current.computeDerivativeGroupQuads || merge_in.computeDerivativeGroupQuads; + current.computeDerivativeGroupLinear = current.computeDerivativeGroupLinear || merge_in.computeDerivativeGroupLinear; +} +#endif //(defined(VK_NV_compute_shader_derivatives)) +#if (defined(VK_KHR_video_encode_av1)) +void compare_VkPhysicalDeviceVideoEncodeAV1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoEncodeAV1FeaturesKHR const& supported, VkPhysicalDeviceVideoEncodeAV1FeaturesKHR const& requested) { + if (requested.videoEncodeAV1 && !supported.videoEncodeAV1) { + error_list.push_back("Missing feature VkPhysicalDeviceVideoEncodeAV1FeaturesKHR::videoEncodeAV1"); + } +} +void merge_VkPhysicalDeviceVideoEncodeAV1FeaturesKHR(VkPhysicalDeviceVideoEncodeAV1FeaturesKHR & current, VkPhysicalDeviceVideoEncodeAV1FeaturesKHR const& merge_in) { + current.videoEncodeAV1 = current.videoEncodeAV1 || merge_in.videoEncodeAV1; +} +#endif //(defined(VK_KHR_video_encode_av1)) +#if (defined(VK_KHR_video_decode_vp9)) +void compare_VkPhysicalDeviceVideoDecodeVP9FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoDecodeVP9FeaturesKHR const& supported, VkPhysicalDeviceVideoDecodeVP9FeaturesKHR const& requested) { + if (requested.videoDecodeVP9 && !supported.videoDecodeVP9) { + error_list.push_back("Missing feature VkPhysicalDeviceVideoDecodeVP9FeaturesKHR::videoDecodeVP9"); + } +} +void merge_VkPhysicalDeviceVideoDecodeVP9FeaturesKHR(VkPhysicalDeviceVideoDecodeVP9FeaturesKHR & current, VkPhysicalDeviceVideoDecodeVP9FeaturesKHR const& merge_in) { + current.videoDecodeVP9 = current.videoDecodeVP9 || merge_in.videoDecodeVP9; +} +#endif //(defined(VK_KHR_video_decode_vp9)) +#if (defined(VK_KHR_video_maintenance1)) +void compare_VkPhysicalDeviceVideoMaintenance1FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoMaintenance1FeaturesKHR const& supported, 
VkPhysicalDeviceVideoMaintenance1FeaturesKHR const& requested) { + if (requested.videoMaintenance1 && !supported.videoMaintenance1) { + error_list.push_back("Missing feature VkPhysicalDeviceVideoMaintenance1FeaturesKHR::videoMaintenance1"); + } +} +void merge_VkPhysicalDeviceVideoMaintenance1FeaturesKHR(VkPhysicalDeviceVideoMaintenance1FeaturesKHR & current, VkPhysicalDeviceVideoMaintenance1FeaturesKHR const& merge_in) { + current.videoMaintenance1 = current.videoMaintenance1 || merge_in.videoMaintenance1; +} +#endif //(defined(VK_KHR_video_maintenance1)) +#if (defined(VK_KHR_unified_image_layouts)) +void compare_VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR(std::vector & error_list, VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR const& supported, VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR const& requested) { + if (requested.unifiedImageLayouts && !supported.unifiedImageLayouts) { + error_list.push_back("Missing feature VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR::unifiedImageLayouts"); + } + if (requested.unifiedImageLayoutsVideo && !supported.unifiedImageLayoutsVideo) { + error_list.push_back("Missing feature VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR::unifiedImageLayoutsVideo"); + } +} +void merge_VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR(VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR & current, VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR const& merge_in) { + current.unifiedImageLayouts = current.unifiedImageLayouts || merge_in.unifiedImageLayouts; + current.unifiedImageLayoutsVideo = current.unifiedImageLayoutsVideo || merge_in.unifiedImageLayoutsVideo; +} +#endif //(defined(VK_KHR_unified_image_layouts)) +#if (defined(VK_KHR_copy_memory_indirect)) +void compare_VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR(std::vector & error_list, VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR const& supported, VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR const& requested) { + if (requested.indirectMemoryCopy && !supported.indirectMemoryCopy) { + 
error_list.push_back("Missing feature VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR::indirectMemoryCopy"); + } + if (requested.indirectMemoryToImageCopy && !supported.indirectMemoryToImageCopy) { + error_list.push_back("Missing feature VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR::indirectMemoryToImageCopy"); + } +} +void merge_VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR(VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR & current, VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR const& merge_in) { + current.indirectMemoryCopy = current.indirectMemoryCopy || merge_in.indirectMemoryCopy; + current.indirectMemoryToImageCopy = current.indirectMemoryToImageCopy || merge_in.indirectMemoryToImageCopy; +} +#endif //(defined(VK_KHR_copy_memory_indirect)) +#if (defined(VK_KHR_video_encode_intra_refresh)) +void compare_VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR const& supported, VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR const& requested) { + if (requested.videoEncodeIntraRefresh && !supported.videoEncodeIntraRefresh) { + error_list.push_back("Missing feature VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR::videoEncodeIntraRefresh"); + } +} +void merge_VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR(VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR & current, VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR const& merge_in) { + current.videoEncodeIntraRefresh = current.videoEncodeIntraRefresh || merge_in.videoEncodeIntraRefresh; +} +#endif //(defined(VK_KHR_video_encode_intra_refresh)) +#if (defined(VK_KHR_video_encode_quantization_map)) +void compare_VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR const& supported, VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR const& requested) { + if (requested.videoEncodeQuantizationMap && !supported.videoEncodeQuantizationMap) { + 
error_list.push_back("Missing feature VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR::videoEncodeQuantizationMap"); + } +} +void merge_VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR(VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR & current, VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR const& merge_in) { + current.videoEncodeQuantizationMap = current.videoEncodeQuantizationMap || merge_in.videoEncodeQuantizationMap; +} +#endif //(defined(VK_KHR_video_encode_quantization_map)) +#if (defined(VK_KHR_shader_relaxed_extended_instruction)) +void compare_VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const& supported, VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const& requested) { + if (requested.shaderRelaxedExtendedInstruction && !supported.shaderRelaxedExtendedInstruction) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR::shaderRelaxedExtendedInstruction"); + } +} +void merge_VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR(VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR & current, VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const& merge_in) { + current.shaderRelaxedExtendedInstruction = current.shaderRelaxedExtendedInstruction || merge_in.shaderRelaxedExtendedInstruction; +} +#endif //(defined(VK_KHR_shader_relaxed_extended_instruction)) +#if (defined(VK_KHR_maintenance7)) +void compare_VkPhysicalDeviceMaintenance7FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance7FeaturesKHR const& supported, VkPhysicalDeviceMaintenance7FeaturesKHR const& requested) { + if (requested.maintenance7 && !supported.maintenance7) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance7FeaturesKHR::maintenance7"); + } +} +void merge_VkPhysicalDeviceMaintenance7FeaturesKHR(VkPhysicalDeviceMaintenance7FeaturesKHR & current, 
VkPhysicalDeviceMaintenance7FeaturesKHR const& merge_in) { + current.maintenance7 = current.maintenance7 || merge_in.maintenance7; +} +#endif //(defined(VK_KHR_maintenance7)) +#if (defined(VK_KHR_maintenance8)) +void compare_VkPhysicalDeviceMaintenance8FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance8FeaturesKHR const& supported, VkPhysicalDeviceMaintenance8FeaturesKHR const& requested) { + if (requested.maintenance8 && !supported.maintenance8) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance8FeaturesKHR::maintenance8"); + } +} +void merge_VkPhysicalDeviceMaintenance8FeaturesKHR(VkPhysicalDeviceMaintenance8FeaturesKHR & current, VkPhysicalDeviceMaintenance8FeaturesKHR const& merge_in) { + current.maintenance8 = current.maintenance8 || merge_in.maintenance8; +} +#endif //(defined(VK_KHR_maintenance8)) +#if (defined(VK_KHR_shader_fma)) +void compare_VkPhysicalDeviceShaderFmaFeaturesKHR(std::vector & error_list, VkPhysicalDeviceShaderFmaFeaturesKHR const& supported, VkPhysicalDeviceShaderFmaFeaturesKHR const& requested) { + if (requested.shaderFmaFloat16 && !supported.shaderFmaFloat16) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFmaFeaturesKHR::shaderFmaFloat16"); + } + if (requested.shaderFmaFloat32 && !supported.shaderFmaFloat32) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFmaFeaturesKHR::shaderFmaFloat32"); + } + if (requested.shaderFmaFloat64 && !supported.shaderFmaFloat64) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFmaFeaturesKHR::shaderFmaFloat64"); + } +} +void merge_VkPhysicalDeviceShaderFmaFeaturesKHR(VkPhysicalDeviceShaderFmaFeaturesKHR & current, VkPhysicalDeviceShaderFmaFeaturesKHR const& merge_in) { + current.shaderFmaFloat16 = current.shaderFmaFloat16 || merge_in.shaderFmaFloat16; + current.shaderFmaFloat32 = current.shaderFmaFloat32 || merge_in.shaderFmaFloat32; + current.shaderFmaFloat64 = current.shaderFmaFloat64 || merge_in.shaderFmaFloat64; +} +#endif 
//(defined(VK_KHR_shader_fma)) +#if (defined(VK_KHR_maintenance9)) +void compare_VkPhysicalDeviceMaintenance9FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance9FeaturesKHR const& supported, VkPhysicalDeviceMaintenance9FeaturesKHR const& requested) { + if (requested.maintenance9 && !supported.maintenance9) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance9FeaturesKHR::maintenance9"); + } +} +void merge_VkPhysicalDeviceMaintenance9FeaturesKHR(VkPhysicalDeviceMaintenance9FeaturesKHR & current, VkPhysicalDeviceMaintenance9FeaturesKHR const& merge_in) { + current.maintenance9 = current.maintenance9 || merge_in.maintenance9; +} +#endif //(defined(VK_KHR_maintenance9)) +#if (defined(VK_KHR_video_maintenance2)) +void compare_VkPhysicalDeviceVideoMaintenance2FeaturesKHR(std::vector & error_list, VkPhysicalDeviceVideoMaintenance2FeaturesKHR const& supported, VkPhysicalDeviceVideoMaintenance2FeaturesKHR const& requested) { + if (requested.videoMaintenance2 && !supported.videoMaintenance2) { + error_list.push_back("Missing feature VkPhysicalDeviceVideoMaintenance2FeaturesKHR::videoMaintenance2"); + } +} +void merge_VkPhysicalDeviceVideoMaintenance2FeaturesKHR(VkPhysicalDeviceVideoMaintenance2FeaturesKHR & current, VkPhysicalDeviceVideoMaintenance2FeaturesKHR const& merge_in) { + current.videoMaintenance2 = current.videoMaintenance2 || merge_in.videoMaintenance2; +} +#endif //(defined(VK_KHR_video_maintenance2)) +#if (defined(VK_KHR_depth_clamp_zero_one)) +void compare_VkPhysicalDeviceDepthClampZeroOneFeaturesKHR(std::vector & error_list, VkPhysicalDeviceDepthClampZeroOneFeaturesKHR const& supported, VkPhysicalDeviceDepthClampZeroOneFeaturesKHR const& requested) { + if (requested.depthClampZeroOne && !supported.depthClampZeroOne) { + error_list.push_back("Missing feature VkPhysicalDeviceDepthClampZeroOneFeaturesKHR::depthClampZeroOne"); + } +} +void merge_VkPhysicalDeviceDepthClampZeroOneFeaturesKHR(VkPhysicalDeviceDepthClampZeroOneFeaturesKHR 
& current, VkPhysicalDeviceDepthClampZeroOneFeaturesKHR const& merge_in) { + current.depthClampZeroOne = current.depthClampZeroOne || merge_in.depthClampZeroOne; +} +#endif //(defined(VK_KHR_depth_clamp_zero_one)) +#if (defined(VK_EXT_depth_clamp_zero_one)) +void compare_VkPhysicalDeviceDepthClampZeroOneFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthClampZeroOneFeaturesEXT const& supported, VkPhysicalDeviceDepthClampZeroOneFeaturesEXT const& requested) { + if (requested.depthClampZeroOne && !supported.depthClampZeroOne) { + error_list.push_back("Missing feature VkPhysicalDeviceDepthClampZeroOneFeaturesEXT::depthClampZeroOne"); + } +} +void merge_VkPhysicalDeviceDepthClampZeroOneFeaturesEXT(VkPhysicalDeviceDepthClampZeroOneFeaturesEXT & current, VkPhysicalDeviceDepthClampZeroOneFeaturesEXT const& merge_in) { + current.depthClampZeroOne = current.depthClampZeroOne || merge_in.depthClampZeroOne; +} +#endif //(defined(VK_EXT_depth_clamp_zero_one)) +#if (defined(VK_KHR_robustness2)) +void compare_VkPhysicalDeviceRobustness2FeaturesKHR(std::vector & error_list, VkPhysicalDeviceRobustness2FeaturesKHR const& supported, VkPhysicalDeviceRobustness2FeaturesKHR const& requested) { + if (requested.robustBufferAccess2 && !supported.robustBufferAccess2) { + error_list.push_back("Missing feature VkPhysicalDeviceRobustness2FeaturesKHR::robustBufferAccess2"); + } + if (requested.robustImageAccess2 && !supported.robustImageAccess2) { + error_list.push_back("Missing feature VkPhysicalDeviceRobustness2FeaturesKHR::robustImageAccess2"); + } + if (requested.nullDescriptor && !supported.nullDescriptor) { + error_list.push_back("Missing feature VkPhysicalDeviceRobustness2FeaturesKHR::nullDescriptor"); + } +} +void merge_VkPhysicalDeviceRobustness2FeaturesKHR(VkPhysicalDeviceRobustness2FeaturesKHR & current, VkPhysicalDeviceRobustness2FeaturesKHR const& merge_in) { + current.robustBufferAccess2 = current.robustBufferAccess2 || merge_in.robustBufferAccess2; + 
current.robustImageAccess2 = current.robustImageAccess2 || merge_in.robustImageAccess2; + current.nullDescriptor = current.nullDescriptor || merge_in.nullDescriptor; +} +#endif //(defined(VK_KHR_robustness2)) +#if (defined(VK_EXT_robustness2)) +void compare_VkPhysicalDeviceRobustness2FeaturesEXT(std::vector & error_list, VkPhysicalDeviceRobustness2FeaturesEXT const& supported, VkPhysicalDeviceRobustness2FeaturesEXT const& requested) { + if (requested.robustBufferAccess2 && !supported.robustBufferAccess2) { + error_list.push_back("Missing feature VkPhysicalDeviceRobustness2FeaturesEXT::robustBufferAccess2"); + } + if (requested.robustImageAccess2 && !supported.robustImageAccess2) { + error_list.push_back("Missing feature VkPhysicalDeviceRobustness2FeaturesEXT::robustImageAccess2"); + } + if (requested.nullDescriptor && !supported.nullDescriptor) { + error_list.push_back("Missing feature VkPhysicalDeviceRobustness2FeaturesEXT::nullDescriptor"); + } +} +void merge_VkPhysicalDeviceRobustness2FeaturesEXT(VkPhysicalDeviceRobustness2FeaturesEXT & current, VkPhysicalDeviceRobustness2FeaturesEXT const& merge_in) { + current.robustBufferAccess2 = current.robustBufferAccess2 || merge_in.robustBufferAccess2; + current.robustImageAccess2 = current.robustImageAccess2 || merge_in.robustImageAccess2; + current.nullDescriptor = current.nullDescriptor || merge_in.nullDescriptor; +} +#endif //(defined(VK_EXT_robustness2)) +#if (defined(VK_KHR_present_mode_fifo_latest_ready)) +void compare_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR(std::vector & error_list, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR const& supported, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR const& requested) { + if (requested.presentModeFifoLatestReady && !supported.presentModeFifoLatestReady) { + error_list.push_back("Missing feature VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR::presentModeFifoLatestReady"); + } +} +void 
merge_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR(VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR & current, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR const& merge_in) { + current.presentModeFifoLatestReady = current.presentModeFifoLatestReady || merge_in.presentModeFifoLatestReady; +} +#endif //(defined(VK_KHR_present_mode_fifo_latest_ready)) +#if (defined(VK_EXT_present_mode_fifo_latest_ready)) +void compare_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT(std::vector & error_list, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT const& supported, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT const& requested) { + if (requested.presentModeFifoLatestReady && !supported.presentModeFifoLatestReady) { + error_list.push_back("Missing feature VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT::presentModeFifoLatestReady"); + } +} +void merge_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT(VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT & current, VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT const& merge_in) { + current.presentModeFifoLatestReady = current.presentModeFifoLatestReady || merge_in.presentModeFifoLatestReady; +} +#endif //(defined(VK_EXT_present_mode_fifo_latest_ready)) +#if (defined(VK_KHR_maintenance10)) +void compare_VkPhysicalDeviceMaintenance10FeaturesKHR(std::vector & error_list, VkPhysicalDeviceMaintenance10FeaturesKHR const& supported, VkPhysicalDeviceMaintenance10FeaturesKHR const& requested) { + if (requested.maintenance10 && !supported.maintenance10) { + error_list.push_back("Missing feature VkPhysicalDeviceMaintenance10FeaturesKHR::maintenance10"); + } +} +void merge_VkPhysicalDeviceMaintenance10FeaturesKHR(VkPhysicalDeviceMaintenance10FeaturesKHR & current, VkPhysicalDeviceMaintenance10FeaturesKHR const& merge_in) { + current.maintenance10 = current.maintenance10 || merge_in.maintenance10; +} +#endif //(defined(VK_KHR_maintenance10)) +#if (defined(VK_EXT_transform_feedback)) 
+void compare_VkPhysicalDeviceTransformFeedbackFeaturesEXT(std::vector & error_list, VkPhysicalDeviceTransformFeedbackFeaturesEXT const& supported, VkPhysicalDeviceTransformFeedbackFeaturesEXT const& requested) { + if (requested.transformFeedback && !supported.transformFeedback) { + error_list.push_back("Missing feature VkPhysicalDeviceTransformFeedbackFeaturesEXT::transformFeedback"); + } + if (requested.geometryStreams && !supported.geometryStreams) { + error_list.push_back("Missing feature VkPhysicalDeviceTransformFeedbackFeaturesEXT::geometryStreams"); + } +} +void merge_VkPhysicalDeviceTransformFeedbackFeaturesEXT(VkPhysicalDeviceTransformFeedbackFeaturesEXT & current, VkPhysicalDeviceTransformFeedbackFeaturesEXT const& merge_in) { + current.transformFeedback = current.transformFeedback || merge_in.transformFeedback; + current.geometryStreams = current.geometryStreams || merge_in.geometryStreams; +} +#endif //(defined(VK_EXT_transform_feedback)) +#if (defined(VK_NV_corner_sampled_image)) +void compare_VkPhysicalDeviceCornerSampledImageFeaturesNV(std::vector & error_list, VkPhysicalDeviceCornerSampledImageFeaturesNV const& supported, VkPhysicalDeviceCornerSampledImageFeaturesNV const& requested) { + if (requested.cornerSampledImage && !supported.cornerSampledImage) { + error_list.push_back("Missing feature VkPhysicalDeviceCornerSampledImageFeaturesNV::cornerSampledImage"); + } +} +void merge_VkPhysicalDeviceCornerSampledImageFeaturesNV(VkPhysicalDeviceCornerSampledImageFeaturesNV & current, VkPhysicalDeviceCornerSampledImageFeaturesNV const& merge_in) { + current.cornerSampledImage = current.cornerSampledImage || merge_in.cornerSampledImage; +} +#endif //(defined(VK_NV_corner_sampled_image)) +#if (defined(VK_EXT_astc_decode_mode)) +void compare_VkPhysicalDeviceASTCDecodeFeaturesEXT(std::vector & error_list, VkPhysicalDeviceASTCDecodeFeaturesEXT const& supported, VkPhysicalDeviceASTCDecodeFeaturesEXT const& requested) { + if (requested.decodeModeSharedExponent 
&& !supported.decodeModeSharedExponent) { + error_list.push_back("Missing feature VkPhysicalDeviceASTCDecodeFeaturesEXT::decodeModeSharedExponent"); + } +} +void merge_VkPhysicalDeviceASTCDecodeFeaturesEXT(VkPhysicalDeviceASTCDecodeFeaturesEXT & current, VkPhysicalDeviceASTCDecodeFeaturesEXT const& merge_in) { + current.decodeModeSharedExponent = current.decodeModeSharedExponent || merge_in.decodeModeSharedExponent; +} +#endif //(defined(VK_EXT_astc_decode_mode)) +#if (defined(VK_EXT_conditional_rendering)) +void compare_VkPhysicalDeviceConditionalRenderingFeaturesEXT(std::vector & error_list, VkPhysicalDeviceConditionalRenderingFeaturesEXT const& supported, VkPhysicalDeviceConditionalRenderingFeaturesEXT const& requested) { + if (requested.conditionalRendering && !supported.conditionalRendering) { + error_list.push_back("Missing feature VkPhysicalDeviceConditionalRenderingFeaturesEXT::conditionalRendering"); + } + if (requested.inheritedConditionalRendering && !supported.inheritedConditionalRendering) { + error_list.push_back("Missing feature VkPhysicalDeviceConditionalRenderingFeaturesEXT::inheritedConditionalRendering"); + } +} +void merge_VkPhysicalDeviceConditionalRenderingFeaturesEXT(VkPhysicalDeviceConditionalRenderingFeaturesEXT & current, VkPhysicalDeviceConditionalRenderingFeaturesEXT const& merge_in) { + current.conditionalRendering = current.conditionalRendering || merge_in.conditionalRendering; + current.inheritedConditionalRendering = current.inheritedConditionalRendering || merge_in.inheritedConditionalRendering; +} +#endif //(defined(VK_EXT_conditional_rendering)) +#if (defined(VK_EXT_depth_clip_enable)) +void compare_VkPhysicalDeviceDepthClipEnableFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthClipEnableFeaturesEXT const& supported, VkPhysicalDeviceDepthClipEnableFeaturesEXT const& requested) { + if (requested.depthClipEnable && !supported.depthClipEnable) { + error_list.push_back("Missing feature 
VkPhysicalDeviceDepthClipEnableFeaturesEXT::depthClipEnable"); + } +} +void merge_VkPhysicalDeviceDepthClipEnableFeaturesEXT(VkPhysicalDeviceDepthClipEnableFeaturesEXT & current, VkPhysicalDeviceDepthClipEnableFeaturesEXT const& merge_in) { + current.depthClipEnable = current.depthClipEnable || merge_in.depthClipEnable; +} +#endif //(defined(VK_EXT_depth_clip_enable)) +#if (defined(VK_IMG_relaxed_line_rasterization)) +void compare_VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG(std::vector & error_list, VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG const& supported, VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG const& requested) { + if (requested.relaxedLineRasterization && !supported.relaxedLineRasterization) { + error_list.push_back("Missing feature VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG::relaxedLineRasterization"); + } +} +void merge_VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG(VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG & current, VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG const& merge_in) { + current.relaxedLineRasterization = current.relaxedLineRasterization || merge_in.relaxedLineRasterization; +} +#endif //(defined(VK_IMG_relaxed_line_rasterization)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_shader_enqueue)) +void compare_VkPhysicalDeviceShaderEnqueueFeaturesAMDX(std::vector & error_list, VkPhysicalDeviceShaderEnqueueFeaturesAMDX const& supported, VkPhysicalDeviceShaderEnqueueFeaturesAMDX const& requested) { + if (requested.shaderEnqueue && !supported.shaderEnqueue) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderEnqueueFeaturesAMDX::shaderEnqueue"); + } + if (requested.shaderMeshEnqueue && !supported.shaderMeshEnqueue) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderEnqueueFeaturesAMDX::shaderMeshEnqueue"); + } +} +void merge_VkPhysicalDeviceShaderEnqueueFeaturesAMDX(VkPhysicalDeviceShaderEnqueueFeaturesAMDX & current, 
VkPhysicalDeviceShaderEnqueueFeaturesAMDX const& merge_in) { + current.shaderEnqueue = current.shaderEnqueue || merge_in.shaderEnqueue; + current.shaderMeshEnqueue = current.shaderMeshEnqueue || merge_in.shaderMeshEnqueue; +} +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_shader_enqueue)) +#if (defined(VK_EXT_blend_operation_advanced)) +void compare_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const& supported, VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const& requested) { + if (requested.advancedBlendCoherentOperations && !supported.advancedBlendCoherentOperations) { + error_list.push_back("Missing feature VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT::advancedBlendCoherentOperations"); + } +} +void merge_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT & current, VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const& merge_in) { + current.advancedBlendCoherentOperations = current.advancedBlendCoherentOperations || merge_in.advancedBlendCoherentOperations; +} +#endif //(defined(VK_EXT_blend_operation_advanced)) +#if (defined(VK_NV_shader_sm_builtins)) +void compare_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(std::vector & error_list, VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const& supported, VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const& requested) { + if (requested.shaderSMBuiltins && !supported.shaderSMBuiltins) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderSMBuiltinsFeaturesNV::shaderSMBuiltins"); + } +} +void merge_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(VkPhysicalDeviceShaderSMBuiltinsFeaturesNV & current, VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const& merge_in) { + current.shaderSMBuiltins = current.shaderSMBuiltins || merge_in.shaderSMBuiltins; +} +#endif //(defined(VK_NV_shader_sm_builtins)) +#if (defined(VK_NV_shading_rate_image)) +void 
compare_VkPhysicalDeviceShadingRateImageFeaturesNV(std::vector & error_list, VkPhysicalDeviceShadingRateImageFeaturesNV const& supported, VkPhysicalDeviceShadingRateImageFeaturesNV const& requested) { + if (requested.shadingRateImage && !supported.shadingRateImage) { + error_list.push_back("Missing feature VkPhysicalDeviceShadingRateImageFeaturesNV::shadingRateImage"); + } + if (requested.shadingRateCoarseSampleOrder && !supported.shadingRateCoarseSampleOrder) { + error_list.push_back("Missing feature VkPhysicalDeviceShadingRateImageFeaturesNV::shadingRateCoarseSampleOrder"); + } +} +void merge_VkPhysicalDeviceShadingRateImageFeaturesNV(VkPhysicalDeviceShadingRateImageFeaturesNV & current, VkPhysicalDeviceShadingRateImageFeaturesNV const& merge_in) { + current.shadingRateImage = current.shadingRateImage || merge_in.shadingRateImage; + current.shadingRateCoarseSampleOrder = current.shadingRateCoarseSampleOrder || merge_in.shadingRateCoarseSampleOrder; +} +#endif //(defined(VK_NV_shading_rate_image)) +#if (defined(VK_NV_representative_fragment_test)) +void compare_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(std::vector & error_list, VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const& supported, VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const& requested) { + if (requested.representativeFragmentTest && !supported.representativeFragmentTest) { + error_list.push_back("Missing feature VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV::representativeFragmentTest"); + } +} +void merge_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV & current, VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const& merge_in) { + current.representativeFragmentTest = current.representativeFragmentTest || merge_in.representativeFragmentTest; +} +#endif //(defined(VK_NV_representative_fragment_test)) +#if (defined(VK_NV_mesh_shader)) +void compare_VkPhysicalDeviceMeshShaderFeaturesNV(std::vector & 
error_list, VkPhysicalDeviceMeshShaderFeaturesNV const& supported, VkPhysicalDeviceMeshShaderFeaturesNV const& requested) { + if (requested.taskShader && !supported.taskShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMeshShaderFeaturesNV::taskShader"); + } + if (requested.meshShader && !supported.meshShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMeshShaderFeaturesNV::meshShader"); + } +} +void merge_VkPhysicalDeviceMeshShaderFeaturesNV(VkPhysicalDeviceMeshShaderFeaturesNV & current, VkPhysicalDeviceMeshShaderFeaturesNV const& merge_in) { + current.taskShader = current.taskShader || merge_in.taskShader; + current.meshShader = current.meshShader || merge_in.meshShader; +} +#endif //(defined(VK_NV_mesh_shader)) +#if (defined(VK_NV_shader_image_footprint)) +void compare_VkPhysicalDeviceShaderImageFootprintFeaturesNV(std::vector & error_list, VkPhysicalDeviceShaderImageFootprintFeaturesNV const& supported, VkPhysicalDeviceShaderImageFootprintFeaturesNV const& requested) { + if (requested.imageFootprint && !supported.imageFootprint) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderImageFootprintFeaturesNV::imageFootprint"); + } +} +void merge_VkPhysicalDeviceShaderImageFootprintFeaturesNV(VkPhysicalDeviceShaderImageFootprintFeaturesNV & current, VkPhysicalDeviceShaderImageFootprintFeaturesNV const& merge_in) { + current.imageFootprint = current.imageFootprint || merge_in.imageFootprint; +} +#endif //(defined(VK_NV_shader_image_footprint)) +#if (defined(VK_NV_scissor_exclusive)) +void compare_VkPhysicalDeviceExclusiveScissorFeaturesNV(std::vector & error_list, VkPhysicalDeviceExclusiveScissorFeaturesNV const& supported, VkPhysicalDeviceExclusiveScissorFeaturesNV const& requested) { + if (requested.exclusiveScissor && !supported.exclusiveScissor) { + error_list.push_back("Missing feature VkPhysicalDeviceExclusiveScissorFeaturesNV::exclusiveScissor"); + } +} +void 
merge_VkPhysicalDeviceExclusiveScissorFeaturesNV(VkPhysicalDeviceExclusiveScissorFeaturesNV & current, VkPhysicalDeviceExclusiveScissorFeaturesNV const& merge_in) { + current.exclusiveScissor = current.exclusiveScissor || merge_in.exclusiveScissor; +} +#endif //(defined(VK_NV_scissor_exclusive)) +#if (defined(VK_INTEL_shader_integer_functions2)) +void compare_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(std::vector & error_list, VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& supported, VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& requested) { + if (requested.shaderIntegerFunctions2 && !supported.shaderIntegerFunctions2) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL::shaderIntegerFunctions2"); + } +} +void merge_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL & current, VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& merge_in) { + current.shaderIntegerFunctions2 = current.shaderIntegerFunctions2 || merge_in.shaderIntegerFunctions2; +} +#endif //(defined(VK_INTEL_shader_integer_functions2)) +#if (defined(VK_EXT_fragment_density_map)) +void compare_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapFeaturesEXT const& supported, VkPhysicalDeviceFragmentDensityMapFeaturesEXT const& requested) { + if (requested.fragmentDensityMap && !supported.fragmentDensityMap) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentDensityMapFeaturesEXT::fragmentDensityMap"); + } + if (requested.fragmentDensityMapDynamic && !supported.fragmentDensityMapDynamic) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentDensityMapFeaturesEXT::fragmentDensityMapDynamic"); + } + if (requested.fragmentDensityMapNonSubsampledImages && !supported.fragmentDensityMapNonSubsampledImages) { + error_list.push_back("Missing feature 
VkPhysicalDeviceFragmentDensityMapFeaturesEXT::fragmentDensityMapNonSubsampledImages"); + } +} +void merge_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(VkPhysicalDeviceFragmentDensityMapFeaturesEXT & current, VkPhysicalDeviceFragmentDensityMapFeaturesEXT const& merge_in) { + current.fragmentDensityMap = current.fragmentDensityMap || merge_in.fragmentDensityMap; + current.fragmentDensityMapDynamic = current.fragmentDensityMapDynamic || merge_in.fragmentDensityMapDynamic; + current.fragmentDensityMapNonSubsampledImages = current.fragmentDensityMapNonSubsampledImages || merge_in.fragmentDensityMapNonSubsampledImages; +} +#endif //(defined(VK_EXT_fragment_density_map)) +#if (defined(VK_AMD_device_coherent_memory)) +void compare_VkPhysicalDeviceCoherentMemoryFeaturesAMD(std::vector & error_list, VkPhysicalDeviceCoherentMemoryFeaturesAMD const& supported, VkPhysicalDeviceCoherentMemoryFeaturesAMD const& requested) { + if (requested.deviceCoherentMemory && !supported.deviceCoherentMemory) { + error_list.push_back("Missing feature VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory"); + } +} +void merge_VkPhysicalDeviceCoherentMemoryFeaturesAMD(VkPhysicalDeviceCoherentMemoryFeaturesAMD & current, VkPhysicalDeviceCoherentMemoryFeaturesAMD const& merge_in) { + current.deviceCoherentMemory = current.deviceCoherentMemory || merge_in.deviceCoherentMemory; +} +#endif //(defined(VK_AMD_device_coherent_memory)) +#if (defined(VK_EXT_shader_image_atomic_int64)) +void compare_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& supported, VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& requested) { + if (requested.shaderImageInt64Atomics && !supported.shaderImageInt64Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT::shaderImageInt64Atomics"); + } + if (requested.sparseImageInt64Atomics && !supported.sparseImageInt64Atomics) { + 
error_list.push_back("Missing feature VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT::sparseImageInt64Atomics"); + } +} +void merge_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT & current, VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& merge_in) { + current.shaderImageInt64Atomics = current.shaderImageInt64Atomics || merge_in.shaderImageInt64Atomics; + current.sparseImageInt64Atomics = current.sparseImageInt64Atomics || merge_in.sparseImageInt64Atomics; +} +#endif //(defined(VK_EXT_shader_image_atomic_int64)) +#if (defined(VK_EXT_memory_priority)) +void compare_VkPhysicalDeviceMemoryPriorityFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMemoryPriorityFeaturesEXT const& supported, VkPhysicalDeviceMemoryPriorityFeaturesEXT const& requested) { + if (requested.memoryPriority && !supported.memoryPriority) { + error_list.push_back("Missing feature VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority"); + } +} +void merge_VkPhysicalDeviceMemoryPriorityFeaturesEXT(VkPhysicalDeviceMemoryPriorityFeaturesEXT & current, VkPhysicalDeviceMemoryPriorityFeaturesEXT const& merge_in) { + current.memoryPriority = current.memoryPriority || merge_in.memoryPriority; +} +#endif //(defined(VK_EXT_memory_priority)) +#if (defined(VK_NV_dedicated_allocation_image_aliasing)) +void compare_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(std::vector & error_list, VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& supported, VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& requested) { + if (requested.dedicatedAllocationImageAliasing && !supported.dedicatedAllocationImageAliasing) { + error_list.push_back("Missing feature VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV::dedicatedAllocationImageAliasing"); + } +} +void merge_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV & current, 
VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& merge_in) { + current.dedicatedAllocationImageAliasing = current.dedicatedAllocationImageAliasing || merge_in.dedicatedAllocationImageAliasing; +} +#endif //(defined(VK_NV_dedicated_allocation_image_aliasing)) +#if (defined(VK_EXT_buffer_device_address)) +void compare_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const& supported, VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const& requested) { + if (requested.bufferDeviceAddress && !supported.bufferDeviceAddress) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeaturesEXT::bufferDeviceAddress"); + } + if (requested.bufferDeviceAddressCaptureReplay && !supported.bufferDeviceAddressCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeaturesEXT::bufferDeviceAddressCaptureReplay"); + } + if (requested.bufferDeviceAddressMultiDevice && !supported.bufferDeviceAddressMultiDevice) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferDeviceAddressFeaturesEXT::bufferDeviceAddressMultiDevice"); + } +} +void merge_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(VkPhysicalDeviceBufferDeviceAddressFeaturesEXT & current, VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const& merge_in) { + current.bufferDeviceAddress = current.bufferDeviceAddress || merge_in.bufferDeviceAddress; + current.bufferDeviceAddressCaptureReplay = current.bufferDeviceAddressCaptureReplay || merge_in.bufferDeviceAddressCaptureReplay; + current.bufferDeviceAddressMultiDevice = current.bufferDeviceAddressMultiDevice || merge_in.bufferDeviceAddressMultiDevice; +} +#endif //(defined(VK_EXT_buffer_device_address)) +#if (defined(VK_EXT_buffer_device_address)) +void compare_VkPhysicalDeviceBufferAddressFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBufferAddressFeaturesEXT const& supported, VkPhysicalDeviceBufferAddressFeaturesEXT const& 
requested) { + if (requested.bufferDeviceAddress && !supported.bufferDeviceAddress) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferAddressFeaturesEXT::bufferDeviceAddress"); + } + if (requested.bufferDeviceAddressCaptureReplay && !supported.bufferDeviceAddressCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferAddressFeaturesEXT::bufferDeviceAddressCaptureReplay"); + } + if (requested.bufferDeviceAddressMultiDevice && !supported.bufferDeviceAddressMultiDevice) { + error_list.push_back("Missing feature VkPhysicalDeviceBufferAddressFeaturesEXT::bufferDeviceAddressMultiDevice"); + } +} +void merge_VkPhysicalDeviceBufferAddressFeaturesEXT(VkPhysicalDeviceBufferAddressFeaturesEXT & current, VkPhysicalDeviceBufferAddressFeaturesEXT const& merge_in) { + current.bufferDeviceAddress = current.bufferDeviceAddress || merge_in.bufferDeviceAddress; + current.bufferDeviceAddressCaptureReplay = current.bufferDeviceAddressCaptureReplay || merge_in.bufferDeviceAddressCaptureReplay; + current.bufferDeviceAddressMultiDevice = current.bufferDeviceAddressMultiDevice || merge_in.bufferDeviceAddressMultiDevice; +} +#endif //(defined(VK_EXT_buffer_device_address)) +#if (defined(VK_NV_cooperative_matrix)) +void compare_VkPhysicalDeviceCooperativeMatrixFeaturesNV(std::vector & error_list, VkPhysicalDeviceCooperativeMatrixFeaturesNV const& supported, VkPhysicalDeviceCooperativeMatrixFeaturesNV const& requested) { + if (requested.cooperativeMatrix && !supported.cooperativeMatrix) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrixFeaturesNV::cooperativeMatrix"); + } + if (requested.cooperativeMatrixRobustBufferAccess && !supported.cooperativeMatrixRobustBufferAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrixFeaturesNV::cooperativeMatrixRobustBufferAccess"); + } +} +void merge_VkPhysicalDeviceCooperativeMatrixFeaturesNV(VkPhysicalDeviceCooperativeMatrixFeaturesNV & current, 
VkPhysicalDeviceCooperativeMatrixFeaturesNV const& merge_in) { + current.cooperativeMatrix = current.cooperativeMatrix || merge_in.cooperativeMatrix; + current.cooperativeMatrixRobustBufferAccess = current.cooperativeMatrixRobustBufferAccess || merge_in.cooperativeMatrixRobustBufferAccess; +} +#endif //(defined(VK_NV_cooperative_matrix)) +#if (defined(VK_NV_coverage_reduction_mode)) +void compare_VkPhysicalDeviceCoverageReductionModeFeaturesNV(std::vector & error_list, VkPhysicalDeviceCoverageReductionModeFeaturesNV const& supported, VkPhysicalDeviceCoverageReductionModeFeaturesNV const& requested) { + if (requested.coverageReductionMode && !supported.coverageReductionMode) { + error_list.push_back("Missing feature VkPhysicalDeviceCoverageReductionModeFeaturesNV::coverageReductionMode"); + } +} +void merge_VkPhysicalDeviceCoverageReductionModeFeaturesNV(VkPhysicalDeviceCoverageReductionModeFeaturesNV & current, VkPhysicalDeviceCoverageReductionModeFeaturesNV const& merge_in) { + current.coverageReductionMode = current.coverageReductionMode || merge_in.coverageReductionMode; +} +#endif //(defined(VK_NV_coverage_reduction_mode)) +#if (defined(VK_EXT_fragment_shader_interlock)) +void compare_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const& supported, VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const& requested) { + if (requested.fragmentShaderSampleInterlock && !supported.fragmentShaderSampleInterlock) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT::fragmentShaderSampleInterlock"); + } + if (requested.fragmentShaderPixelInterlock && !supported.fragmentShaderPixelInterlock) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT::fragmentShaderPixelInterlock"); + } + if (requested.fragmentShaderShadingRateInterlock && !supported.fragmentShaderShadingRateInterlock) { + 
error_list.push_back("Missing feature VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT::fragmentShaderShadingRateInterlock"); + } +} +void merge_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT & current, VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const& merge_in) { + current.fragmentShaderSampleInterlock = current.fragmentShaderSampleInterlock || merge_in.fragmentShaderSampleInterlock; + current.fragmentShaderPixelInterlock = current.fragmentShaderPixelInterlock || merge_in.fragmentShaderPixelInterlock; + current.fragmentShaderShadingRateInterlock = current.fragmentShaderShadingRateInterlock || merge_in.fragmentShaderShadingRateInterlock; +} +#endif //(defined(VK_EXT_fragment_shader_interlock)) +#if (defined(VK_EXT_ycbcr_image_arrays)) +void compare_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(std::vector & error_list, VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const& supported, VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const& requested) { + if (requested.ycbcrImageArrays && !supported.ycbcrImageArrays) { + error_list.push_back("Missing feature VkPhysicalDeviceYcbcrImageArraysFeaturesEXT::ycbcrImageArrays"); + } +} +void merge_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(VkPhysicalDeviceYcbcrImageArraysFeaturesEXT & current, VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const& merge_in) { + current.ycbcrImageArrays = current.ycbcrImageArrays || merge_in.ycbcrImageArrays; +} +#endif //(defined(VK_EXT_ycbcr_image_arrays)) +#if (defined(VK_EXT_provoking_vertex)) +void compare_VkPhysicalDeviceProvokingVertexFeaturesEXT(std::vector & error_list, VkPhysicalDeviceProvokingVertexFeaturesEXT const& supported, VkPhysicalDeviceProvokingVertexFeaturesEXT const& requested) { + if (requested.provokingVertexLast && !supported.provokingVertexLast) { + error_list.push_back("Missing feature VkPhysicalDeviceProvokingVertexFeaturesEXT::provokingVertexLast"); + } + if (requested.transformFeedbackPreservesProvokingVertex && 
!supported.transformFeedbackPreservesProvokingVertex) { + error_list.push_back("Missing feature VkPhysicalDeviceProvokingVertexFeaturesEXT::transformFeedbackPreservesProvokingVertex"); + } +} +void merge_VkPhysicalDeviceProvokingVertexFeaturesEXT(VkPhysicalDeviceProvokingVertexFeaturesEXT & current, VkPhysicalDeviceProvokingVertexFeaturesEXT const& merge_in) { + current.provokingVertexLast = current.provokingVertexLast || merge_in.provokingVertexLast; + current.transformFeedbackPreservesProvokingVertex = current.transformFeedbackPreservesProvokingVertex || merge_in.transformFeedbackPreservesProvokingVertex; +} +#endif //(defined(VK_EXT_provoking_vertex)) +#if (defined(VK_EXT_shader_atomic_float)) +void compare_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const& supported, VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const& requested) { + if (requested.shaderBufferFloat32Atomics && !supported.shaderBufferFloat32Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderBufferFloat32Atomics"); + } + if (requested.shaderBufferFloat32AtomicAdd && !supported.shaderBufferFloat32AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderBufferFloat32AtomicAdd"); + } + if (requested.shaderBufferFloat64Atomics && !supported.shaderBufferFloat64Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderBufferFloat64Atomics"); + } + if (requested.shaderBufferFloat64AtomicAdd && !supported.shaderBufferFloat64AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderBufferFloat64AtomicAdd"); + } + if (requested.shaderSharedFloat32Atomics && !supported.shaderSharedFloat32Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderSharedFloat32Atomics"); + } + if 
(requested.shaderSharedFloat32AtomicAdd && !supported.shaderSharedFloat32AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderSharedFloat32AtomicAdd"); + } + if (requested.shaderSharedFloat64Atomics && !supported.shaderSharedFloat64Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderSharedFloat64Atomics"); + } + if (requested.shaderSharedFloat64AtomicAdd && !supported.shaderSharedFloat64AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderSharedFloat64AtomicAdd"); + } + if (requested.shaderImageFloat32Atomics && !supported.shaderImageFloat32Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderImageFloat32Atomics"); + } + if (requested.shaderImageFloat32AtomicAdd && !supported.shaderImageFloat32AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::shaderImageFloat32AtomicAdd"); + } + if (requested.sparseImageFloat32Atomics && !supported.sparseImageFloat32Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::sparseImageFloat32Atomics"); + } + if (requested.sparseImageFloat32AtomicAdd && !supported.sparseImageFloat32AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloatFeaturesEXT::sparseImageFloat32AtomicAdd"); + } +} +void merge_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(VkPhysicalDeviceShaderAtomicFloatFeaturesEXT & current, VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const& merge_in) { + current.shaderBufferFloat32Atomics = current.shaderBufferFloat32Atomics || merge_in.shaderBufferFloat32Atomics; + current.shaderBufferFloat32AtomicAdd = current.shaderBufferFloat32AtomicAdd || merge_in.shaderBufferFloat32AtomicAdd; + current.shaderBufferFloat64Atomics = current.shaderBufferFloat64Atomics || merge_in.shaderBufferFloat64Atomics; + 
current.shaderBufferFloat64AtomicAdd = current.shaderBufferFloat64AtomicAdd || merge_in.shaderBufferFloat64AtomicAdd; + current.shaderSharedFloat32Atomics = current.shaderSharedFloat32Atomics || merge_in.shaderSharedFloat32Atomics; + current.shaderSharedFloat32AtomicAdd = current.shaderSharedFloat32AtomicAdd || merge_in.shaderSharedFloat32AtomicAdd; + current.shaderSharedFloat64Atomics = current.shaderSharedFloat64Atomics || merge_in.shaderSharedFloat64Atomics; + current.shaderSharedFloat64AtomicAdd = current.shaderSharedFloat64AtomicAdd || merge_in.shaderSharedFloat64AtomicAdd; + current.shaderImageFloat32Atomics = current.shaderImageFloat32Atomics || merge_in.shaderImageFloat32Atomics; + current.shaderImageFloat32AtomicAdd = current.shaderImageFloat32AtomicAdd || merge_in.shaderImageFloat32AtomicAdd; + current.sparseImageFloat32Atomics = current.sparseImageFloat32Atomics || merge_in.sparseImageFloat32Atomics; + current.sparseImageFloat32AtomicAdd = current.sparseImageFloat32AtomicAdd || merge_in.sparseImageFloat32AtomicAdd; +} +#endif //(defined(VK_EXT_shader_atomic_float)) +#if (defined(VK_EXT_extended_dynamic_state)) +void compare_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(std::vector & error_list, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const& supported, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const& requested) { + if (requested.extendedDynamicState && !supported.extendedDynamicState) { + error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicStateFeaturesEXT::extendedDynamicState"); + } +} +void merge_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT & current, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const& merge_in) { + current.extendedDynamicState = current.extendedDynamicState || merge_in.extendedDynamicState; +} +#endif //(defined(VK_EXT_extended_dynamic_state)) +#if (defined(VK_EXT_map_memory_placed)) +void 
compare_VkPhysicalDeviceMapMemoryPlacedFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMapMemoryPlacedFeaturesEXT const& supported, VkPhysicalDeviceMapMemoryPlacedFeaturesEXT const& requested) { + if (requested.memoryMapPlaced && !supported.memoryMapPlaced) { + error_list.push_back("Missing feature VkPhysicalDeviceMapMemoryPlacedFeaturesEXT::memoryMapPlaced"); + } + if (requested.memoryMapRangePlaced && !supported.memoryMapRangePlaced) { + error_list.push_back("Missing feature VkPhysicalDeviceMapMemoryPlacedFeaturesEXT::memoryMapRangePlaced"); + } + if (requested.memoryUnmapReserve && !supported.memoryUnmapReserve) { + error_list.push_back("Missing feature VkPhysicalDeviceMapMemoryPlacedFeaturesEXT::memoryUnmapReserve"); + } +} +void merge_VkPhysicalDeviceMapMemoryPlacedFeaturesEXT(VkPhysicalDeviceMapMemoryPlacedFeaturesEXT & current, VkPhysicalDeviceMapMemoryPlacedFeaturesEXT const& merge_in) { + current.memoryMapPlaced = current.memoryMapPlaced || merge_in.memoryMapPlaced; + current.memoryMapRangePlaced = current.memoryMapRangePlaced || merge_in.memoryMapRangePlaced; + current.memoryUnmapReserve = current.memoryUnmapReserve || merge_in.memoryUnmapReserve; +} +#endif //(defined(VK_EXT_map_memory_placed)) +#if (defined(VK_EXT_shader_atomic_float2)) +void compare_VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT const& supported, VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT const& requested) { + if (requested.shaderBufferFloat16Atomics && !supported.shaderBufferFloat16Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderBufferFloat16Atomics"); + } + if (requested.shaderBufferFloat16AtomicAdd && !supported.shaderBufferFloat16AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderBufferFloat16AtomicAdd"); + } + if (requested.shaderBufferFloat16AtomicMinMax && 
!supported.shaderBufferFloat16AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderBufferFloat16AtomicMinMax"); + } + if (requested.shaderBufferFloat32AtomicMinMax && !supported.shaderBufferFloat32AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderBufferFloat32AtomicMinMax"); + } + if (requested.shaderBufferFloat64AtomicMinMax && !supported.shaderBufferFloat64AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderBufferFloat64AtomicMinMax"); + } + if (requested.shaderSharedFloat16Atomics && !supported.shaderSharedFloat16Atomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderSharedFloat16Atomics"); + } + if (requested.shaderSharedFloat16AtomicAdd && !supported.shaderSharedFloat16AtomicAdd) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderSharedFloat16AtomicAdd"); + } + if (requested.shaderSharedFloat16AtomicMinMax && !supported.shaderSharedFloat16AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderSharedFloat16AtomicMinMax"); + } + if (requested.shaderSharedFloat32AtomicMinMax && !supported.shaderSharedFloat32AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderSharedFloat32AtomicMinMax"); + } + if (requested.shaderSharedFloat64AtomicMinMax && !supported.shaderSharedFloat64AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderSharedFloat64AtomicMinMax"); + } + if (requested.shaderImageFloat32AtomicMinMax && !supported.shaderImageFloat32AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::shaderImageFloat32AtomicMinMax"); + } + if (requested.sparseImageFloat32AtomicMinMax && 
!supported.sparseImageFloat32AtomicMinMax) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT::sparseImageFloat32AtomicMinMax"); + } +} +void merge_VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT(VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT & current, VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT const& merge_in) { + current.shaderBufferFloat16Atomics = current.shaderBufferFloat16Atomics || merge_in.shaderBufferFloat16Atomics; + current.shaderBufferFloat16AtomicAdd = current.shaderBufferFloat16AtomicAdd || merge_in.shaderBufferFloat16AtomicAdd; + current.shaderBufferFloat16AtomicMinMax = current.shaderBufferFloat16AtomicMinMax || merge_in.shaderBufferFloat16AtomicMinMax; + current.shaderBufferFloat32AtomicMinMax = current.shaderBufferFloat32AtomicMinMax || merge_in.shaderBufferFloat32AtomicMinMax; + current.shaderBufferFloat64AtomicMinMax = current.shaderBufferFloat64AtomicMinMax || merge_in.shaderBufferFloat64AtomicMinMax; + current.shaderSharedFloat16Atomics = current.shaderSharedFloat16Atomics || merge_in.shaderSharedFloat16Atomics; + current.shaderSharedFloat16AtomicAdd = current.shaderSharedFloat16AtomicAdd || merge_in.shaderSharedFloat16AtomicAdd; + current.shaderSharedFloat16AtomicMinMax = current.shaderSharedFloat16AtomicMinMax || merge_in.shaderSharedFloat16AtomicMinMax; + current.shaderSharedFloat32AtomicMinMax = current.shaderSharedFloat32AtomicMinMax || merge_in.shaderSharedFloat32AtomicMinMax; + current.shaderSharedFloat64AtomicMinMax = current.shaderSharedFloat64AtomicMinMax || merge_in.shaderSharedFloat64AtomicMinMax; + current.shaderImageFloat32AtomicMinMax = current.shaderImageFloat32AtomicMinMax || merge_in.shaderImageFloat32AtomicMinMax; + current.sparseImageFloat32AtomicMinMax = current.sparseImageFloat32AtomicMinMax || merge_in.sparseImageFloat32AtomicMinMax; +} +#endif //(defined(VK_EXT_shader_atomic_float2)) +#if (defined(VK_NV_device_generated_commands)) +void 
compare_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(std::vector & error_list, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& supported, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& requested) { + if (requested.deviceGeneratedCommands && !supported.deviceGeneratedCommands) { + error_list.push_back("Missing feature VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV::deviceGeneratedCommands"); + } +} +void merge_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV & current, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& merge_in) { + current.deviceGeneratedCommands = current.deviceGeneratedCommands || merge_in.deviceGeneratedCommands; +} +#endif //(defined(VK_NV_device_generated_commands)) +#if (defined(VK_NV_inherited_viewport_scissor)) +void compare_VkPhysicalDeviceInheritedViewportScissorFeaturesNV(std::vector & error_list, VkPhysicalDeviceInheritedViewportScissorFeaturesNV const& supported, VkPhysicalDeviceInheritedViewportScissorFeaturesNV const& requested) { + if (requested.inheritedViewportScissor2D && !supported.inheritedViewportScissor2D) { + error_list.push_back("Missing feature VkPhysicalDeviceInheritedViewportScissorFeaturesNV::inheritedViewportScissor2D"); + } +} +void merge_VkPhysicalDeviceInheritedViewportScissorFeaturesNV(VkPhysicalDeviceInheritedViewportScissorFeaturesNV & current, VkPhysicalDeviceInheritedViewportScissorFeaturesNV const& merge_in) { + current.inheritedViewportScissor2D = current.inheritedViewportScissor2D || merge_in.inheritedViewportScissor2D; +} +#endif //(defined(VK_NV_inherited_viewport_scissor)) +#if (defined(VK_EXT_texel_buffer_alignment)) +void compare_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(std::vector & error_list, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const& supported, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const& requested) { + if (requested.texelBufferAlignment && !supported.texelBufferAlignment) { + 
error_list.push_back("Missing feature VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT::texelBufferAlignment");
+    }
+}
+void merge_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT & current, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const& merge_in) {
+    current.texelBufferAlignment = current.texelBufferAlignment || merge_in.texelBufferAlignment;
+}
+#endif //(defined(VK_EXT_texel_buffer_alignment))
+#if (defined(VK_EXT_depth_bias_control))
+void compare_VkPhysicalDeviceDepthBiasControlFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceDepthBiasControlFeaturesEXT const& supported, VkPhysicalDeviceDepthBiasControlFeaturesEXT const& requested) {
+    if (requested.depthBiasControl && !supported.depthBiasControl) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDepthBiasControlFeaturesEXT::depthBiasControl");
+    }
+    if (requested.leastRepresentableValueForceUnormRepresentation && !supported.leastRepresentableValueForceUnormRepresentation) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDepthBiasControlFeaturesEXT::leastRepresentableValueForceUnormRepresentation");
+    }
+    if (requested.floatRepresentation && !supported.floatRepresentation) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDepthBiasControlFeaturesEXT::floatRepresentation");
+    }
+    if (requested.depthBiasExact && !supported.depthBiasExact) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDepthBiasControlFeaturesEXT::depthBiasExact");
+    }
+}
+void merge_VkPhysicalDeviceDepthBiasControlFeaturesEXT(VkPhysicalDeviceDepthBiasControlFeaturesEXT & current, VkPhysicalDeviceDepthBiasControlFeaturesEXT const& merge_in) {
+    current.depthBiasControl = current.depthBiasControl || merge_in.depthBiasControl;
+    current.leastRepresentableValueForceUnormRepresentation = current.leastRepresentableValueForceUnormRepresentation || merge_in.leastRepresentableValueForceUnormRepresentation;
+    current.floatRepresentation = current.floatRepresentation || merge_in.floatRepresentation;
+    current.depthBiasExact = current.depthBiasExact || merge_in.depthBiasExact;
+}
+#endif //(defined(VK_EXT_depth_bias_control))
+#if (defined(VK_EXT_device_memory_report))
+void compare_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const& supported, VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const& requested) {
+    if (requested.deviceMemoryReport && !supported.deviceMemoryReport) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDeviceMemoryReportFeaturesEXT::deviceMemoryReport");
+    }
+}
+void merge_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(VkPhysicalDeviceDeviceMemoryReportFeaturesEXT & current, VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const& merge_in) {
+    current.deviceMemoryReport = current.deviceMemoryReport || merge_in.deviceMemoryReport;
+}
+#endif //(defined(VK_EXT_device_memory_report))
+#if (defined(VK_EXT_custom_border_color))
+void compare_VkPhysicalDeviceCustomBorderColorFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceCustomBorderColorFeaturesEXT const& supported, VkPhysicalDeviceCustomBorderColorFeaturesEXT const& requested) {
+    if (requested.customBorderColors && !supported.customBorderColors) {
+        error_list.push_back("Missing feature VkPhysicalDeviceCustomBorderColorFeaturesEXT::customBorderColors");
+    }
+    if (requested.customBorderColorWithoutFormat && !supported.customBorderColorWithoutFormat) {
+        error_list.push_back("Missing feature VkPhysicalDeviceCustomBorderColorFeaturesEXT::customBorderColorWithoutFormat");
+    }
+}
+void merge_VkPhysicalDeviceCustomBorderColorFeaturesEXT(VkPhysicalDeviceCustomBorderColorFeaturesEXT & current, VkPhysicalDeviceCustomBorderColorFeaturesEXT const& merge_in) {
+    current.customBorderColors = current.customBorderColors || merge_in.customBorderColors;
+    current.customBorderColorWithoutFormat = current.customBorderColorWithoutFormat || merge_in.customBorderColorWithoutFormat;
+}
+#endif //(defined(VK_EXT_custom_border_color))
+#if (defined(VK_NV_present_barrier))
+void compare_VkPhysicalDevicePresentBarrierFeaturesNV(std::vector<std::string> & error_list, VkPhysicalDevicePresentBarrierFeaturesNV const& supported, VkPhysicalDevicePresentBarrierFeaturesNV const& requested) {
+    if (requested.presentBarrier && !supported.presentBarrier) {
+        error_list.push_back("Missing feature VkPhysicalDevicePresentBarrierFeaturesNV::presentBarrier");
+    }
+}
+void merge_VkPhysicalDevicePresentBarrierFeaturesNV(VkPhysicalDevicePresentBarrierFeaturesNV & current, VkPhysicalDevicePresentBarrierFeaturesNV const& merge_in) {
+    current.presentBarrier = current.presentBarrier || merge_in.presentBarrier;
+}
+#endif //(defined(VK_NV_present_barrier))
+#if (defined(VK_NV_device_diagnostics_config))
+void compare_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(std::vector<std::string> & error_list, VkPhysicalDeviceDiagnosticsConfigFeaturesNV const& supported, VkPhysicalDeviceDiagnosticsConfigFeaturesNV const& requested) {
+    if (requested.diagnosticsConfig && !supported.diagnosticsConfig) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDiagnosticsConfigFeaturesNV::diagnosticsConfig");
+    }
+}
+void merge_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(VkPhysicalDeviceDiagnosticsConfigFeaturesNV & current, VkPhysicalDeviceDiagnosticsConfigFeaturesNV const& merge_in) {
+    current.diagnosticsConfig = current.diagnosticsConfig || merge_in.diagnosticsConfig;
+}
+#endif //(defined(VK_NV_device_diagnostics_config))
+#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_cuda_kernel_launch))
+void compare_VkPhysicalDeviceCudaKernelLaunchFeaturesNV(std::vector<std::string> & error_list, VkPhysicalDeviceCudaKernelLaunchFeaturesNV const& supported, VkPhysicalDeviceCudaKernelLaunchFeaturesNV const& requested) {
+    if (requested.cudaKernelLaunchFeatures && !supported.cudaKernelLaunchFeatures) {
+        error_list.push_back("Missing feature VkPhysicalDeviceCudaKernelLaunchFeaturesNV::cudaKernelLaunchFeatures");
+    }
+}
+void merge_VkPhysicalDeviceCudaKernelLaunchFeaturesNV(VkPhysicalDeviceCudaKernelLaunchFeaturesNV & current, VkPhysicalDeviceCudaKernelLaunchFeaturesNV const& merge_in) {
+    current.cudaKernelLaunchFeatures = current.cudaKernelLaunchFeatures || merge_in.cudaKernelLaunchFeatures;
+}
+#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_cuda_kernel_launch))
+#if (defined(VK_QCOM_tile_shading))
+void compare_VkPhysicalDeviceTileShadingFeaturesQCOM(std::vector<std::string> & error_list, VkPhysicalDeviceTileShadingFeaturesQCOM const& supported, VkPhysicalDeviceTileShadingFeaturesQCOM const& requested) {
+    if (requested.tileShading && !supported.tileShading) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShading");
+    }
+    if (requested.tileShadingFragmentStage && !supported.tileShadingFragmentStage) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingFragmentStage");
+    }
+    if (requested.tileShadingColorAttachments && !supported.tileShadingColorAttachments) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingColorAttachments");
+    }
+    if (requested.tileShadingDepthAttachments && !supported.tileShadingDepthAttachments) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingDepthAttachments");
+    }
+    if (requested.tileShadingStencilAttachments && !supported.tileShadingStencilAttachments) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingStencilAttachments");
+    }
+    if (requested.tileShadingInputAttachments && !supported.tileShadingInputAttachments) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingInputAttachments");
+    }
+    if (requested.tileShadingSampledAttachments && !supported.tileShadingSampledAttachments) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingSampledAttachments");
+    }
+    if (requested.tileShadingPerTileDraw && !supported.tileShadingPerTileDraw) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingPerTileDraw");
+    }
+    if (requested.tileShadingPerTileDispatch && !supported.tileShadingPerTileDispatch) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingPerTileDispatch");
+    }
+    if (requested.tileShadingDispatchTile && !supported.tileShadingDispatchTile) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingDispatchTile");
+    }
+    if (requested.tileShadingApron && !supported.tileShadingApron) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingApron");
+    }
+    if (requested.tileShadingAnisotropicApron && !supported.tileShadingAnisotropicApron) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingAnisotropicApron");
+    }
+    if (requested.tileShadingAtomicOps && !supported.tileShadingAtomicOps) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingAtomicOps");
+    }
+    if (requested.tileShadingImageProcessing && !supported.tileShadingImageProcessing) {
+        error_list.push_back("Missing feature VkPhysicalDeviceTileShadingFeaturesQCOM::tileShadingImageProcessing");
+    }
+}
+void merge_VkPhysicalDeviceTileShadingFeaturesQCOM(VkPhysicalDeviceTileShadingFeaturesQCOM & current, VkPhysicalDeviceTileShadingFeaturesQCOM const& merge_in) {
+    current.tileShading = current.tileShading || merge_in.tileShading;
+    current.tileShadingFragmentStage = current.tileShadingFragmentStage || merge_in.tileShadingFragmentStage;
+    current.tileShadingColorAttachments = current.tileShadingColorAttachments || merge_in.tileShadingColorAttachments;
+    current.tileShadingDepthAttachments = current.tileShadingDepthAttachments || merge_in.tileShadingDepthAttachments;
+    current.tileShadingStencilAttachments = current.tileShadingStencilAttachments || merge_in.tileShadingStencilAttachments;
+    current.tileShadingInputAttachments = current.tileShadingInputAttachments || merge_in.tileShadingInputAttachments;
+    current.tileShadingSampledAttachments = current.tileShadingSampledAttachments || merge_in.tileShadingSampledAttachments;
+    current.tileShadingPerTileDraw = current.tileShadingPerTileDraw || merge_in.tileShadingPerTileDraw;
+    current.tileShadingPerTileDispatch = current.tileShadingPerTileDispatch || merge_in.tileShadingPerTileDispatch;
+    current.tileShadingDispatchTile = current.tileShadingDispatchTile || merge_in.tileShadingDispatchTile;
+    current.tileShadingApron = current.tileShadingApron || merge_in.tileShadingApron;
+    current.tileShadingAnisotropicApron = current.tileShadingAnisotropicApron || merge_in.tileShadingAnisotropicApron;
+    current.tileShadingAtomicOps = current.tileShadingAtomicOps || merge_in.tileShadingAtomicOps;
+    current.tileShadingImageProcessing = current.tileShadingImageProcessing || merge_in.tileShadingImageProcessing;
+}
+#endif //(defined(VK_QCOM_tile_shading))
+#if (defined(VK_EXT_descriptor_buffer))
+void compare_VkPhysicalDeviceDescriptorBufferFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceDescriptorBufferFeaturesEXT const& supported, VkPhysicalDeviceDescriptorBufferFeaturesEXT const& requested) {
+    if (requested.descriptorBuffer && !supported.descriptorBuffer) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDescriptorBufferFeaturesEXT::descriptorBuffer");
+    }
+    if (requested.descriptorBufferCaptureReplay && !supported.descriptorBufferCaptureReplay) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDescriptorBufferFeaturesEXT::descriptorBufferCaptureReplay");
+    }
+    if (requested.descriptorBufferImageLayoutIgnored && !supported.descriptorBufferImageLayoutIgnored) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDescriptorBufferFeaturesEXT::descriptorBufferImageLayoutIgnored");
+    }
+    if (requested.descriptorBufferPushDescriptors && !supported.descriptorBufferPushDescriptors) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDescriptorBufferFeaturesEXT::descriptorBufferPushDescriptors");
+    }
+}
+void merge_VkPhysicalDeviceDescriptorBufferFeaturesEXT(VkPhysicalDeviceDescriptorBufferFeaturesEXT & current, VkPhysicalDeviceDescriptorBufferFeaturesEXT const& merge_in) {
+    current.descriptorBuffer = current.descriptorBuffer || merge_in.descriptorBuffer;
+    current.descriptorBufferCaptureReplay = current.descriptorBufferCaptureReplay || merge_in.descriptorBufferCaptureReplay;
+    current.descriptorBufferImageLayoutIgnored = current.descriptorBufferImageLayoutIgnored || merge_in.descriptorBufferImageLayoutIgnored;
+    current.descriptorBufferPushDescriptors = current.descriptorBufferPushDescriptors || merge_in.descriptorBufferPushDescriptors;
+}
+#endif //(defined(VK_EXT_descriptor_buffer))
+#if (defined(VK_EXT_graphics_pipeline_library))
+void compare_VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT const& supported, VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT const& requested) {
+    if (requested.graphicsPipelineLibrary && !supported.graphicsPipelineLibrary) {
+        error_list.push_back("Missing feature VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT::graphicsPipelineLibrary");
+    }
+}
+void merge_VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT(VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT & current, VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT const& merge_in) {
+    current.graphicsPipelineLibrary = current.graphicsPipelineLibrary || merge_in.graphicsPipelineLibrary;
+}
+#endif //(defined(VK_EXT_graphics_pipeline_library))
+#if (defined(VK_AMD_shader_early_and_late_fragment_tests))
+void compare_VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD(std::vector<std::string> & error_list, VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD const& supported, VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD const& requested) {
+    if (requested.shaderEarlyAndLateFragmentTests && !supported.shaderEarlyAndLateFragmentTests) {
+        error_list.push_back("Missing feature VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD::shaderEarlyAndLateFragmentTests");
+    }
+}
+void merge_VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD(VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD & current, VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD const& merge_in) {
+    current.shaderEarlyAndLateFragmentTests = current.shaderEarlyAndLateFragmentTests || merge_in.shaderEarlyAndLateFragmentTests;
+}
+#endif //(defined(VK_AMD_shader_early_and_late_fragment_tests))
+#if (defined(VK_NV_fragment_shading_rate_enums))
+void compare_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(std::vector<std::string> & error_list, VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& supported, VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& requested) {
+    if (requested.fragmentShadingRateEnums && !supported.fragmentShadingRateEnums) {
+        error_list.push_back("Missing feature VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV::fragmentShadingRateEnums");
+    }
+    if (requested.supersampleFragmentShadingRates && !supported.supersampleFragmentShadingRates) {
+        error_list.push_back("Missing feature VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV::supersampleFragmentShadingRates");
+    }
+    if (requested.noInvocationFragmentShadingRates && !supported.noInvocationFragmentShadingRates) {
+        error_list.push_back("Missing feature VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV::noInvocationFragmentShadingRates");
+    }
+}
+void merge_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV & current, VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& merge_in) {
+    current.fragmentShadingRateEnums = current.fragmentShadingRateEnums || merge_in.fragmentShadingRateEnums;
+    current.supersampleFragmentShadingRates = current.supersampleFragmentShadingRates || merge_in.supersampleFragmentShadingRates;
+    current.noInvocationFragmentShadingRates = current.noInvocationFragmentShadingRates || merge_in.noInvocationFragmentShadingRates;
+}
+#endif //(defined(VK_NV_fragment_shading_rate_enums))
+#if (defined(VK_NV_ray_tracing_motion_blur))
+void compare_VkPhysicalDeviceRayTracingMotionBlurFeaturesNV(std::vector<std::string> & error_list, VkPhysicalDeviceRayTracingMotionBlurFeaturesNV const& supported, VkPhysicalDeviceRayTracingMotionBlurFeaturesNV const& requested) {
+    if (requested.rayTracingMotionBlur && !supported.rayTracingMotionBlur) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRayTracingMotionBlurFeaturesNV::rayTracingMotionBlur");
+    }
+    if (requested.rayTracingMotionBlurPipelineTraceRaysIndirect && !supported.rayTracingMotionBlurPipelineTraceRaysIndirect) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRayTracingMotionBlurFeaturesNV::rayTracingMotionBlurPipelineTraceRaysIndirect");
+    }
+}
+void merge_VkPhysicalDeviceRayTracingMotionBlurFeaturesNV(VkPhysicalDeviceRayTracingMotionBlurFeaturesNV & current, VkPhysicalDeviceRayTracingMotionBlurFeaturesNV const& merge_in) {
+    current.rayTracingMotionBlur = current.rayTracingMotionBlur || merge_in.rayTracingMotionBlur;
+    current.rayTracingMotionBlurPipelineTraceRaysIndirect = current.rayTracingMotionBlurPipelineTraceRaysIndirect || merge_in.rayTracingMotionBlurPipelineTraceRaysIndirect;
+}
+#endif //(defined(VK_NV_ray_tracing_motion_blur))
+#if (defined(VK_EXT_ycbcr_2plane_444_formats))
+void compare_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT const& supported, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT const& requested) {
+    if (requested.ycbcr2plane444Formats && !supported.ycbcr2plane444Formats) {
+        error_list.push_back("Missing feature VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT::ycbcr2plane444Formats");
+    }
+}
+void merge_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT(VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT & current, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT const& merge_in) {
+    current.ycbcr2plane444Formats = current.ycbcr2plane444Formats || merge_in.ycbcr2plane444Formats;
+}
+#endif //(defined(VK_EXT_ycbcr_2plane_444_formats))
+#if (defined(VK_EXT_fragment_density_map2))
+void compare_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const& supported, VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const& requested) {
+    if (requested.fragmentDensityMapDeferred && !supported.fragmentDensityMapDeferred) {
+        error_list.push_back("Missing feature VkPhysicalDeviceFragmentDensityMap2FeaturesEXT::fragmentDensityMapDeferred");
+    }
+}
+void merge_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(VkPhysicalDeviceFragmentDensityMap2FeaturesEXT & current, VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const& merge_in) {
+    current.fragmentDensityMapDeferred = current.fragmentDensityMapDeferred || merge_in.fragmentDensityMapDeferred;
+}
+#endif //(defined(VK_EXT_fragment_density_map2))
+#if (defined(VK_EXT_image_compression_control))
+void compare_VkPhysicalDeviceImageCompressionControlFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceImageCompressionControlFeaturesEXT const& supported, VkPhysicalDeviceImageCompressionControlFeaturesEXT const& requested) {
+    if (requested.imageCompressionControl && !supported.imageCompressionControl) {
+        error_list.push_back("Missing feature VkPhysicalDeviceImageCompressionControlFeaturesEXT::imageCompressionControl");
+    }
+}
+void merge_VkPhysicalDeviceImageCompressionControlFeaturesEXT(VkPhysicalDeviceImageCompressionControlFeaturesEXT & current, VkPhysicalDeviceImageCompressionControlFeaturesEXT const& merge_in) {
+    current.imageCompressionControl = current.imageCompressionControl || merge_in.imageCompressionControl;
+}
+#endif //(defined(VK_EXT_image_compression_control))
+#if (defined(VK_EXT_attachment_feedback_loop_layout))
+void compare_VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT const& supported, VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT const& requested) {
+    if (requested.attachmentFeedbackLoopLayout && !supported.attachmentFeedbackLoopLayout) {
+        error_list.push_back("Missing feature VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT::attachmentFeedbackLoopLayout");
+    }
+}
+void merge_VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT(VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT & current, VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT const& merge_in) {
+    current.attachmentFeedbackLoopLayout = current.attachmentFeedbackLoopLayout || merge_in.attachmentFeedbackLoopLayout;
+}
+#endif //(defined(VK_EXT_attachment_feedback_loop_layout))
+#if (defined(VK_EXT_4444_formats))
+void compare_VkPhysicalDevice4444FormatsFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDevice4444FormatsFeaturesEXT const& supported, VkPhysicalDevice4444FormatsFeaturesEXT const& requested) {
+    if (requested.formatA4R4G4B4 && !supported.formatA4R4G4B4) {
+        error_list.push_back("Missing feature VkPhysicalDevice4444FormatsFeaturesEXT::formatA4R4G4B4");
+    }
+    if (requested.formatA4B4G4R4 && !supported.formatA4B4G4R4) {
+        error_list.push_back("Missing feature VkPhysicalDevice4444FormatsFeaturesEXT::formatA4B4G4R4");
+    }
+}
+void merge_VkPhysicalDevice4444FormatsFeaturesEXT(VkPhysicalDevice4444FormatsFeaturesEXT & current, VkPhysicalDevice4444FormatsFeaturesEXT const& merge_in) {
+    current.formatA4R4G4B4 = current.formatA4R4G4B4 || merge_in.formatA4R4G4B4;
+    current.formatA4B4G4R4 = current.formatA4B4G4R4 || merge_in.formatA4B4G4R4;
+}
+#endif //(defined(VK_EXT_4444_formats))
+#if (defined(VK_EXT_device_fault))
+void compare_VkPhysicalDeviceFaultFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceFaultFeaturesEXT const& supported, VkPhysicalDeviceFaultFeaturesEXT const& requested) {
+    if (requested.deviceFault && !supported.deviceFault) {
+        error_list.push_back("Missing feature VkPhysicalDeviceFaultFeaturesEXT::deviceFault");
+    }
+    if (requested.deviceFaultVendorBinary && !supported.deviceFaultVendorBinary) {
+        error_list.push_back("Missing feature VkPhysicalDeviceFaultFeaturesEXT::deviceFaultVendorBinary");
+    }
+}
+void merge_VkPhysicalDeviceFaultFeaturesEXT(VkPhysicalDeviceFaultFeaturesEXT & current, VkPhysicalDeviceFaultFeaturesEXT const& merge_in) {
+    current.deviceFault = current.deviceFault || merge_in.deviceFault;
+    current.deviceFaultVendorBinary = current.deviceFaultVendorBinary || merge_in.deviceFaultVendorBinary;
+}
+#endif //(defined(VK_EXT_device_fault))
+#if (defined(VK_EXT_rasterization_order_attachment_access))
+void compare_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT const& supported, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT const& requested) {
+    if (requested.rasterizationOrderColorAttachmentAccess && !supported.rasterizationOrderColorAttachmentAccess) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT::rasterizationOrderColorAttachmentAccess");
+    }
+    if (requested.rasterizationOrderDepthAttachmentAccess && !supported.rasterizationOrderDepthAttachmentAccess) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT::rasterizationOrderDepthAttachmentAccess");
+    }
+    if (requested.rasterizationOrderStencilAttachmentAccess && !supported.rasterizationOrderStencilAttachmentAccess) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT::rasterizationOrderStencilAttachmentAccess");
+    }
+}
+void merge_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT(VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT & current, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT const& merge_in) {
+    current.rasterizationOrderColorAttachmentAccess = current.rasterizationOrderColorAttachmentAccess || merge_in.rasterizationOrderColorAttachmentAccess;
+    current.rasterizationOrderDepthAttachmentAccess = current.rasterizationOrderDepthAttachmentAccess || merge_in.rasterizationOrderDepthAttachmentAccess;
+    current.rasterizationOrderStencilAttachmentAccess = current.rasterizationOrderStencilAttachmentAccess || merge_in.rasterizationOrderStencilAttachmentAccess;
+}
+#endif //(defined(VK_EXT_rasterization_order_attachment_access))
+#if (defined(VK_ARM_rasterization_order_attachment_access))
+void compare_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM(std::vector<std::string> & error_list, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM const& supported, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM const& requested) {
+    if (requested.rasterizationOrderColorAttachmentAccess && !supported.rasterizationOrderColorAttachmentAccess) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM::rasterizationOrderColorAttachmentAccess");
+    }
+    if (requested.rasterizationOrderDepthAttachmentAccess && !supported.rasterizationOrderDepthAttachmentAccess) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM::rasterizationOrderDepthAttachmentAccess");
+    }
+    if (requested.rasterizationOrderStencilAttachmentAccess && !supported.rasterizationOrderStencilAttachmentAccess) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM::rasterizationOrderStencilAttachmentAccess");
+    }
+}
+void merge_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM(VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM & current, VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM const& merge_in) {
+    current.rasterizationOrderColorAttachmentAccess = current.rasterizationOrderColorAttachmentAccess || merge_in.rasterizationOrderColorAttachmentAccess;
+    current.rasterizationOrderDepthAttachmentAccess = current.rasterizationOrderDepthAttachmentAccess || merge_in.rasterizationOrderDepthAttachmentAccess;
+    current.rasterizationOrderStencilAttachmentAccess = current.rasterizationOrderStencilAttachmentAccess || merge_in.rasterizationOrderStencilAttachmentAccess;
+}
+#endif //(defined(VK_ARM_rasterization_order_attachment_access))
+#if (defined(VK_EXT_rgba10x6_formats))
+void compare_VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT const& supported, VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT const& requested) {
+    if (requested.formatRgba10x6WithoutYCbCrSampler && !supported.formatRgba10x6WithoutYCbCrSampler) {
+        error_list.push_back("Missing feature VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT::formatRgba10x6WithoutYCbCrSampler");
+    }
+}
+void merge_VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT(VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT & current, VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT const& merge_in) {
+    current.formatRgba10x6WithoutYCbCrSampler = current.formatRgba10x6WithoutYCbCrSampler || merge_in.formatRgba10x6WithoutYCbCrSampler;
+}
+#endif //(defined(VK_EXT_rgba10x6_formats))
+#if (defined(VK_EXT_mutable_descriptor_type))
+void compare_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT const& supported, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT const& requested) {
+    if (requested.mutableDescriptorType && !supported.mutableDescriptorType) {
+        error_list.push_back("Missing feature VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT::mutableDescriptorType");
+    }
+}
+void merge_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT(VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT & current, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT const& merge_in) {
+    current.mutableDescriptorType = current.mutableDescriptorType || merge_in.mutableDescriptorType;
+}
+#endif //(defined(VK_EXT_mutable_descriptor_type))
+#if (defined(VK_VALVE_mutable_descriptor_type))
+void compare_VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE(std::vector<std::string> & error_list, VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE const& supported, VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE const& requested) {
+    if (requested.mutableDescriptorType && !supported.mutableDescriptorType) {
+        error_list.push_back("Missing feature VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE::mutableDescriptorType");
+    }
+}
+void merge_VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE(VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE & current, VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE const& merge_in) {
+    current.mutableDescriptorType = current.mutableDescriptorType || merge_in.mutableDescriptorType;
+}
+#endif //(defined(VK_VALVE_mutable_descriptor_type))
+#if (defined(VK_EXT_vertex_input_dynamic_state))
+void compare_VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT const& supported, VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT const& requested) {
+    if (requested.vertexInputDynamicState && !supported.vertexInputDynamicState) {
+        error_list.push_back("Missing feature VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT::vertexInputDynamicState");
+    }
+}
+void merge_VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT(VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT & current, VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT const& merge_in) {
+    current.vertexInputDynamicState = current.vertexInputDynamicState || merge_in.vertexInputDynamicState;
+}
+#endif //(defined(VK_EXT_vertex_input_dynamic_state))
+#if (defined(VK_EXT_device_address_binding_report))
+void compare_VkPhysicalDeviceAddressBindingReportFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceAddressBindingReportFeaturesEXT const& supported, VkPhysicalDeviceAddressBindingReportFeaturesEXT const& requested) {
+    if (requested.reportAddressBinding && !supported.reportAddressBinding) {
+        error_list.push_back("Missing feature VkPhysicalDeviceAddressBindingReportFeaturesEXT::reportAddressBinding");
+    }
+}
+void merge_VkPhysicalDeviceAddressBindingReportFeaturesEXT(VkPhysicalDeviceAddressBindingReportFeaturesEXT & current, VkPhysicalDeviceAddressBindingReportFeaturesEXT const& merge_in) {
+    current.reportAddressBinding = current.reportAddressBinding || merge_in.reportAddressBinding;
+}
+#endif //(defined(VK_EXT_device_address_binding_report))
+#if (defined(VK_EXT_depth_clip_control))
+void compare_VkPhysicalDeviceDepthClipControlFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceDepthClipControlFeaturesEXT const& supported, VkPhysicalDeviceDepthClipControlFeaturesEXT const& requested) {
+    if (requested.depthClipControl && !supported.depthClipControl) {
+        error_list.push_back("Missing feature VkPhysicalDeviceDepthClipControlFeaturesEXT::depthClipControl");
+    }
+}
+void merge_VkPhysicalDeviceDepthClipControlFeaturesEXT(VkPhysicalDeviceDepthClipControlFeaturesEXT & current, VkPhysicalDeviceDepthClipControlFeaturesEXT const& merge_in) {
+    current.depthClipControl = current.depthClipControl || merge_in.depthClipControl;
+}
+#endif //(defined(VK_EXT_depth_clip_control))
+#if (defined(VK_EXT_primitive_topology_list_restart))
+void compare_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT const& supported, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT const& requested) {
+    if (requested.primitiveTopologyListRestart && !supported.primitiveTopologyListRestart) {
+        error_list.push_back("Missing feature VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT::primitiveTopologyListRestart");
+    }
+    if (requested.primitiveTopologyPatchListRestart && !supported.primitiveTopologyPatchListRestart) {
+        error_list.push_back("Missing feature VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT::primitiveTopologyPatchListRestart");
+    }
+}
+void merge_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT(VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT & current, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT const& merge_in) {
+    current.primitiveTopologyListRestart = current.primitiveTopologyListRestart || merge_in.primitiveTopologyListRestart;
+    current.primitiveTopologyPatchListRestart = current.primitiveTopologyPatchListRestart || merge_in.primitiveTopologyPatchListRestart;
+}
+#endif //(defined(VK_EXT_primitive_topology_list_restart))
+#if (defined(VK_HUAWEI_subpass_shading))
+void compare_VkPhysicalDeviceSubpassShadingFeaturesHUAWEI(std::vector<std::string> & error_list, VkPhysicalDeviceSubpassShadingFeaturesHUAWEI const& supported, VkPhysicalDeviceSubpassShadingFeaturesHUAWEI const& requested) {
+    if (requested.subpassShading && !supported.subpassShading) {
+        error_list.push_back("Missing feature VkPhysicalDeviceSubpassShadingFeaturesHUAWEI::subpassShading");
+    }
+}
+void merge_VkPhysicalDeviceSubpassShadingFeaturesHUAWEI(VkPhysicalDeviceSubpassShadingFeaturesHUAWEI & current, VkPhysicalDeviceSubpassShadingFeaturesHUAWEI const& merge_in) {
+    current.subpassShading = current.subpassShading || merge_in.subpassShading;
+}
+#endif //(defined(VK_HUAWEI_subpass_shading))
+#if (defined(VK_HUAWEI_invocation_mask))
+void compare_VkPhysicalDeviceInvocationMaskFeaturesHUAWEI(std::vector<std::string> & error_list, VkPhysicalDeviceInvocationMaskFeaturesHUAWEI const& supported, VkPhysicalDeviceInvocationMaskFeaturesHUAWEI const& requested) {
+    if (requested.invocationMask && !supported.invocationMask) {
+        error_list.push_back("Missing feature VkPhysicalDeviceInvocationMaskFeaturesHUAWEI::invocationMask");
+    }
+}
+void merge_VkPhysicalDeviceInvocationMaskFeaturesHUAWEI(VkPhysicalDeviceInvocationMaskFeaturesHUAWEI & current, VkPhysicalDeviceInvocationMaskFeaturesHUAWEI const& merge_in) {
+    current.invocationMask = current.invocationMask || merge_in.invocationMask;
+}
+#endif //(defined(VK_HUAWEI_invocation_mask))
+#if (defined(VK_NV_external_memory_rdma))
+void compare_VkPhysicalDeviceExternalMemoryRDMAFeaturesNV(std::vector<std::string> & error_list, VkPhysicalDeviceExternalMemoryRDMAFeaturesNV const& supported, VkPhysicalDeviceExternalMemoryRDMAFeaturesNV const& requested) {
+    if (requested.externalMemoryRDMA && !supported.externalMemoryRDMA) {
+        error_list.push_back("Missing feature VkPhysicalDeviceExternalMemoryRDMAFeaturesNV::externalMemoryRDMA");
+    }
+}
+void merge_VkPhysicalDeviceExternalMemoryRDMAFeaturesNV(VkPhysicalDeviceExternalMemoryRDMAFeaturesNV & current, VkPhysicalDeviceExternalMemoryRDMAFeaturesNV const& merge_in) {
+    current.externalMemoryRDMA = current.externalMemoryRDMA || merge_in.externalMemoryRDMA;
+}
+#endif //(defined(VK_NV_external_memory_rdma))
+#if (defined(VK_EXT_pipeline_properties))
+void compare_VkPhysicalDevicePipelinePropertiesFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDevicePipelinePropertiesFeaturesEXT const& supported, VkPhysicalDevicePipelinePropertiesFeaturesEXT const& requested) {
+    if (requested.pipelinePropertiesIdentifier && !supported.pipelinePropertiesIdentifier) {
+        error_list.push_back("Missing feature VkPhysicalDevicePipelinePropertiesFeaturesEXT::pipelinePropertiesIdentifier");
+    }
+}
+void merge_VkPhysicalDevicePipelinePropertiesFeaturesEXT(VkPhysicalDevicePipelinePropertiesFeaturesEXT & current, VkPhysicalDevicePipelinePropertiesFeaturesEXT const& merge_in) {
+    current.pipelinePropertiesIdentifier = current.pipelinePropertiesIdentifier || merge_in.pipelinePropertiesIdentifier;
+}
+#endif //(defined(VK_EXT_pipeline_properties))
+#if (defined(VK_EXT_frame_boundary))
+void compare_VkPhysicalDeviceFrameBoundaryFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceFrameBoundaryFeaturesEXT const& supported, VkPhysicalDeviceFrameBoundaryFeaturesEXT const& requested) {
+    if (requested.frameBoundary && !supported.frameBoundary) {
+        error_list.push_back("Missing feature VkPhysicalDeviceFrameBoundaryFeaturesEXT::frameBoundary");
+    }
+}
+void merge_VkPhysicalDeviceFrameBoundaryFeaturesEXT(VkPhysicalDeviceFrameBoundaryFeaturesEXT & current, VkPhysicalDeviceFrameBoundaryFeaturesEXT const& merge_in) {
+    current.frameBoundary = current.frameBoundary || merge_in.frameBoundary;
+}
+#endif //(defined(VK_EXT_frame_boundary))
+#if (defined(VK_EXT_multisampled_render_to_single_sampled))
+void compare_VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT const& supported, VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT const& requested) {
+    if (requested.multisampledRenderToSingleSampled && !supported.multisampledRenderToSingleSampled) {
+        error_list.push_back("Missing feature VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT::multisampledRenderToSingleSampled");
+    }
+}
+void merge_VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT(VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT & current, VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT const& merge_in) {
+    current.multisampledRenderToSingleSampled = current.multisampledRenderToSingleSampled || merge_in.multisampledRenderToSingleSampled;
+}
+#endif //(defined(VK_EXT_multisampled_render_to_single_sampled))
+#if (defined(VK_EXT_extended_dynamic_state2))
+void compare_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT const& supported, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT const& requested) {
+    if (requested.extendedDynamicState2 && !supported.extendedDynamicState2) {
+        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState2FeaturesEXT::extendedDynamicState2");
+    }
+    if (requested.extendedDynamicState2LogicOp && !supported.extendedDynamicState2LogicOp) {
+        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState2FeaturesEXT::extendedDynamicState2LogicOp");
+    }
+    if (requested.extendedDynamicState2PatchControlPoints && !supported.extendedDynamicState2PatchControlPoints) {
+        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState2FeaturesEXT::extendedDynamicState2PatchControlPoints");
+    }
+}
+void merge_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT(VkPhysicalDeviceExtendedDynamicState2FeaturesEXT & current, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT const& merge_in) {
+    current.extendedDynamicState2 = current.extendedDynamicState2 || merge_in.extendedDynamicState2;
+    current.extendedDynamicState2LogicOp = current.extendedDynamicState2LogicOp || merge_in.extendedDynamicState2LogicOp;
+    current.extendedDynamicState2PatchControlPoints = current.extendedDynamicState2PatchControlPoints || merge_in.extendedDynamicState2PatchControlPoints;
+}
+#endif //(defined(VK_EXT_extended_dynamic_state2))
+#if (defined(VK_EXT_color_write_enable))
+void compare_VkPhysicalDeviceColorWriteEnableFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDeviceColorWriteEnableFeaturesEXT const& supported, VkPhysicalDeviceColorWriteEnableFeaturesEXT const& requested) {
+    if (requested.colorWriteEnable && !supported.colorWriteEnable) {
+        error_list.push_back("Missing feature VkPhysicalDeviceColorWriteEnableFeaturesEXT::colorWriteEnable");
+    }
+}
+void merge_VkPhysicalDeviceColorWriteEnableFeaturesEXT(VkPhysicalDeviceColorWriteEnableFeaturesEXT & current, VkPhysicalDeviceColorWriteEnableFeaturesEXT const& merge_in) {
+    current.colorWriteEnable = current.colorWriteEnable || merge_in.colorWriteEnable;
+}
+#endif //(defined(VK_EXT_color_write_enable))
+#if (defined(VK_EXT_primitives_generated_query))
+void compare_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT(std::vector<std::string> & error_list, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT const& supported, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT const& requested) {
+    if (requested.primitivesGeneratedQuery && !supported.primitivesGeneratedQuery) {
+        error_list.push_back("Missing feature VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT::primitivesGeneratedQuery");
+    }
+    if (requested.primitivesGeneratedQueryWithRasterizerDiscard && !supported.primitivesGeneratedQueryWithRasterizerDiscard) {
+        error_list.push_back("Missing feature VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT::primitivesGeneratedQueryWithRasterizerDiscard");
+    }
+    if (requested.primitivesGeneratedQueryWithNonZeroStreams && !supported.primitivesGeneratedQueryWithNonZeroStreams) {
+        error_list.push_back("Missing feature VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT::primitivesGeneratedQueryWithNonZeroStreams");
+    }
+}
+void merge_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT(VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT & current, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT const& merge_in) {
+    current.primitivesGeneratedQuery = current.primitivesGeneratedQuery || merge_in.primitivesGeneratedQuery;
+    current.primitivesGeneratedQueryWithRasterizerDiscard = current.primitivesGeneratedQueryWithRasterizerDiscard || merge_in.primitivesGeneratedQueryWithRasterizerDiscard;
+    current.primitivesGeneratedQueryWithNonZeroStreams = current.primitivesGeneratedQueryWithNonZeroStreams || merge_in.primitivesGeneratedQueryWithNonZeroStreams;
+}
+#endif //(defined(VK_EXT_primitives_generated_query))
+#if (defined(VK_VALVE_video_encode_rgb_conversion))
+void compare_VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE(std::vector<std::string> & error_list,
VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE const& supported, VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE const& requested) { + if (requested.videoEncodeRgbConversion && !supported.videoEncodeRgbConversion) { + error_list.push_back("Missing feature VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE::videoEncodeRgbConversion"); + } +} +void merge_VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE(VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE & current, VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE const& merge_in) { + current.videoEncodeRgbConversion = current.videoEncodeRgbConversion || merge_in.videoEncodeRgbConversion; +} +#endif //(defined(VK_VALVE_video_encode_rgb_conversion)) +#if (defined(VK_EXT_image_view_min_lod)) +void compare_VkPhysicalDeviceImageViewMinLodFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageViewMinLodFeaturesEXT const& supported, VkPhysicalDeviceImageViewMinLodFeaturesEXT const& requested) { + if (requested.minLod && !supported.minLod) { + error_list.push_back("Missing feature VkPhysicalDeviceImageViewMinLodFeaturesEXT::minLod"); + } +} +void merge_VkPhysicalDeviceImageViewMinLodFeaturesEXT(VkPhysicalDeviceImageViewMinLodFeaturesEXT & current, VkPhysicalDeviceImageViewMinLodFeaturesEXT const& merge_in) { + current.minLod = current.minLod || merge_in.minLod; +} +#endif //(defined(VK_EXT_image_view_min_lod)) +#if (defined(VK_EXT_multi_draw)) +void compare_VkPhysicalDeviceMultiDrawFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMultiDrawFeaturesEXT const& supported, VkPhysicalDeviceMultiDrawFeaturesEXT const& requested) { + if (requested.multiDraw && !supported.multiDraw) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiDrawFeaturesEXT::multiDraw"); + } +} +void merge_VkPhysicalDeviceMultiDrawFeaturesEXT(VkPhysicalDeviceMultiDrawFeaturesEXT & current, VkPhysicalDeviceMultiDrawFeaturesEXT const& merge_in) { + current.multiDraw = current.multiDraw || merge_in.multiDraw; +} 
+#endif //(defined(VK_EXT_multi_draw)) +#if (defined(VK_EXT_image_2d_view_of_3d)) +void compare_VkPhysicalDeviceImage2DViewOf3DFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImage2DViewOf3DFeaturesEXT const& supported, VkPhysicalDeviceImage2DViewOf3DFeaturesEXT const& requested) { + if (requested.image2DViewOf3D && !supported.image2DViewOf3D) { + error_list.push_back("Missing feature VkPhysicalDeviceImage2DViewOf3DFeaturesEXT::image2DViewOf3D"); + } + if (requested.sampler2DViewOf3D && !supported.sampler2DViewOf3D) { + error_list.push_back("Missing feature VkPhysicalDeviceImage2DViewOf3DFeaturesEXT::sampler2DViewOf3D"); + } +} +void merge_VkPhysicalDeviceImage2DViewOf3DFeaturesEXT(VkPhysicalDeviceImage2DViewOf3DFeaturesEXT & current, VkPhysicalDeviceImage2DViewOf3DFeaturesEXT const& merge_in) { + current.image2DViewOf3D = current.image2DViewOf3D || merge_in.image2DViewOf3D; + current.sampler2DViewOf3D = current.sampler2DViewOf3D || merge_in.sampler2DViewOf3D; +} +#endif //(defined(VK_EXT_image_2d_view_of_3d)) +#if (defined(VK_EXT_shader_tile_image)) +void compare_VkPhysicalDeviceShaderTileImageFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderTileImageFeaturesEXT const& supported, VkPhysicalDeviceShaderTileImageFeaturesEXT const& requested) { + if (requested.shaderTileImageColorReadAccess && !supported.shaderTileImageColorReadAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderTileImageFeaturesEXT::shaderTileImageColorReadAccess"); + } + if (requested.shaderTileImageDepthReadAccess && !supported.shaderTileImageDepthReadAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderTileImageFeaturesEXT::shaderTileImageDepthReadAccess"); + } + if (requested.shaderTileImageStencilReadAccess && !supported.shaderTileImageStencilReadAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderTileImageFeaturesEXT::shaderTileImageStencilReadAccess"); + } +} +void 
merge_VkPhysicalDeviceShaderTileImageFeaturesEXT(VkPhysicalDeviceShaderTileImageFeaturesEXT & current, VkPhysicalDeviceShaderTileImageFeaturesEXT const& merge_in) { + current.shaderTileImageColorReadAccess = current.shaderTileImageColorReadAccess || merge_in.shaderTileImageColorReadAccess; + current.shaderTileImageDepthReadAccess = current.shaderTileImageDepthReadAccess || merge_in.shaderTileImageDepthReadAccess; + current.shaderTileImageStencilReadAccess = current.shaderTileImageStencilReadAccess || merge_in.shaderTileImageStencilReadAccess; +} +#endif //(defined(VK_EXT_shader_tile_image)) +#if (defined(VK_EXT_opacity_micromap)) +void compare_VkPhysicalDeviceOpacityMicromapFeaturesEXT(std::vector & error_list, VkPhysicalDeviceOpacityMicromapFeaturesEXT const& supported, VkPhysicalDeviceOpacityMicromapFeaturesEXT const& requested) { + if (requested.micromap && !supported.micromap) { + error_list.push_back("Missing feature VkPhysicalDeviceOpacityMicromapFeaturesEXT::micromap"); + } + if (requested.micromapCaptureReplay && !supported.micromapCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceOpacityMicromapFeaturesEXT::micromapCaptureReplay"); + } + if (requested.micromapHostCommands && !supported.micromapHostCommands) { + error_list.push_back("Missing feature VkPhysicalDeviceOpacityMicromapFeaturesEXT::micromapHostCommands"); + } +} +void merge_VkPhysicalDeviceOpacityMicromapFeaturesEXT(VkPhysicalDeviceOpacityMicromapFeaturesEXT & current, VkPhysicalDeviceOpacityMicromapFeaturesEXT const& merge_in) { + current.micromap = current.micromap || merge_in.micromap; + current.micromapCaptureReplay = current.micromapCaptureReplay || merge_in.micromapCaptureReplay; + current.micromapHostCommands = current.micromapHostCommands || merge_in.micromapHostCommands; +} +#endif //(defined(VK_EXT_opacity_micromap)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_displacement_micromap)) +void 
compare_VkPhysicalDeviceDisplacementMicromapFeaturesNV(std::vector & error_list, VkPhysicalDeviceDisplacementMicromapFeaturesNV const& supported, VkPhysicalDeviceDisplacementMicromapFeaturesNV const& requested) { + if (requested.displacementMicromap && !supported.displacementMicromap) { + error_list.push_back("Missing feature VkPhysicalDeviceDisplacementMicromapFeaturesNV::displacementMicromap"); + } +} +void merge_VkPhysicalDeviceDisplacementMicromapFeaturesNV(VkPhysicalDeviceDisplacementMicromapFeaturesNV & current, VkPhysicalDeviceDisplacementMicromapFeaturesNV const& merge_in) { + current.displacementMicromap = current.displacementMicromap || merge_in.displacementMicromap; +} +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_displacement_micromap)) +#if (defined(VK_HUAWEI_cluster_culling_shader)) +void compare_VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI(std::vector & error_list, VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI const& supported, VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI const& requested) { + if (requested.clustercullingShader && !supported.clustercullingShader) { + error_list.push_back("Missing feature VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI::clustercullingShader"); + } + if (requested.multiviewClusterCullingShader && !supported.multiviewClusterCullingShader) { + error_list.push_back("Missing feature VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI::multiviewClusterCullingShader"); + } +} +void merge_VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI(VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI & current, VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI const& merge_in) { + current.clustercullingShader = current.clustercullingShader || merge_in.clustercullingShader; + current.multiviewClusterCullingShader = current.multiviewClusterCullingShader || merge_in.multiviewClusterCullingShader; +} +#endif //(defined(VK_HUAWEI_cluster_culling_shader)) +#if (defined(VK_EXT_border_color_swizzle)) +void 
compare_VkPhysicalDeviceBorderColorSwizzleFeaturesEXT(std::vector & error_list, VkPhysicalDeviceBorderColorSwizzleFeaturesEXT const& supported, VkPhysicalDeviceBorderColorSwizzleFeaturesEXT const& requested) { + if (requested.borderColorSwizzle && !supported.borderColorSwizzle) { + error_list.push_back("Missing feature VkPhysicalDeviceBorderColorSwizzleFeaturesEXT::borderColorSwizzle"); + } + if (requested.borderColorSwizzleFromImage && !supported.borderColorSwizzleFromImage) { + error_list.push_back("Missing feature VkPhysicalDeviceBorderColorSwizzleFeaturesEXT::borderColorSwizzleFromImage"); + } +} +void merge_VkPhysicalDeviceBorderColorSwizzleFeaturesEXT(VkPhysicalDeviceBorderColorSwizzleFeaturesEXT & current, VkPhysicalDeviceBorderColorSwizzleFeaturesEXT const& merge_in) { + current.borderColorSwizzle = current.borderColorSwizzle || merge_in.borderColorSwizzle; + current.borderColorSwizzleFromImage = current.borderColorSwizzleFromImage || merge_in.borderColorSwizzleFromImage; +} +#endif //(defined(VK_EXT_border_color_swizzle)) +#if (defined(VK_EXT_pageable_device_local_memory)) +void compare_VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT(std::vector & error_list, VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT const& supported, VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT const& requested) { + if (requested.pageableDeviceLocalMemory && !supported.pageableDeviceLocalMemory) { + error_list.push_back("Missing feature VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT::pageableDeviceLocalMemory"); + } +} +void merge_VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT(VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT & current, VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT const& merge_in) { + current.pageableDeviceLocalMemory = current.pageableDeviceLocalMemory || merge_in.pageableDeviceLocalMemory; +} +#endif //(defined(VK_EXT_pageable_device_local_memory)) +#if (defined(VK_ARM_scheduling_controls)) +void 
compare_VkPhysicalDeviceSchedulingControlsFeaturesARM(std::vector & error_list, VkPhysicalDeviceSchedulingControlsFeaturesARM const& supported, VkPhysicalDeviceSchedulingControlsFeaturesARM const& requested) { + if (requested.schedulingControls && !supported.schedulingControls) { + error_list.push_back("Missing feature VkPhysicalDeviceSchedulingControlsFeaturesARM::schedulingControls"); + } +} +void merge_VkPhysicalDeviceSchedulingControlsFeaturesARM(VkPhysicalDeviceSchedulingControlsFeaturesARM & current, VkPhysicalDeviceSchedulingControlsFeaturesARM const& merge_in) { + current.schedulingControls = current.schedulingControls || merge_in.schedulingControls; +} +#endif //(defined(VK_ARM_scheduling_controls)) +#if (defined(VK_EXT_image_sliced_view_of_3d)) +void compare_VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT const& supported, VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT const& requested) { + if (requested.imageSlicedViewOf3D && !supported.imageSlicedViewOf3D) { + error_list.push_back("Missing feature VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT::imageSlicedViewOf3D"); + } +} +void merge_VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT(VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT & current, VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT const& merge_in) { + current.imageSlicedViewOf3D = current.imageSlicedViewOf3D || merge_in.imageSlicedViewOf3D; +} +#endif //(defined(VK_EXT_image_sliced_view_of_3d)) +#if (defined(VK_VALVE_descriptor_set_host_mapping)) +void compare_VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE(std::vector & error_list, VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE const& supported, VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE const& requested) { + if (requested.descriptorSetHostMapping && !supported.descriptorSetHostMapping) { + error_list.push_back("Missing feature 
VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE::descriptorSetHostMapping"); + } +} +void merge_VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE(VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE & current, VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE const& merge_in) { + current.descriptorSetHostMapping = current.descriptorSetHostMapping || merge_in.descriptorSetHostMapping; +} +#endif //(defined(VK_VALVE_descriptor_set_host_mapping)) +#if (defined(VK_EXT_non_seamless_cube_map)) +void compare_VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT(std::vector & error_list, VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT const& supported, VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT const& requested) { + if (requested.nonSeamlessCubeMap && !supported.nonSeamlessCubeMap) { + error_list.push_back("Missing feature VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT::nonSeamlessCubeMap"); + } +} +void merge_VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT(VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT & current, VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT const& merge_in) { + current.nonSeamlessCubeMap = current.nonSeamlessCubeMap || merge_in.nonSeamlessCubeMap; +} +#endif //(defined(VK_EXT_non_seamless_cube_map)) +#if (defined(VK_ARM_render_pass_striped)) +void compare_VkPhysicalDeviceRenderPassStripedFeaturesARM(std::vector & error_list, VkPhysicalDeviceRenderPassStripedFeaturesARM const& supported, VkPhysicalDeviceRenderPassStripedFeaturesARM const& requested) { + if (requested.renderPassStriped && !supported.renderPassStriped) { + error_list.push_back("Missing feature VkPhysicalDeviceRenderPassStripedFeaturesARM::renderPassStriped"); + } +} +void merge_VkPhysicalDeviceRenderPassStripedFeaturesARM(VkPhysicalDeviceRenderPassStripedFeaturesARM & current, VkPhysicalDeviceRenderPassStripedFeaturesARM const& merge_in) { + current.renderPassStriped = current.renderPassStriped || merge_in.renderPassStriped; +} +#endif //(defined(VK_ARM_render_pass_striped)) +#if 
(defined(VK_EXT_fragment_density_map_offset)) +void compare_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT const& supported, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT const& requested) { + if (requested.fragmentDensityMapOffset && !supported.fragmentDensityMapOffset) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT::fragmentDensityMapOffset"); + } +} +void merge_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT(VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT & current, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT const& merge_in) { + current.fragmentDensityMapOffset = current.fragmentDensityMapOffset || merge_in.fragmentDensityMapOffset; +} +#endif //(defined(VK_EXT_fragment_density_map_offset)) +#if (defined(VK_QCOM_fragment_density_map_offset)) +void compare_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM const& supported, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM const& requested) { + if (requested.fragmentDensityMapOffset && !supported.fragmentDensityMapOffset) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM::fragmentDensityMapOffset"); + } +} +void merge_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM(VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM & current, VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM const& merge_in) { + current.fragmentDensityMapOffset = current.fragmentDensityMapOffset || merge_in.fragmentDensityMapOffset; +} +#endif //(defined(VK_QCOM_fragment_density_map_offset)) +#if (defined(VK_NV_copy_memory_indirect)) +void compare_VkPhysicalDeviceCopyMemoryIndirectFeaturesNV(std::vector & error_list, VkPhysicalDeviceCopyMemoryIndirectFeaturesNV const& supported, VkPhysicalDeviceCopyMemoryIndirectFeaturesNV const& requested) { + if 
(requested.indirectCopy && !supported.indirectCopy) { + error_list.push_back("Missing feature VkPhysicalDeviceCopyMemoryIndirectFeaturesNV::indirectCopy"); + } +} +void merge_VkPhysicalDeviceCopyMemoryIndirectFeaturesNV(VkPhysicalDeviceCopyMemoryIndirectFeaturesNV & current, VkPhysicalDeviceCopyMemoryIndirectFeaturesNV const& merge_in) { + current.indirectCopy = current.indirectCopy || merge_in.indirectCopy; +} +#endif //(defined(VK_NV_copy_memory_indirect)) +#if (defined(VK_EXT_memory_decompression)) +void compare_VkPhysicalDeviceMemoryDecompressionFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMemoryDecompressionFeaturesEXT const& supported, VkPhysicalDeviceMemoryDecompressionFeaturesEXT const& requested) { + if (requested.memoryDecompression && !supported.memoryDecompression) { + error_list.push_back("Missing feature VkPhysicalDeviceMemoryDecompressionFeaturesEXT::memoryDecompression"); + } +} +void merge_VkPhysicalDeviceMemoryDecompressionFeaturesEXT(VkPhysicalDeviceMemoryDecompressionFeaturesEXT & current, VkPhysicalDeviceMemoryDecompressionFeaturesEXT const& merge_in) { + current.memoryDecompression = current.memoryDecompression || merge_in.memoryDecompression; +} +#endif //(defined(VK_EXT_memory_decompression)) +#if (defined(VK_NV_memory_decompression)) +void compare_VkPhysicalDeviceMemoryDecompressionFeaturesNV(std::vector & error_list, VkPhysicalDeviceMemoryDecompressionFeaturesNV const& supported, VkPhysicalDeviceMemoryDecompressionFeaturesNV const& requested) { + if (requested.memoryDecompression && !supported.memoryDecompression) { + error_list.push_back("Missing feature VkPhysicalDeviceMemoryDecompressionFeaturesNV::memoryDecompression"); + } +} +void merge_VkPhysicalDeviceMemoryDecompressionFeaturesNV(VkPhysicalDeviceMemoryDecompressionFeaturesNV & current, VkPhysicalDeviceMemoryDecompressionFeaturesNV const& merge_in) { + current.memoryDecompression = current.memoryDecompression || merge_in.memoryDecompression; +} +#endif 
//(defined(VK_NV_memory_decompression)) +#if (defined(VK_NV_device_generated_commands_compute)) +void compare_VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV(std::vector & error_list, VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV const& supported, VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV const& requested) { + if (requested.deviceGeneratedCompute && !supported.deviceGeneratedCompute) { + error_list.push_back("Missing feature VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV::deviceGeneratedCompute"); + } + if (requested.deviceGeneratedComputePipelines && !supported.deviceGeneratedComputePipelines) { + error_list.push_back("Missing feature VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV::deviceGeneratedComputePipelines"); + } + if (requested.deviceGeneratedComputeCaptureReplay && !supported.deviceGeneratedComputeCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV::deviceGeneratedComputeCaptureReplay"); + } +} +void merge_VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV(VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV & current, VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV const& merge_in) { + current.deviceGeneratedCompute = current.deviceGeneratedCompute || merge_in.deviceGeneratedCompute; + current.deviceGeneratedComputePipelines = current.deviceGeneratedComputePipelines || merge_in.deviceGeneratedComputePipelines; + current.deviceGeneratedComputeCaptureReplay = current.deviceGeneratedComputeCaptureReplay || merge_in.deviceGeneratedComputeCaptureReplay; +} +#endif //(defined(VK_NV_device_generated_commands_compute)) +#if (defined(VK_NV_ray_tracing_linear_swept_spheres)) +void compare_VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV(std::vector & error_list, VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV const& supported, VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV const& requested) { + if 
(requested.spheres && !supported.spheres) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV::spheres"); + } + if (requested.linearSweptSpheres && !supported.linearSweptSpheres) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV::linearSweptSpheres"); + } +} +void merge_VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV(VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV & current, VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV const& merge_in) { + current.spheres = current.spheres || merge_in.spheres; + current.linearSweptSpheres = current.linearSweptSpheres || merge_in.linearSweptSpheres; +} +#endif //(defined(VK_NV_ray_tracing_linear_swept_spheres)) +#if (defined(VK_NV_linear_color_attachment)) +void compare_VkPhysicalDeviceLinearColorAttachmentFeaturesNV(std::vector & error_list, VkPhysicalDeviceLinearColorAttachmentFeaturesNV const& supported, VkPhysicalDeviceLinearColorAttachmentFeaturesNV const& requested) { + if (requested.linearColorAttachment && !supported.linearColorAttachment) { + error_list.push_back("Missing feature VkPhysicalDeviceLinearColorAttachmentFeaturesNV::linearColorAttachment"); + } +} +void merge_VkPhysicalDeviceLinearColorAttachmentFeaturesNV(VkPhysicalDeviceLinearColorAttachmentFeaturesNV & current, VkPhysicalDeviceLinearColorAttachmentFeaturesNV const& merge_in) { + current.linearColorAttachment = current.linearColorAttachment || merge_in.linearColorAttachment; +} +#endif //(defined(VK_NV_linear_color_attachment)) +#if (defined(VK_EXT_image_compression_control_swapchain)) +void compare_VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT(std::vector & error_list, VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT const& supported, VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT const& requested) { + if (requested.imageCompressionControlSwapchain && !supported.imageCompressionControlSwapchain) { + 
error_list.push_back("Missing feature VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT::imageCompressionControlSwapchain"); + } +} +void merge_VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT(VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT & current, VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT const& merge_in) { + current.imageCompressionControlSwapchain = current.imageCompressionControlSwapchain || merge_in.imageCompressionControlSwapchain; +} +#endif //(defined(VK_EXT_image_compression_control_swapchain)) +#if (defined(VK_QCOM_image_processing)) +void compare_VkPhysicalDeviceImageProcessingFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceImageProcessingFeaturesQCOM const& supported, VkPhysicalDeviceImageProcessingFeaturesQCOM const& requested) { + if (requested.textureSampleWeighted && !supported.textureSampleWeighted) { + error_list.push_back("Missing feature VkPhysicalDeviceImageProcessingFeaturesQCOM::textureSampleWeighted"); + } + if (requested.textureBoxFilter && !supported.textureBoxFilter) { + error_list.push_back("Missing feature VkPhysicalDeviceImageProcessingFeaturesQCOM::textureBoxFilter"); + } + if (requested.textureBlockMatch && !supported.textureBlockMatch) { + error_list.push_back("Missing feature VkPhysicalDeviceImageProcessingFeaturesQCOM::textureBlockMatch"); + } +} +void merge_VkPhysicalDeviceImageProcessingFeaturesQCOM(VkPhysicalDeviceImageProcessingFeaturesQCOM & current, VkPhysicalDeviceImageProcessingFeaturesQCOM const& merge_in) { + current.textureSampleWeighted = current.textureSampleWeighted || merge_in.textureSampleWeighted; + current.textureBoxFilter = current.textureBoxFilter || merge_in.textureBoxFilter; + current.textureBlockMatch = current.textureBlockMatch || merge_in.textureBlockMatch; +} +#endif //(defined(VK_QCOM_image_processing)) +#if (defined(VK_EXT_nested_command_buffer)) +void compare_VkPhysicalDeviceNestedCommandBufferFeaturesEXT(std::vector & error_list, 
VkPhysicalDeviceNestedCommandBufferFeaturesEXT const& supported, VkPhysicalDeviceNestedCommandBufferFeaturesEXT const& requested) { + if (requested.nestedCommandBuffer && !supported.nestedCommandBuffer) { + error_list.push_back("Missing feature VkPhysicalDeviceNestedCommandBufferFeaturesEXT::nestedCommandBuffer"); + } + if (requested.nestedCommandBufferRendering && !supported.nestedCommandBufferRendering) { + error_list.push_back("Missing feature VkPhysicalDeviceNestedCommandBufferFeaturesEXT::nestedCommandBufferRendering"); + } + if (requested.nestedCommandBufferSimultaneousUse && !supported.nestedCommandBufferSimultaneousUse) { + error_list.push_back("Missing feature VkPhysicalDeviceNestedCommandBufferFeaturesEXT::nestedCommandBufferSimultaneousUse"); + } +} +void merge_VkPhysicalDeviceNestedCommandBufferFeaturesEXT(VkPhysicalDeviceNestedCommandBufferFeaturesEXT & current, VkPhysicalDeviceNestedCommandBufferFeaturesEXT const& merge_in) { + current.nestedCommandBuffer = current.nestedCommandBuffer || merge_in.nestedCommandBuffer; + current.nestedCommandBufferRendering = current.nestedCommandBufferRendering || merge_in.nestedCommandBufferRendering; + current.nestedCommandBufferSimultaneousUse = current.nestedCommandBufferSimultaneousUse || merge_in.nestedCommandBufferSimultaneousUse; +} +#endif //(defined(VK_EXT_nested_command_buffer)) +#if (defined(VK_EXT_extended_dynamic_state3)) +void compare_VkPhysicalDeviceExtendedDynamicState3FeaturesEXT(std::vector & error_list, VkPhysicalDeviceExtendedDynamicState3FeaturesEXT const& supported, VkPhysicalDeviceExtendedDynamicState3FeaturesEXT const& requested) { + if (requested.extendedDynamicState3TessellationDomainOrigin && !supported.extendedDynamicState3TessellationDomainOrigin) { + error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3TessellationDomainOrigin"); + } + if (requested.extendedDynamicState3DepthClampEnable && 
!supported.extendedDynamicState3DepthClampEnable) { // NOTE(review): continues the `if (requested.… &&` condition begun on the previous line
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3DepthClampEnable");
    }
    // One check per feature member: report it only when requested but unsupported.
    if (requested.extendedDynamicState3PolygonMode && !supported.extendedDynamicState3PolygonMode) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3PolygonMode");
    }
    if (requested.extendedDynamicState3RasterizationSamples && !supported.extendedDynamicState3RasterizationSamples) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3RasterizationSamples");
    }
    if (requested.extendedDynamicState3SampleMask && !supported.extendedDynamicState3SampleMask) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3SampleMask");
    }
    if (requested.extendedDynamicState3AlphaToCoverageEnable && !supported.extendedDynamicState3AlphaToCoverageEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3AlphaToCoverageEnable");
    }
    if (requested.extendedDynamicState3AlphaToOneEnable && !supported.extendedDynamicState3AlphaToOneEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3AlphaToOneEnable");
    }
    if (requested.extendedDynamicState3LogicOpEnable && !supported.extendedDynamicState3LogicOpEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3LogicOpEnable");
    }
    if (requested.extendedDynamicState3ColorBlendEnable && !supported.extendedDynamicState3ColorBlendEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ColorBlendEnable");
    }
    if (requested.extendedDynamicState3ColorBlendEquation && !supported.extendedDynamicState3ColorBlendEquation) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ColorBlendEquation");
    }
    if (requested.extendedDynamicState3ColorWriteMask && !supported.extendedDynamicState3ColorWriteMask) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ColorWriteMask");
    }
    if (requested.extendedDynamicState3RasterizationStream && !supported.extendedDynamicState3RasterizationStream) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3RasterizationStream");
    }
    if (requested.extendedDynamicState3ConservativeRasterizationMode && !supported.extendedDynamicState3ConservativeRasterizationMode) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ConservativeRasterizationMode");
    }
    if (requested.extendedDynamicState3ExtraPrimitiveOverestimationSize && !supported.extendedDynamicState3ExtraPrimitiveOverestimationSize) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ExtraPrimitiveOverestimationSize");
    }
    if (requested.extendedDynamicState3DepthClipEnable && !supported.extendedDynamicState3DepthClipEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3DepthClipEnable");
    }
    if (requested.extendedDynamicState3SampleLocationsEnable && !supported.extendedDynamicState3SampleLocationsEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3SampleLocationsEnable");
    }
    if (requested.extendedDynamicState3ColorBlendAdvanced && !supported.extendedDynamicState3ColorBlendAdvanced) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ColorBlendAdvanced");
    }
    if (requested.extendedDynamicState3ProvokingVertexMode && !supported.extendedDynamicState3ProvokingVertexMode) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ProvokingVertexMode");
    }
    if (requested.extendedDynamicState3LineRasterizationMode && !supported.extendedDynamicState3LineRasterizationMode) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3LineRasterizationMode");
    }
    if (requested.extendedDynamicState3LineStippleEnable && !supported.extendedDynamicState3LineStippleEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3LineStippleEnable");
    }
    if (requested.extendedDynamicState3DepthClipNegativeOneToOne && !supported.extendedDynamicState3DepthClipNegativeOneToOne) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3DepthClipNegativeOneToOne");
    }
    if (requested.extendedDynamicState3ViewportWScalingEnable && !supported.extendedDynamicState3ViewportWScalingEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ViewportWScalingEnable");
    }
    if (requested.extendedDynamicState3ViewportSwizzle && !supported.extendedDynamicState3ViewportSwizzle) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ViewportSwizzle");
    }
    if (requested.extendedDynamicState3CoverageToColorEnable && !supported.extendedDynamicState3CoverageToColorEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3CoverageToColorEnable");
    }
    if (requested.extendedDynamicState3CoverageToColorLocation && !supported.extendedDynamicState3CoverageToColorLocation) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3CoverageToColorLocation");
    }
    if (requested.extendedDynamicState3CoverageModulationMode && !supported.extendedDynamicState3CoverageModulationMode) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3CoverageModulationMode");
    }
    if (requested.extendedDynamicState3CoverageModulationTableEnable && !supported.extendedDynamicState3CoverageModulationTableEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3CoverageModulationTableEnable");
    }
    if (requested.extendedDynamicState3CoverageModulationTable && !supported.extendedDynamicState3CoverageModulationTable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3CoverageModulationTable");
    }
    if (requested.extendedDynamicState3CoverageReductionMode && !supported.extendedDynamicState3CoverageReductionMode) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3CoverageReductionMode");
    }
    if (requested.extendedDynamicState3RepresentativeFragmentTestEnable && !supported.extendedDynamicState3RepresentativeFragmentTestEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3RepresentativeFragmentTestEnable");
    }
    if (requested.extendedDynamicState3ShadingRateImageEnable && !supported.extendedDynamicState3ShadingRateImageEnable) {
        error_list.push_back("Missing feature VkPhysicalDeviceExtendedDynamicState3FeaturesEXT::extendedDynamicState3ShadingRateImageEnable");
    }
}
// ORs each extended-dynamic-state-3 feature flag of `merge_in` into `current`
// (the assignment's right-hand side continues on the next line of this file).
void merge_VkPhysicalDeviceExtendedDynamicState3FeaturesEXT(VkPhysicalDeviceExtendedDynamicState3FeaturesEXT & current, VkPhysicalDeviceExtendedDynamicState3FeaturesEXT const& merge_in) {
    current.extendedDynamicState3TessellationDomainOrigin =
current.extendedDynamicState3TessellationDomainOrigin || merge_in.extendedDynamicState3TessellationDomainOrigin; // completes the assignment begun on the previous line
    // Per-member logical OR: a feature is enabled in `current` if enabled in either input.
    current.extendedDynamicState3DepthClampEnable = current.extendedDynamicState3DepthClampEnable || merge_in.extendedDynamicState3DepthClampEnable;
    current.extendedDynamicState3PolygonMode = current.extendedDynamicState3PolygonMode || merge_in.extendedDynamicState3PolygonMode;
    current.extendedDynamicState3RasterizationSamples = current.extendedDynamicState3RasterizationSamples || merge_in.extendedDynamicState3RasterizationSamples;
    current.extendedDynamicState3SampleMask = current.extendedDynamicState3SampleMask || merge_in.extendedDynamicState3SampleMask;
    current.extendedDynamicState3AlphaToCoverageEnable = current.extendedDynamicState3AlphaToCoverageEnable || merge_in.extendedDynamicState3AlphaToCoverageEnable;
    current.extendedDynamicState3AlphaToOneEnable = current.extendedDynamicState3AlphaToOneEnable || merge_in.extendedDynamicState3AlphaToOneEnable;
    current.extendedDynamicState3LogicOpEnable = current.extendedDynamicState3LogicOpEnable || merge_in.extendedDynamicState3LogicOpEnable;
    current.extendedDynamicState3ColorBlendEnable = current.extendedDynamicState3ColorBlendEnable || merge_in.extendedDynamicState3ColorBlendEnable;
    current.extendedDynamicState3ColorBlendEquation = current.extendedDynamicState3ColorBlendEquation || merge_in.extendedDynamicState3ColorBlendEquation;
    current.extendedDynamicState3ColorWriteMask = current.extendedDynamicState3ColorWriteMask || merge_in.extendedDynamicState3ColorWriteMask;
    current.extendedDynamicState3RasterizationStream = current.extendedDynamicState3RasterizationStream || merge_in.extendedDynamicState3RasterizationStream;
    current.extendedDynamicState3ConservativeRasterizationMode = current.extendedDynamicState3ConservativeRasterizationMode || merge_in.extendedDynamicState3ConservativeRasterizationMode;
    current.extendedDynamicState3ExtraPrimitiveOverestimationSize = current.extendedDynamicState3ExtraPrimitiveOverestimationSize || merge_in.extendedDynamicState3ExtraPrimitiveOverestimationSize;
    current.extendedDynamicState3DepthClipEnable = current.extendedDynamicState3DepthClipEnable || merge_in.extendedDynamicState3DepthClipEnable;
    current.extendedDynamicState3SampleLocationsEnable = current.extendedDynamicState3SampleLocationsEnable || merge_in.extendedDynamicState3SampleLocationsEnable;
    current.extendedDynamicState3ColorBlendAdvanced = current.extendedDynamicState3ColorBlendAdvanced || merge_in.extendedDynamicState3ColorBlendAdvanced;
    current.extendedDynamicState3ProvokingVertexMode = current.extendedDynamicState3ProvokingVertexMode || merge_in.extendedDynamicState3ProvokingVertexMode;
    current.extendedDynamicState3LineRasterizationMode = current.extendedDynamicState3LineRasterizationMode || merge_in.extendedDynamicState3LineRasterizationMode;
    current.extendedDynamicState3LineStippleEnable = current.extendedDynamicState3LineStippleEnable || merge_in.extendedDynamicState3LineStippleEnable;
    current.extendedDynamicState3DepthClipNegativeOneToOne = current.extendedDynamicState3DepthClipNegativeOneToOne || merge_in.extendedDynamicState3DepthClipNegativeOneToOne;
    current.extendedDynamicState3ViewportWScalingEnable = current.extendedDynamicState3ViewportWScalingEnable || merge_in.extendedDynamicState3ViewportWScalingEnable;
    current.extendedDynamicState3ViewportSwizzle = current.extendedDynamicState3ViewportSwizzle || merge_in.extendedDynamicState3ViewportSwizzle;
    current.extendedDynamicState3CoverageToColorEnable = current.extendedDynamicState3CoverageToColorEnable || merge_in.extendedDynamicState3CoverageToColorEnable;
    current.extendedDynamicState3CoverageToColorLocation = current.extendedDynamicState3CoverageToColorLocation || merge_in.extendedDynamicState3CoverageToColorLocation;
    current.extendedDynamicState3CoverageModulationMode = current.extendedDynamicState3CoverageModulationMode ||
merge_in.extendedDynamicState3CoverageModulationMode; + current.extendedDynamicState3CoverageModulationTableEnable = current.extendedDynamicState3CoverageModulationTableEnable || merge_in.extendedDynamicState3CoverageModulationTableEnable; + current.extendedDynamicState3CoverageModulationTable = current.extendedDynamicState3CoverageModulationTable || merge_in.extendedDynamicState3CoverageModulationTable; + current.extendedDynamicState3CoverageReductionMode = current.extendedDynamicState3CoverageReductionMode || merge_in.extendedDynamicState3CoverageReductionMode; + current.extendedDynamicState3RepresentativeFragmentTestEnable = current.extendedDynamicState3RepresentativeFragmentTestEnable || merge_in.extendedDynamicState3RepresentativeFragmentTestEnable; + current.extendedDynamicState3ShadingRateImageEnable = current.extendedDynamicState3ShadingRateImageEnable || merge_in.extendedDynamicState3ShadingRateImageEnable; +} +#endif //(defined(VK_EXT_extended_dynamic_state3)) +#if (defined(VK_EXT_subpass_merge_feedback)) +void compare_VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT(std::vector & error_list, VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT const& supported, VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT const& requested) { + if (requested.subpassMergeFeedback && !supported.subpassMergeFeedback) { + error_list.push_back("Missing feature VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT::subpassMergeFeedback"); + } +} +void merge_VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT(VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT & current, VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT const& merge_in) { + current.subpassMergeFeedback = current.subpassMergeFeedback || merge_in.subpassMergeFeedback; +} +#endif //(defined(VK_EXT_subpass_merge_feedback)) +#if (defined(VK_ARM_tensors)) +void compare_VkPhysicalDeviceTensorFeaturesARM(std::vector & error_list, VkPhysicalDeviceTensorFeaturesARM const& supported, VkPhysicalDeviceTensorFeaturesARM const& requested) { + if 
(requested.tensorNonPacked && !supported.tensorNonPacked) { + error_list.push_back("Missing feature VkPhysicalDeviceTensorFeaturesARM::tensorNonPacked"); + } + if (requested.shaderTensorAccess && !supported.shaderTensorAccess) { + error_list.push_back("Missing feature VkPhysicalDeviceTensorFeaturesARM::shaderTensorAccess"); + } + if (requested.shaderStorageTensorArrayDynamicIndexing && !supported.shaderStorageTensorArrayDynamicIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceTensorFeaturesARM::shaderStorageTensorArrayDynamicIndexing"); + } + if (requested.shaderStorageTensorArrayNonUniformIndexing && !supported.shaderStorageTensorArrayNonUniformIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceTensorFeaturesARM::shaderStorageTensorArrayNonUniformIndexing"); + } + if (requested.descriptorBindingStorageTensorUpdateAfterBind && !supported.descriptorBindingStorageTensorUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceTensorFeaturesARM::descriptorBindingStorageTensorUpdateAfterBind"); + } + if (requested.tensors && !supported.tensors) { + error_list.push_back("Missing feature VkPhysicalDeviceTensorFeaturesARM::tensors"); + } +} +void merge_VkPhysicalDeviceTensorFeaturesARM(VkPhysicalDeviceTensorFeaturesARM & current, VkPhysicalDeviceTensorFeaturesARM const& merge_in) { + current.tensorNonPacked = current.tensorNonPacked || merge_in.tensorNonPacked; + current.shaderTensorAccess = current.shaderTensorAccess || merge_in.shaderTensorAccess; + current.shaderStorageTensorArrayDynamicIndexing = current.shaderStorageTensorArrayDynamicIndexing || merge_in.shaderStorageTensorArrayDynamicIndexing; + current.shaderStorageTensorArrayNonUniformIndexing = current.shaderStorageTensorArrayNonUniformIndexing || merge_in.shaderStorageTensorArrayNonUniformIndexing; + current.descriptorBindingStorageTensorUpdateAfterBind = current.descriptorBindingStorageTensorUpdateAfterBind || 
merge_in.descriptorBindingStorageTensorUpdateAfterBind; + current.tensors = current.tensors || merge_in.tensors; +} +#endif //(defined(VK_ARM_tensors)) +#if (defined(VK_ARM_tensors)) +void compare_VkPhysicalDeviceDescriptorBufferTensorFeaturesARM(std::vector & error_list, VkPhysicalDeviceDescriptorBufferTensorFeaturesARM const& supported, VkPhysicalDeviceDescriptorBufferTensorFeaturesARM const& requested) { + if (requested.descriptorBufferTensorDescriptors && !supported.descriptorBufferTensorDescriptors) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorBufferTensorFeaturesARM::descriptorBufferTensorDescriptors"); + } +} +void merge_VkPhysicalDeviceDescriptorBufferTensorFeaturesARM(VkPhysicalDeviceDescriptorBufferTensorFeaturesARM & current, VkPhysicalDeviceDescriptorBufferTensorFeaturesARM const& merge_in) { + current.descriptorBufferTensorDescriptors = current.descriptorBufferTensorDescriptors || merge_in.descriptorBufferTensorDescriptors; +} +#endif //(defined(VK_ARM_tensors)) +#if (defined(VK_EXT_shader_module_identifier)) +void compare_VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT const& supported, VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT const& requested) { + if (requested.shaderModuleIdentifier && !supported.shaderModuleIdentifier) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT::shaderModuleIdentifier"); + } +} +void merge_VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT(VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT & current, VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT const& merge_in) { + current.shaderModuleIdentifier = current.shaderModuleIdentifier || merge_in.shaderModuleIdentifier; +} +#endif //(defined(VK_EXT_shader_module_identifier)) +#if (defined(VK_NV_optical_flow)) +void compare_VkPhysicalDeviceOpticalFlowFeaturesNV(std::vector & error_list, VkPhysicalDeviceOpticalFlowFeaturesNV const& 
supported, VkPhysicalDeviceOpticalFlowFeaturesNV const& requested) { + if (requested.opticalFlow && !supported.opticalFlow) { + error_list.push_back("Missing feature VkPhysicalDeviceOpticalFlowFeaturesNV::opticalFlow"); + } +} +void merge_VkPhysicalDeviceOpticalFlowFeaturesNV(VkPhysicalDeviceOpticalFlowFeaturesNV & current, VkPhysicalDeviceOpticalFlowFeaturesNV const& merge_in) { + current.opticalFlow = current.opticalFlow || merge_in.opticalFlow; +} +#endif //(defined(VK_NV_optical_flow)) +#if (defined(VK_EXT_legacy_dithering)) +void compare_VkPhysicalDeviceLegacyDitheringFeaturesEXT(std::vector & error_list, VkPhysicalDeviceLegacyDitheringFeaturesEXT const& supported, VkPhysicalDeviceLegacyDitheringFeaturesEXT const& requested) { + if (requested.legacyDithering && !supported.legacyDithering) { + error_list.push_back("Missing feature VkPhysicalDeviceLegacyDitheringFeaturesEXT::legacyDithering"); + } +} +void merge_VkPhysicalDeviceLegacyDitheringFeaturesEXT(VkPhysicalDeviceLegacyDitheringFeaturesEXT & current, VkPhysicalDeviceLegacyDitheringFeaturesEXT const& merge_in) { + current.legacyDithering = current.legacyDithering || merge_in.legacyDithering; +} +#endif //(defined(VK_EXT_legacy_dithering)) +#if defined(VK_USE_PLATFORM_ANDROID_KHR) && (defined(VK_ANDROID_external_format_resolve)) +void compare_VkPhysicalDeviceExternalFormatResolveFeaturesANDROID(std::vector & error_list, VkPhysicalDeviceExternalFormatResolveFeaturesANDROID const& supported, VkPhysicalDeviceExternalFormatResolveFeaturesANDROID const& requested) { + if (requested.externalFormatResolve && !supported.externalFormatResolve) { + error_list.push_back("Missing feature VkPhysicalDeviceExternalFormatResolveFeaturesANDROID::externalFormatResolve"); + } +} +void merge_VkPhysicalDeviceExternalFormatResolveFeaturesANDROID(VkPhysicalDeviceExternalFormatResolveFeaturesANDROID & current, VkPhysicalDeviceExternalFormatResolveFeaturesANDROID const& merge_in) { + current.externalFormatResolve = 
current.externalFormatResolve || merge_in.externalFormatResolve; +} +#endif //defined(VK_USE_PLATFORM_ANDROID_KHR) && (defined(VK_ANDROID_external_format_resolve)) +#if (defined(VK_AMD_anti_lag)) +void compare_VkPhysicalDeviceAntiLagFeaturesAMD(std::vector & error_list, VkPhysicalDeviceAntiLagFeaturesAMD const& supported, VkPhysicalDeviceAntiLagFeaturesAMD const& requested) { + if (requested.antiLag && !supported.antiLag) { + error_list.push_back("Missing feature VkPhysicalDeviceAntiLagFeaturesAMD::antiLag"); + } +} +void merge_VkPhysicalDeviceAntiLagFeaturesAMD(VkPhysicalDeviceAntiLagFeaturesAMD & current, VkPhysicalDeviceAntiLagFeaturesAMD const& merge_in) { + current.antiLag = current.antiLag || merge_in.antiLag; +} +#endif //(defined(VK_AMD_anti_lag)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_dense_geometry_format)) +void compare_VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX(std::vector & error_list, VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX const& supported, VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX const& requested) { + if (requested.denseGeometryFormat && !supported.denseGeometryFormat) { + error_list.push_back("Missing feature VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX::denseGeometryFormat"); + } +} +void merge_VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX(VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX & current, VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX const& merge_in) { + current.denseGeometryFormat = current.denseGeometryFormat || merge_in.denseGeometryFormat; +} +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_dense_geometry_format)) +#if (defined(VK_EXT_shader_object)) +void compare_VkPhysicalDeviceShaderObjectFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderObjectFeaturesEXT const& supported, VkPhysicalDeviceShaderObjectFeaturesEXT const& requested) { + if (requested.shaderObject && !supported.shaderObject) { + error_list.push_back("Missing feature 
VkPhysicalDeviceShaderObjectFeaturesEXT::shaderObject"); + } +} +void merge_VkPhysicalDeviceShaderObjectFeaturesEXT(VkPhysicalDeviceShaderObjectFeaturesEXT & current, VkPhysicalDeviceShaderObjectFeaturesEXT const& merge_in) { + current.shaderObject = current.shaderObject || merge_in.shaderObject; +} +#endif //(defined(VK_EXT_shader_object)) +#if (defined(VK_QCOM_tile_properties)) +void compare_VkPhysicalDeviceTilePropertiesFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceTilePropertiesFeaturesQCOM const& supported, VkPhysicalDeviceTilePropertiesFeaturesQCOM const& requested) { + if (requested.tileProperties && !supported.tileProperties) { + error_list.push_back("Missing feature VkPhysicalDeviceTilePropertiesFeaturesQCOM::tileProperties"); + } +} +void merge_VkPhysicalDeviceTilePropertiesFeaturesQCOM(VkPhysicalDeviceTilePropertiesFeaturesQCOM & current, VkPhysicalDeviceTilePropertiesFeaturesQCOM const& merge_in) { + current.tileProperties = current.tileProperties || merge_in.tileProperties; +} +#endif //(defined(VK_QCOM_tile_properties)) +#if (defined(VK_SEC_amigo_profiling)) +void compare_VkPhysicalDeviceAmigoProfilingFeaturesSEC(std::vector & error_list, VkPhysicalDeviceAmigoProfilingFeaturesSEC const& supported, VkPhysicalDeviceAmigoProfilingFeaturesSEC const& requested) { + if (requested.amigoProfiling && !supported.amigoProfiling) { + error_list.push_back("Missing feature VkPhysicalDeviceAmigoProfilingFeaturesSEC::amigoProfiling"); + } +} +void merge_VkPhysicalDeviceAmigoProfilingFeaturesSEC(VkPhysicalDeviceAmigoProfilingFeaturesSEC & current, VkPhysicalDeviceAmigoProfilingFeaturesSEC const& merge_in) { + current.amigoProfiling = current.amigoProfiling || merge_in.amigoProfiling; +} +#endif //(defined(VK_SEC_amigo_profiling)) +#if (defined(VK_QCOM_multiview_per_view_viewports)) +void compare_VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM const& supported, 
VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM const& requested) { + if (requested.multiviewPerViewViewports && !supported.multiviewPerViewViewports) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM::multiviewPerViewViewports"); + } +} +void merge_VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM(VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM & current, VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM const& merge_in) { + current.multiviewPerViewViewports = current.multiviewPerViewViewports || merge_in.multiviewPerViewViewports; +} +#endif //(defined(VK_QCOM_multiview_per_view_viewports)) +#if (defined(VK_NV_ray_tracing_invocation_reorder)) +void compare_VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV(std::vector & error_list, VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV const& supported, VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV const& requested) { + if (requested.rayTracingInvocationReorder && !supported.rayTracingInvocationReorder) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV::rayTracingInvocationReorder"); + } +} +void merge_VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV(VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV & current, VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV const& merge_in) { + current.rayTracingInvocationReorder = current.rayTracingInvocationReorder || merge_in.rayTracingInvocationReorder; +} +#endif //(defined(VK_NV_ray_tracing_invocation_reorder)) +#if (defined(VK_NV_cooperative_vector)) +void compare_VkPhysicalDeviceCooperativeVectorFeaturesNV(std::vector & error_list, VkPhysicalDeviceCooperativeVectorFeaturesNV const& supported, VkPhysicalDeviceCooperativeVectorFeaturesNV const& requested) { + if (requested.cooperativeVector && !supported.cooperativeVector) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeVectorFeaturesNV::cooperativeVector"); + } + if 
(requested.cooperativeVectorTraining && !supported.cooperativeVectorTraining) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeVectorFeaturesNV::cooperativeVectorTraining"); + } +} +void merge_VkPhysicalDeviceCooperativeVectorFeaturesNV(VkPhysicalDeviceCooperativeVectorFeaturesNV & current, VkPhysicalDeviceCooperativeVectorFeaturesNV const& merge_in) { + current.cooperativeVector = current.cooperativeVector || merge_in.cooperativeVector; + current.cooperativeVectorTraining = current.cooperativeVectorTraining || merge_in.cooperativeVectorTraining; +} +#endif //(defined(VK_NV_cooperative_vector)) +#if (defined(VK_NV_extended_sparse_address_space)) +void compare_VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV(std::vector & error_list, VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV const& supported, VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV const& requested) { + if (requested.extendedSparseAddressSpace && !supported.extendedSparseAddressSpace) { + error_list.push_back("Missing feature VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV::extendedSparseAddressSpace"); + } +} +void merge_VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV(VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV & current, VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV const& merge_in) { + current.extendedSparseAddressSpace = current.extendedSparseAddressSpace || merge_in.extendedSparseAddressSpace; +} +#endif //(defined(VK_NV_extended_sparse_address_space)) +#if (defined(VK_EXT_legacy_vertex_attributes)) +void compare_VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT(std::vector & error_list, VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const& supported, VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const& requested) { + if (requested.legacyVertexAttributes && !supported.legacyVertexAttributes) { + error_list.push_back("Missing feature VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT::legacyVertexAttributes"); + } +} +void 
merge_VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT(VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT & current, VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const& merge_in) { + current.legacyVertexAttributes = current.legacyVertexAttributes || merge_in.legacyVertexAttributes; +} +#endif //(defined(VK_EXT_legacy_vertex_attributes)) +#if (defined(VK_ARM_shader_core_builtins)) +void compare_VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM(std::vector & error_list, VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM const& supported, VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM const& requested) { + if (requested.shaderCoreBuiltins && !supported.shaderCoreBuiltins) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM::shaderCoreBuiltins"); + } +} +void merge_VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM(VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM & current, VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM const& merge_in) { + current.shaderCoreBuiltins = current.shaderCoreBuiltins || merge_in.shaderCoreBuiltins; +} +#endif //(defined(VK_ARM_shader_core_builtins)) +#if (defined(VK_EXT_pipeline_library_group_handles)) +void compare_VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT(std::vector & error_list, VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT const& supported, VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT const& requested) { + if (requested.pipelineLibraryGroupHandles && !supported.pipelineLibraryGroupHandles) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT::pipelineLibraryGroupHandles"); + } +} +void merge_VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT(VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT & current, VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT const& merge_in) { + current.pipelineLibraryGroupHandles = current.pipelineLibraryGroupHandles || merge_in.pipelineLibraryGroupHandles; +} +#endif 
//(defined(VK_EXT_pipeline_library_group_handles)) +#if (defined(VK_EXT_dynamic_rendering_unused_attachments)) +void compare_VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT const& supported, VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT const& requested) { + if (requested.dynamicRenderingUnusedAttachments && !supported.dynamicRenderingUnusedAttachments) { + error_list.push_back("Missing feature VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT::dynamicRenderingUnusedAttachments"); + } +} +void merge_VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT(VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT & current, VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT const& merge_in) { + current.dynamicRenderingUnusedAttachments = current.dynamicRenderingUnusedAttachments || merge_in.dynamicRenderingUnusedAttachments; +} +#endif //(defined(VK_EXT_dynamic_rendering_unused_attachments)) +#if (defined(VK_ARM_data_graph)) +void compare_VkPhysicalDeviceDataGraphFeaturesARM(std::vector & error_list, VkPhysicalDeviceDataGraphFeaturesARM const& supported, VkPhysicalDeviceDataGraphFeaturesARM const& requested) { + if (requested.dataGraph && !supported.dataGraph) { + error_list.push_back("Missing feature VkPhysicalDeviceDataGraphFeaturesARM::dataGraph"); + } + if (requested.dataGraphUpdateAfterBind && !supported.dataGraphUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceDataGraphFeaturesARM::dataGraphUpdateAfterBind"); + } + if (requested.dataGraphSpecializationConstants && !supported.dataGraphSpecializationConstants) { + error_list.push_back("Missing feature VkPhysicalDeviceDataGraphFeaturesARM::dataGraphSpecializationConstants"); + } + if (requested.dataGraphDescriptorBuffer && !supported.dataGraphDescriptorBuffer) { + error_list.push_back("Missing feature 
VkPhysicalDeviceDataGraphFeaturesARM::dataGraphDescriptorBuffer"); + } + if (requested.dataGraphShaderModule && !supported.dataGraphShaderModule) { + error_list.push_back("Missing feature VkPhysicalDeviceDataGraphFeaturesARM::dataGraphShaderModule"); + } +} +void merge_VkPhysicalDeviceDataGraphFeaturesARM(VkPhysicalDeviceDataGraphFeaturesARM & current, VkPhysicalDeviceDataGraphFeaturesARM const& merge_in) { + current.dataGraph = current.dataGraph || merge_in.dataGraph; + current.dataGraphUpdateAfterBind = current.dataGraphUpdateAfterBind || merge_in.dataGraphUpdateAfterBind; + current.dataGraphSpecializationConstants = current.dataGraphSpecializationConstants || merge_in.dataGraphSpecializationConstants; + current.dataGraphDescriptorBuffer = current.dataGraphDescriptorBuffer || merge_in.dataGraphDescriptorBuffer; + current.dataGraphShaderModule = current.dataGraphShaderModule || merge_in.dataGraphShaderModule; +} +#endif //(defined(VK_ARM_data_graph)) +#if (defined(VK_QCOM_multiview_per_view_render_areas)) +void compare_VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM const& supported, VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM const& requested) { + if (requested.multiviewPerViewRenderAreas && !supported.multiviewPerViewRenderAreas) { + error_list.push_back("Missing feature VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM::multiviewPerViewRenderAreas"); + } +} +void merge_VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM(VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM & current, VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM const& merge_in) { + current.multiviewPerViewRenderAreas = current.multiviewPerViewRenderAreas || merge_in.multiviewPerViewRenderAreas; +} +#endif //(defined(VK_QCOM_multiview_per_view_render_areas)) +#if (defined(VK_NV_per_stage_descriptor_set)) +void 
compare_VkPhysicalDevicePerStageDescriptorSetFeaturesNV(std::vector & error_list, VkPhysicalDevicePerStageDescriptorSetFeaturesNV const& supported, VkPhysicalDevicePerStageDescriptorSetFeaturesNV const& requested) { + if (requested.perStageDescriptorSet && !supported.perStageDescriptorSet) { + error_list.push_back("Missing feature VkPhysicalDevicePerStageDescriptorSetFeaturesNV::perStageDescriptorSet"); + } + if (requested.dynamicPipelineLayout && !supported.dynamicPipelineLayout) { + error_list.push_back("Missing feature VkPhysicalDevicePerStageDescriptorSetFeaturesNV::dynamicPipelineLayout"); + } +} +void merge_VkPhysicalDevicePerStageDescriptorSetFeaturesNV(VkPhysicalDevicePerStageDescriptorSetFeaturesNV & current, VkPhysicalDevicePerStageDescriptorSetFeaturesNV const& merge_in) { + current.perStageDescriptorSet = current.perStageDescriptorSet || merge_in.perStageDescriptorSet; + current.dynamicPipelineLayout = current.dynamicPipelineLayout || merge_in.dynamicPipelineLayout; +} +#endif //(defined(VK_NV_per_stage_descriptor_set)) +#if (defined(VK_QCOM_image_processing2)) +void compare_VkPhysicalDeviceImageProcessing2FeaturesQCOM(std::vector & error_list, VkPhysicalDeviceImageProcessing2FeaturesQCOM const& supported, VkPhysicalDeviceImageProcessing2FeaturesQCOM const& requested) { + if (requested.textureBlockMatch2 && !supported.textureBlockMatch2) { + error_list.push_back("Missing feature VkPhysicalDeviceImageProcessing2FeaturesQCOM::textureBlockMatch2"); + } +} +void merge_VkPhysicalDeviceImageProcessing2FeaturesQCOM(VkPhysicalDeviceImageProcessing2FeaturesQCOM & current, VkPhysicalDeviceImageProcessing2FeaturesQCOM const& merge_in) { + current.textureBlockMatch2 = current.textureBlockMatch2 || merge_in.textureBlockMatch2; +} +#endif //(defined(VK_QCOM_image_processing2)) +#if (defined(VK_QCOM_filter_cubic_weights)) +void compare_VkPhysicalDeviceCubicWeightsFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceCubicWeightsFeaturesQCOM const& supported, 
VkPhysicalDeviceCubicWeightsFeaturesQCOM const& requested) { + if (requested.selectableCubicWeights && !supported.selectableCubicWeights) { + error_list.push_back("Missing feature VkPhysicalDeviceCubicWeightsFeaturesQCOM::selectableCubicWeights"); + } +} +void merge_VkPhysicalDeviceCubicWeightsFeaturesQCOM(VkPhysicalDeviceCubicWeightsFeaturesQCOM & current, VkPhysicalDeviceCubicWeightsFeaturesQCOM const& merge_in) { + current.selectableCubicWeights = current.selectableCubicWeights || merge_in.selectableCubicWeights; +} +#endif //(defined(VK_QCOM_filter_cubic_weights)) +#if (defined(VK_QCOM_ycbcr_degamma)) +void compare_VkPhysicalDeviceYcbcrDegammaFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceYcbcrDegammaFeaturesQCOM const& supported, VkPhysicalDeviceYcbcrDegammaFeaturesQCOM const& requested) { + if (requested.ycbcrDegamma && !supported.ycbcrDegamma) { + error_list.push_back("Missing feature VkPhysicalDeviceYcbcrDegammaFeaturesQCOM::ycbcrDegamma"); + } +} +void merge_VkPhysicalDeviceYcbcrDegammaFeaturesQCOM(VkPhysicalDeviceYcbcrDegammaFeaturesQCOM & current, VkPhysicalDeviceYcbcrDegammaFeaturesQCOM const& merge_in) { + current.ycbcrDegamma = current.ycbcrDegamma || merge_in.ycbcrDegamma; +} +#endif //(defined(VK_QCOM_ycbcr_degamma)) +#if (defined(VK_QCOM_filter_cubic_clamp)) +void compare_VkPhysicalDeviceCubicClampFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceCubicClampFeaturesQCOM const& supported, VkPhysicalDeviceCubicClampFeaturesQCOM const& requested) { + if (requested.cubicRangeClamp && !supported.cubicRangeClamp) { + error_list.push_back("Missing feature VkPhysicalDeviceCubicClampFeaturesQCOM::cubicRangeClamp"); + } +} +void merge_VkPhysicalDeviceCubicClampFeaturesQCOM(VkPhysicalDeviceCubicClampFeaturesQCOM & current, VkPhysicalDeviceCubicClampFeaturesQCOM const& merge_in) { + current.cubicRangeClamp = current.cubicRangeClamp || merge_in.cubicRangeClamp; +} +#endif //(defined(VK_QCOM_filter_cubic_clamp)) +#if 
(defined(VK_EXT_attachment_feedback_loop_dynamic_state)) +void compare_VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT(std::vector & error_list, VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT const& supported, VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT const& requested) { + if (requested.attachmentFeedbackLoopDynamicState && !supported.attachmentFeedbackLoopDynamicState) { + error_list.push_back("Missing feature VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT::attachmentFeedbackLoopDynamicState"); + } +} +void merge_VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT(VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT & current, VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT const& merge_in) { + current.attachmentFeedbackLoopDynamicState = current.attachmentFeedbackLoopDynamicState || merge_in.attachmentFeedbackLoopDynamicState; +} +#endif //(defined(VK_EXT_attachment_feedback_loop_dynamic_state)) +#if defined(VK_USE_PLATFORM_SCREEN_QNX) && (defined(VK_QNX_external_memory_screen_buffer)) +void compare_VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX(std::vector & error_list, VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX const& supported, VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX const& requested) { + if (requested.screenBufferImport && !supported.screenBufferImport) { + error_list.push_back("Missing feature VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX::screenBufferImport"); + } +} +void merge_VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX(VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX & current, VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX const& merge_in) { + current.screenBufferImport = current.screenBufferImport || merge_in.screenBufferImport; +} +#endif //defined(VK_USE_PLATFORM_SCREEN_QNX) && (defined(VK_QNX_external_memory_screen_buffer)) +#if (defined(VK_NV_descriptor_pool_overallocation)) +void 
compare_VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV(std::vector & error_list, VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV const& supported, VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV const& requested) { + if (requested.descriptorPoolOverallocation && !supported.descriptorPoolOverallocation) { + error_list.push_back("Missing feature VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV::descriptorPoolOverallocation"); + } +} +void merge_VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV(VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV & current, VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV const& merge_in) { + current.descriptorPoolOverallocation = current.descriptorPoolOverallocation || merge_in.descriptorPoolOverallocation; +} +#endif //(defined(VK_NV_descriptor_pool_overallocation)) +#if (defined(VK_QCOM_tile_memory_heap)) +void compare_VkPhysicalDeviceTileMemoryHeapFeaturesQCOM(std::vector & error_list, VkPhysicalDeviceTileMemoryHeapFeaturesQCOM const& supported, VkPhysicalDeviceTileMemoryHeapFeaturesQCOM const& requested) { + if (requested.tileMemoryHeap && !supported.tileMemoryHeap) { + error_list.push_back("Missing feature VkPhysicalDeviceTileMemoryHeapFeaturesQCOM::tileMemoryHeap"); + } +} +void merge_VkPhysicalDeviceTileMemoryHeapFeaturesQCOM(VkPhysicalDeviceTileMemoryHeapFeaturesQCOM & current, VkPhysicalDeviceTileMemoryHeapFeaturesQCOM const& merge_in) { + current.tileMemoryHeap = current.tileMemoryHeap || merge_in.tileMemoryHeap; +} +#endif //(defined(VK_QCOM_tile_memory_heap)) +#if (defined(VK_NV_raw_access_chains)) +void compare_VkPhysicalDeviceRawAccessChainsFeaturesNV(std::vector & error_list, VkPhysicalDeviceRawAccessChainsFeaturesNV const& supported, VkPhysicalDeviceRawAccessChainsFeaturesNV const& requested) { + if (requested.shaderRawAccessChains && !supported.shaderRawAccessChains) { + error_list.push_back("Missing feature VkPhysicalDeviceRawAccessChainsFeaturesNV::shaderRawAccessChains"); + 
} +} +void merge_VkPhysicalDeviceRawAccessChainsFeaturesNV(VkPhysicalDeviceRawAccessChainsFeaturesNV & current, VkPhysicalDeviceRawAccessChainsFeaturesNV const& merge_in) { + current.shaderRawAccessChains = current.shaderRawAccessChains || merge_in.shaderRawAccessChains; +} +#endif //(defined(VK_NV_raw_access_chains)) +#if (defined(VK_NV_command_buffer_inheritance)) +void compare_VkPhysicalDeviceCommandBufferInheritanceFeaturesNV(std::vector & error_list, VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const& supported, VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const& requested) { + if (requested.commandBufferInheritance && !supported.commandBufferInheritance) { + error_list.push_back("Missing feature VkPhysicalDeviceCommandBufferInheritanceFeaturesNV::commandBufferInheritance"); + } +} +void merge_VkPhysicalDeviceCommandBufferInheritanceFeaturesNV(VkPhysicalDeviceCommandBufferInheritanceFeaturesNV & current, VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const& merge_in) { + current.commandBufferInheritance = current.commandBufferInheritance || merge_in.commandBufferInheritance; +} +#endif //(defined(VK_NV_command_buffer_inheritance)) +#if (defined(VK_NV_shader_atomic_float16_vector)) +void compare_VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV(std::vector & error_list, VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV const& supported, VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV const& requested) { + if (requested.shaderFloat16VectorAtomics && !supported.shaderFloat16VectorAtomics) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV::shaderFloat16VectorAtomics"); + } +} +void merge_VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV(VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV & current, VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV const& merge_in) { + current.shaderFloat16VectorAtomics = current.shaderFloat16VectorAtomics || merge_in.shaderFloat16VectorAtomics; +} +#endif 
//(defined(VK_NV_shader_atomic_float16_vector)) +#if (defined(VK_EXT_shader_replicated_composites)) +void compare_VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const& supported, VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const& requested) { + if (requested.shaderReplicatedComposites && !supported.shaderReplicatedComposites) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT::shaderReplicatedComposites"); + } +} +void merge_VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT(VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT & current, VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const& merge_in) { + current.shaderReplicatedComposites = current.shaderReplicatedComposites || merge_in.shaderReplicatedComposites; +} +#endif //(defined(VK_EXT_shader_replicated_composites)) +#if (defined(VK_EXT_shader_float8)) +void compare_VkPhysicalDeviceShaderFloat8FeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderFloat8FeaturesEXT const& supported, VkPhysicalDeviceShaderFloat8FeaturesEXT const& requested) { + if (requested.shaderFloat8 && !supported.shaderFloat8) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloat8FeaturesEXT::shaderFloat8"); + } + if (requested.shaderFloat8CooperativeMatrix && !supported.shaderFloat8CooperativeMatrix) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderFloat8FeaturesEXT::shaderFloat8CooperativeMatrix"); + } +} +void merge_VkPhysicalDeviceShaderFloat8FeaturesEXT(VkPhysicalDeviceShaderFloat8FeaturesEXT & current, VkPhysicalDeviceShaderFloat8FeaturesEXT const& merge_in) { + current.shaderFloat8 = current.shaderFloat8 || merge_in.shaderFloat8; + current.shaderFloat8CooperativeMatrix = current.shaderFloat8CooperativeMatrix || merge_in.shaderFloat8CooperativeMatrix; +} +#endif //(defined(VK_EXT_shader_float8)) +#if (defined(VK_NV_ray_tracing_validation)) +void 
compare_VkPhysicalDeviceRayTracingValidationFeaturesNV(std::vector & error_list, VkPhysicalDeviceRayTracingValidationFeaturesNV const& supported, VkPhysicalDeviceRayTracingValidationFeaturesNV const& requested) { + if (requested.rayTracingValidation && !supported.rayTracingValidation) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingValidationFeaturesNV::rayTracingValidation"); + } +} +void merge_VkPhysicalDeviceRayTracingValidationFeaturesNV(VkPhysicalDeviceRayTracingValidationFeaturesNV & current, VkPhysicalDeviceRayTracingValidationFeaturesNV const& merge_in) { + current.rayTracingValidation = current.rayTracingValidation || merge_in.rayTracingValidation; +} +#endif //(defined(VK_NV_ray_tracing_validation)) +#if (defined(VK_NV_cluster_acceleration_structure)) +void compare_VkPhysicalDeviceClusterAccelerationStructureFeaturesNV(std::vector & error_list, VkPhysicalDeviceClusterAccelerationStructureFeaturesNV const& supported, VkPhysicalDeviceClusterAccelerationStructureFeaturesNV const& requested) { + if (requested.clusterAccelerationStructure && !supported.clusterAccelerationStructure) { + error_list.push_back("Missing feature VkPhysicalDeviceClusterAccelerationStructureFeaturesNV::clusterAccelerationStructure"); + } +} +void merge_VkPhysicalDeviceClusterAccelerationStructureFeaturesNV(VkPhysicalDeviceClusterAccelerationStructureFeaturesNV & current, VkPhysicalDeviceClusterAccelerationStructureFeaturesNV const& merge_in) { + current.clusterAccelerationStructure = current.clusterAccelerationStructure || merge_in.clusterAccelerationStructure; +} +#endif //(defined(VK_NV_cluster_acceleration_structure)) +#if (defined(VK_NV_partitioned_acceleration_structure)) +void compare_VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV(std::vector & error_list, VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV const& supported, VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV const& requested) { + if 
(requested.partitionedAccelerationStructure && !supported.partitionedAccelerationStructure) { + error_list.push_back("Missing feature VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV::partitionedAccelerationStructure"); + } +} +void merge_VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV(VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV & current, VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV const& merge_in) { + current.partitionedAccelerationStructure = current.partitionedAccelerationStructure || merge_in.partitionedAccelerationStructure; +} +#endif //(defined(VK_NV_partitioned_acceleration_structure)) +#if (defined(VK_EXT_device_generated_commands)) +void compare_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT const& supported, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT const& requested) { + if (requested.deviceGeneratedCommands && !supported.deviceGeneratedCommands) { + error_list.push_back("Missing feature VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT::deviceGeneratedCommands"); + } + if (requested.dynamicGeneratedPipelineLayout && !supported.dynamicGeneratedPipelineLayout) { + error_list.push_back("Missing feature VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT::dynamicGeneratedPipelineLayout"); + } +} +void merge_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT(VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT & current, VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT const& merge_in) { + current.deviceGeneratedCommands = current.deviceGeneratedCommands || merge_in.deviceGeneratedCommands; + current.dynamicGeneratedPipelineLayout = current.dynamicGeneratedPipelineLayout || merge_in.dynamicGeneratedPipelineLayout; +} +#endif //(defined(VK_EXT_device_generated_commands)) +#if (defined(VK_MESA_image_alignment_control)) +void compare_VkPhysicalDeviceImageAlignmentControlFeaturesMESA(std::vector & error_list, 
VkPhysicalDeviceImageAlignmentControlFeaturesMESA const& supported, VkPhysicalDeviceImageAlignmentControlFeaturesMESA const& requested) { + if (requested.imageAlignmentControl && !supported.imageAlignmentControl) { + error_list.push_back("Missing feature VkPhysicalDeviceImageAlignmentControlFeaturesMESA::imageAlignmentControl"); + } +} +void merge_VkPhysicalDeviceImageAlignmentControlFeaturesMESA(VkPhysicalDeviceImageAlignmentControlFeaturesMESA & current, VkPhysicalDeviceImageAlignmentControlFeaturesMESA const& merge_in) { + current.imageAlignmentControl = current.imageAlignmentControl || merge_in.imageAlignmentControl; +} +#endif //(defined(VK_MESA_image_alignment_control)) +#if (defined(VK_EXT_depth_clamp_control)) +void compare_VkPhysicalDeviceDepthClampControlFeaturesEXT(std::vector & error_list, VkPhysicalDeviceDepthClampControlFeaturesEXT const& supported, VkPhysicalDeviceDepthClampControlFeaturesEXT const& requested) { + if (requested.depthClampControl && !supported.depthClampControl) { + error_list.push_back("Missing feature VkPhysicalDeviceDepthClampControlFeaturesEXT::depthClampControl"); + } +} +void merge_VkPhysicalDeviceDepthClampControlFeaturesEXT(VkPhysicalDeviceDepthClampControlFeaturesEXT & current, VkPhysicalDeviceDepthClampControlFeaturesEXT const& merge_in) { + current.depthClampControl = current.depthClampControl || merge_in.depthClampControl; +} +#endif //(defined(VK_EXT_depth_clamp_control)) +#if (defined(VK_HUAWEI_hdr_vivid)) +void compare_VkPhysicalDeviceHdrVividFeaturesHUAWEI(std::vector & error_list, VkPhysicalDeviceHdrVividFeaturesHUAWEI const& supported, VkPhysicalDeviceHdrVividFeaturesHUAWEI const& requested) { + if (requested.hdrVivid && !supported.hdrVivid) { + error_list.push_back("Missing feature VkPhysicalDeviceHdrVividFeaturesHUAWEI::hdrVivid"); + } +} +void merge_VkPhysicalDeviceHdrVividFeaturesHUAWEI(VkPhysicalDeviceHdrVividFeaturesHUAWEI & current, VkPhysicalDeviceHdrVividFeaturesHUAWEI const& merge_in) { + current.hdrVivid = 
current.hdrVivid || merge_in.hdrVivid; +} +#endif //(defined(VK_HUAWEI_hdr_vivid)) +#if (defined(VK_NV_cooperative_matrix2)) +void compare_VkPhysicalDeviceCooperativeMatrix2FeaturesNV(std::vector & error_list, VkPhysicalDeviceCooperativeMatrix2FeaturesNV const& supported, VkPhysicalDeviceCooperativeMatrix2FeaturesNV const& requested) { + if (requested.cooperativeMatrixWorkgroupScope && !supported.cooperativeMatrixWorkgroupScope) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrix2FeaturesNV::cooperativeMatrixWorkgroupScope"); + } + if (requested.cooperativeMatrixFlexibleDimensions && !supported.cooperativeMatrixFlexibleDimensions) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrix2FeaturesNV::cooperativeMatrixFlexibleDimensions"); + } + if (requested.cooperativeMatrixReductions && !supported.cooperativeMatrixReductions) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrix2FeaturesNV::cooperativeMatrixReductions"); + } + if (requested.cooperativeMatrixConversions && !supported.cooperativeMatrixConversions) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrix2FeaturesNV::cooperativeMatrixConversions"); + } + if (requested.cooperativeMatrixPerElementOperations && !supported.cooperativeMatrixPerElementOperations) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrix2FeaturesNV::cooperativeMatrixPerElementOperations"); + } + if (requested.cooperativeMatrixTensorAddressing && !supported.cooperativeMatrixTensorAddressing) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrix2FeaturesNV::cooperativeMatrixTensorAddressing"); + } + if (requested.cooperativeMatrixBlockLoads && !supported.cooperativeMatrixBlockLoads) { + error_list.push_back("Missing feature VkPhysicalDeviceCooperativeMatrix2FeaturesNV::cooperativeMatrixBlockLoads"); + } +} +void merge_VkPhysicalDeviceCooperativeMatrix2FeaturesNV(VkPhysicalDeviceCooperativeMatrix2FeaturesNV 
& current, VkPhysicalDeviceCooperativeMatrix2FeaturesNV const& merge_in) { + current.cooperativeMatrixWorkgroupScope = current.cooperativeMatrixWorkgroupScope || merge_in.cooperativeMatrixWorkgroupScope; + current.cooperativeMatrixFlexibleDimensions = current.cooperativeMatrixFlexibleDimensions || merge_in.cooperativeMatrixFlexibleDimensions; + current.cooperativeMatrixReductions = current.cooperativeMatrixReductions || merge_in.cooperativeMatrixReductions; + current.cooperativeMatrixConversions = current.cooperativeMatrixConversions || merge_in.cooperativeMatrixConversions; + current.cooperativeMatrixPerElementOperations = current.cooperativeMatrixPerElementOperations || merge_in.cooperativeMatrixPerElementOperations; + current.cooperativeMatrixTensorAddressing = current.cooperativeMatrixTensorAddressing || merge_in.cooperativeMatrixTensorAddressing; + current.cooperativeMatrixBlockLoads = current.cooperativeMatrixBlockLoads || merge_in.cooperativeMatrixBlockLoads; +} +#endif //(defined(VK_NV_cooperative_matrix2)) +#if (defined(VK_ARM_pipeline_opacity_micromap)) +void compare_VkPhysicalDevicePipelineOpacityMicromapFeaturesARM(std::vector & error_list, VkPhysicalDevicePipelineOpacityMicromapFeaturesARM const& supported, VkPhysicalDevicePipelineOpacityMicromapFeaturesARM const& requested) { + if (requested.pipelineOpacityMicromap && !supported.pipelineOpacityMicromap) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineOpacityMicromapFeaturesARM::pipelineOpacityMicromap"); + } +} +void merge_VkPhysicalDevicePipelineOpacityMicromapFeaturesARM(VkPhysicalDevicePipelineOpacityMicromapFeaturesARM & current, VkPhysicalDevicePipelineOpacityMicromapFeaturesARM const& merge_in) { + current.pipelineOpacityMicromap = current.pipelineOpacityMicromap || merge_in.pipelineOpacityMicromap; +} +#endif //(defined(VK_ARM_pipeline_opacity_micromap)) +#if (defined(VK_EXT_vertex_attribute_robustness)) +void 
compare_VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT(std::vector & error_list, VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT const& supported, VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT const& requested) { + if (requested.vertexAttributeRobustness && !supported.vertexAttributeRobustness) { + error_list.push_back("Missing feature VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT::vertexAttributeRobustness"); + } +} +void merge_VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT(VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT & current, VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT const& merge_in) { + current.vertexAttributeRobustness = current.vertexAttributeRobustness || merge_in.vertexAttributeRobustness; +} +#endif //(defined(VK_EXT_vertex_attribute_robustness)) +#if (defined(VK_ARM_format_pack)) +void compare_VkPhysicalDeviceFormatPackFeaturesARM(std::vector & error_list, VkPhysicalDeviceFormatPackFeaturesARM const& supported, VkPhysicalDeviceFormatPackFeaturesARM const& requested) { + if (requested.formatPack && !supported.formatPack) { + error_list.push_back("Missing feature VkPhysicalDeviceFormatPackFeaturesARM::formatPack"); + } +} +void merge_VkPhysicalDeviceFormatPackFeaturesARM(VkPhysicalDeviceFormatPackFeaturesARM & current, VkPhysicalDeviceFormatPackFeaturesARM const& merge_in) { + current.formatPack = current.formatPack || merge_in.formatPack; +} +#endif //(defined(VK_ARM_format_pack)) +#if (defined(VK_VALVE_fragment_density_map_layered)) +void compare_VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE(std::vector & error_list, VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE const& supported, VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE const& requested) { + if (requested.fragmentDensityMapLayered && !supported.fragmentDensityMapLayered) { + error_list.push_back("Missing feature VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE::fragmentDensityMapLayered"); + } +} +void 
merge_VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE(VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE & current, VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE const& merge_in) { + current.fragmentDensityMapLayered = current.fragmentDensityMapLayered || merge_in.fragmentDensityMapLayered; +} +#endif //(defined(VK_VALVE_fragment_density_map_layered)) +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_present_metering)) +void compare_VkPhysicalDevicePresentMeteringFeaturesNV(std::vector & error_list, VkPhysicalDevicePresentMeteringFeaturesNV const& supported, VkPhysicalDevicePresentMeteringFeaturesNV const& requested) { + if (requested.presentMetering && !supported.presentMetering) { + error_list.push_back("Missing feature VkPhysicalDevicePresentMeteringFeaturesNV::presentMetering"); + } +} +void merge_VkPhysicalDevicePresentMeteringFeaturesNV(VkPhysicalDevicePresentMeteringFeaturesNV & current, VkPhysicalDevicePresentMeteringFeaturesNV const& merge_in) { + current.presentMetering = current.presentMetering || merge_in.presentMetering; +} +#endif //defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_present_metering)) +#if (defined(VK_EXT_zero_initialize_device_memory)) +void compare_VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT(std::vector & error_list, VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT const& supported, VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT const& requested) { + if (requested.zeroInitializeDeviceMemory && !supported.zeroInitializeDeviceMemory) { + error_list.push_back("Missing feature VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT::zeroInitializeDeviceMemory"); + } +} +void merge_VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT(VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT & current, VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT const& merge_in) { + current.zeroInitializeDeviceMemory = current.zeroInitializeDeviceMemory || merge_in.zeroInitializeDeviceMemory; +} 
+#endif //(defined(VK_EXT_zero_initialize_device_memory)) +#if (defined(VK_EXT_shader_64bit_indexing)) +void compare_VkPhysicalDeviceShader64BitIndexingFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShader64BitIndexingFeaturesEXT const& supported, VkPhysicalDeviceShader64BitIndexingFeaturesEXT const& requested) { + if (requested.shader64BitIndexing && !supported.shader64BitIndexing) { + error_list.push_back("Missing feature VkPhysicalDeviceShader64BitIndexingFeaturesEXT::shader64BitIndexing"); + } +} +void merge_VkPhysicalDeviceShader64BitIndexingFeaturesEXT(VkPhysicalDeviceShader64BitIndexingFeaturesEXT & current, VkPhysicalDeviceShader64BitIndexingFeaturesEXT const& merge_in) { + current.shader64BitIndexing = current.shader64BitIndexing || merge_in.shader64BitIndexing; +} +#endif //(defined(VK_EXT_shader_64bit_indexing)) +#if (defined(VK_SEC_pipeline_cache_incremental_mode)) +void compare_VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC(std::vector & error_list, VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC const& supported, VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC const& requested) { + if (requested.pipelineCacheIncrementalMode && !supported.pipelineCacheIncrementalMode) { + error_list.push_back("Missing feature VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC::pipelineCacheIncrementalMode"); + } +} +void merge_VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC(VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC & current, VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC const& merge_in) { + current.pipelineCacheIncrementalMode = current.pipelineCacheIncrementalMode || merge_in.pipelineCacheIncrementalMode; +} +#endif //(defined(VK_SEC_pipeline_cache_incremental_mode)) +#if (defined(VK_EXT_shader_uniform_buffer_unsized_array)) +void compare_VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT(std::vector & error_list, VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT const& supported, 
VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT const& requested) { + if (requested.shaderUniformBufferUnsizedArray && !supported.shaderUniformBufferUnsizedArray) { + error_list.push_back("Missing feature VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT::shaderUniformBufferUnsizedArray"); + } +} +void merge_VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT(VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT & current, VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT const& merge_in) { + current.shaderUniformBufferUnsizedArray = current.shaderUniformBufferUnsizedArray || merge_in.shaderUniformBufferUnsizedArray; +} +#endif //(defined(VK_EXT_shader_uniform_buffer_unsized_array)) +#if (defined(VK_KHR_acceleration_structure)) +void compare_VkPhysicalDeviceAccelerationStructureFeaturesKHR(std::vector & error_list, VkPhysicalDeviceAccelerationStructureFeaturesKHR const& supported, VkPhysicalDeviceAccelerationStructureFeaturesKHR const& requested) { + if (requested.accelerationStructure && !supported.accelerationStructure) { + error_list.push_back("Missing feature VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructure"); + } + if (requested.accelerationStructureCaptureReplay && !supported.accelerationStructureCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureCaptureReplay"); + } + if (requested.accelerationStructureIndirectBuild && !supported.accelerationStructureIndirectBuild) { + error_list.push_back("Missing feature VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureIndirectBuild"); + } + if (requested.accelerationStructureHostCommands && !supported.accelerationStructureHostCommands) { + error_list.push_back("Missing feature VkPhysicalDeviceAccelerationStructureFeaturesKHR::accelerationStructureHostCommands"); + } + if (requested.descriptorBindingAccelerationStructureUpdateAfterBind && 
!supported.descriptorBindingAccelerationStructureUpdateAfterBind) { + error_list.push_back("Missing feature VkPhysicalDeviceAccelerationStructureFeaturesKHR::descriptorBindingAccelerationStructureUpdateAfterBind"); + } +} +void merge_VkPhysicalDeviceAccelerationStructureFeaturesKHR(VkPhysicalDeviceAccelerationStructureFeaturesKHR & current, VkPhysicalDeviceAccelerationStructureFeaturesKHR const& merge_in) { + current.accelerationStructure = current.accelerationStructure || merge_in.accelerationStructure; + current.accelerationStructureCaptureReplay = current.accelerationStructureCaptureReplay || merge_in.accelerationStructureCaptureReplay; + current.accelerationStructureIndirectBuild = current.accelerationStructureIndirectBuild || merge_in.accelerationStructureIndirectBuild; + current.accelerationStructureHostCommands = current.accelerationStructureHostCommands || merge_in.accelerationStructureHostCommands; + current.descriptorBindingAccelerationStructureUpdateAfterBind = current.descriptorBindingAccelerationStructureUpdateAfterBind || merge_in.descriptorBindingAccelerationStructureUpdateAfterBind; +} +#endif //(defined(VK_KHR_acceleration_structure)) +#if (defined(VK_KHR_ray_tracing_pipeline)) +void compare_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayTracingPipelineFeaturesKHR const& supported, VkPhysicalDeviceRayTracingPipelineFeaturesKHR const& requested) { + if (requested.rayTracingPipeline && !supported.rayTracingPipeline) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingPipelineFeaturesKHR::rayTracingPipeline"); + } + if (requested.rayTracingPipelineShaderGroupHandleCaptureReplay && !supported.rayTracingPipelineShaderGroupHandleCaptureReplay) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingPipelineFeaturesKHR::rayTracingPipelineShaderGroupHandleCaptureReplay"); + } + if (requested.rayTracingPipelineShaderGroupHandleCaptureReplayMixed && 
!supported.rayTracingPipelineShaderGroupHandleCaptureReplayMixed) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingPipelineFeaturesKHR::rayTracingPipelineShaderGroupHandleCaptureReplayMixed"); + } + if (requested.rayTracingPipelineTraceRaysIndirect && !supported.rayTracingPipelineTraceRaysIndirect) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingPipelineFeaturesKHR::rayTracingPipelineTraceRaysIndirect"); + } + if (requested.rayTraversalPrimitiveCulling && !supported.rayTraversalPrimitiveCulling) { + error_list.push_back("Missing feature VkPhysicalDeviceRayTracingPipelineFeaturesKHR::rayTraversalPrimitiveCulling"); + } +} +void merge_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(VkPhysicalDeviceRayTracingPipelineFeaturesKHR & current, VkPhysicalDeviceRayTracingPipelineFeaturesKHR const& merge_in) { + current.rayTracingPipeline = current.rayTracingPipeline || merge_in.rayTracingPipeline; + current.rayTracingPipelineShaderGroupHandleCaptureReplay = current.rayTracingPipelineShaderGroupHandleCaptureReplay || merge_in.rayTracingPipelineShaderGroupHandleCaptureReplay; + current.rayTracingPipelineShaderGroupHandleCaptureReplayMixed = current.rayTracingPipelineShaderGroupHandleCaptureReplayMixed || merge_in.rayTracingPipelineShaderGroupHandleCaptureReplayMixed; + current.rayTracingPipelineTraceRaysIndirect = current.rayTracingPipelineTraceRaysIndirect || merge_in.rayTracingPipelineTraceRaysIndirect; + current.rayTraversalPrimitiveCulling = current.rayTraversalPrimitiveCulling || merge_in.rayTraversalPrimitiveCulling; +} +#endif //(defined(VK_KHR_ray_tracing_pipeline)) +#if (defined(VK_KHR_ray_query)) +void compare_VkPhysicalDeviceRayQueryFeaturesKHR(std::vector & error_list, VkPhysicalDeviceRayQueryFeaturesKHR const& supported, VkPhysicalDeviceRayQueryFeaturesKHR const& requested) { + if (requested.rayQuery && !supported.rayQuery) { + error_list.push_back("Missing feature VkPhysicalDeviceRayQueryFeaturesKHR::rayQuery"); + } +} +void 
merge_VkPhysicalDeviceRayQueryFeaturesKHR(VkPhysicalDeviceRayQueryFeaturesKHR & current, VkPhysicalDeviceRayQueryFeaturesKHR const& merge_in) { + current.rayQuery = current.rayQuery || merge_in.rayQuery; +} +#endif //(defined(VK_KHR_ray_query)) +#if (defined(VK_EXT_mesh_shader)) +void compare_VkPhysicalDeviceMeshShaderFeaturesEXT(std::vector & error_list, VkPhysicalDeviceMeshShaderFeaturesEXT const& supported, VkPhysicalDeviceMeshShaderFeaturesEXT const& requested) { + if (requested.taskShader && !supported.taskShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMeshShaderFeaturesEXT::taskShader"); + } + if (requested.meshShader && !supported.meshShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMeshShaderFeaturesEXT::meshShader"); + } + if (requested.multiviewMeshShader && !supported.multiviewMeshShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMeshShaderFeaturesEXT::multiviewMeshShader"); + } + if (requested.primitiveFragmentShadingRateMeshShader && !supported.primitiveFragmentShadingRateMeshShader) { + error_list.push_back("Missing feature VkPhysicalDeviceMeshShaderFeaturesEXT::primitiveFragmentShadingRateMeshShader"); + } + if (requested.meshShaderQueries && !supported.meshShaderQueries) { + error_list.push_back("Missing feature VkPhysicalDeviceMeshShaderFeaturesEXT::meshShaderQueries"); + } +} +void merge_VkPhysicalDeviceMeshShaderFeaturesEXT(VkPhysicalDeviceMeshShaderFeaturesEXT & current, VkPhysicalDeviceMeshShaderFeaturesEXT const& merge_in) { + current.taskShader = current.taskShader || merge_in.taskShader; + current.meshShader = current.meshShader || merge_in.meshShader; + current.multiviewMeshShader = current.multiviewMeshShader || merge_in.multiviewMeshShader; + current.primitiveFragmentShadingRateMeshShader = current.primitiveFragmentShadingRateMeshShader || merge_in.primitiveFragmentShadingRateMeshShader; + current.meshShaderQueries = current.meshShaderQueries || merge_in.meshShaderQueries; +} +#endif 
//(defined(VK_EXT_mesh_shader)) +void compare_feature_struct(VkStructureType sType, std::vector & error_list, const void* supported, const void* requested) { + switch (sType) { +#if (defined(VK_VERSION_1_1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES): + compare_VkPhysicalDeviceProtectedMemoryFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES): + compare_VkPhysicalDevice16BitStorageFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR): + compare_VkPhysicalDevice16BitStorageFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES): + compare_VkPhysicalDeviceVariablePointersFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES): + compare_VkPhysicalDeviceVariablePointerFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES): + compare_VkPhysicalDeviceVariablePointerFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES): + compare_VkPhysicalDeviceVariablePointersFeaturesKHR(error_list, 
*reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES): + compare_VkPhysicalDeviceSamplerYcbcrConversionFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR): + compare_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_multiview)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES): + compare_VkPhysicalDeviceMultiviewFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_multiview)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR): + compare_VkPhysicalDeviceMultiviewFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES): + compare_VkPhysicalDeviceShaderDrawParametersFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES): + compare_VkPhysicalDeviceShaderDrawParameterFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES): + compare_VkPhysicalDeviceVulkan11Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES): + compare_VkPhysicalDeviceVulkan12Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES): + compare_VkPhysicalDeviceVulkanMemoryModelFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR): + compare_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES): + compare_VkPhysicalDeviceHostQueryResetFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT): + compare_VkPhysicalDeviceHostQueryResetFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES): + compare_VkPhysicalDeviceTimelineSemaphoreFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR): + compare_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES): + compare_VkPhysicalDeviceBufferDeviceAddressFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR): + compare_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES): + compare_VkPhysicalDevice8BitStorageFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR): + compare_VkPhysicalDevice8BitStorageFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES): + compare_VkPhysicalDeviceShaderAtomicInt64Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR): + compare_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES): + compare_VkPhysicalDeviceShaderFloat16Int8Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR): + compare_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR): + compare_VkPhysicalDeviceFloat16Int8FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES): + compare_VkPhysicalDeviceDescriptorIndexingFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT): + compare_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES): + compare_VkPhysicalDeviceScalarBlockLayoutFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT): + compare_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES): + compare_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || 
defined(VK_KHR_uniform_buffer_standard_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR): + compare_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES): + compare_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR): + compare_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES): + compare_VkPhysicalDeviceImagelessFramebufferFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR): + compare_VkPhysicalDeviceImagelessFramebufferFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES): + compare_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR): + 
compare_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES): + compare_VkPhysicalDeviceVulkan13Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES): + compare_VkPhysicalDevicePrivateDataFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT): + compare_VkPhysicalDevicePrivateDataFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES): + compare_VkPhysicalDeviceSynchronization2Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR): + compare_VkPhysicalDeviceSynchronization2FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES): + compare_VkPhysicalDeviceTextureCompressionASTCHDRFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT): + 
compare_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES): + compare_VkPhysicalDeviceMaintenance4Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR): + compare_VkPhysicalDeviceMaintenance4FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES): + compare_VkPhysicalDeviceShaderTerminateInvocationFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR): + compare_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES): + compare_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT): + compare_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || 
defined(VK_EXT_pipeline_creation_cache_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES): + compare_VkPhysicalDevicePipelineCreationCacheControlFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT): + compare_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES): + compare_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR): + compare_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES): + compare_VkPhysicalDeviceImageRobustnessFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT): + compare_VkPhysicalDeviceImageRobustnessFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES): + 
compare_VkPhysicalDeviceSubgroupSizeControlFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT): + compare_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES): + compare_VkPhysicalDeviceInlineUniformBlockFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT): + compare_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES): + compare_VkPhysicalDeviceShaderIntegerDotProductFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR): + compare_VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES): + compare_VkPhysicalDeviceDynamicRenderingFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR): + compare_VkPhysicalDeviceDynamicRenderingFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_FEATURES): + compare_VkPhysicalDeviceVulkan14Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority) || defined(VK_EXT_global_priority_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES): + compare_VkPhysicalDeviceGlobalPriorityQueryFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR): + compare_VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_global_priority_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR): + compare_VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8) || defined(VK_EXT_index_type_uint8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES): + compare_VkPhysicalDeviceIndexTypeUint8Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT): + compare_VkPhysicalDeviceIndexTypeUint8FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_index_type_uint8)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT): + compare_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES): + compare_VkPhysicalDeviceMaintenance5Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR): + compare_VkPhysicalDeviceMaintenance5FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES): + compare_VkPhysicalDeviceMaintenance6Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR): + compare_VkPhysicalDeviceMaintenance6FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES): + compare_VkPhysicalDeviceHostImageCopyFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT): + compare_VkPhysicalDeviceHostImageCopyFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES): + 
compare_VkPhysicalDeviceShaderSubgroupRotateFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR): + compare_VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES): + compare_VkPhysicalDeviceShaderFloatControls2Features(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR): + compare_VkPhysicalDeviceShaderFloatControls2FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES): + compare_VkPhysicalDeviceShaderExpectAssumeFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR): + compare_VkPhysicalDeviceShaderExpectAssumeFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES): + compare_VkPhysicalDevicePipelineProtectedAccessFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT): + compare_VkPhysicalDevicePipelineProtectedAccessFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES): + compare_VkPhysicalDevicePipelineRobustnessFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT): + compare_VkPhysicalDevicePipelineRobustnessFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization) || defined(VK_EXT_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES): + compare_VkPhysicalDeviceLineRasterizationFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT): + compare_VkPhysicalDeviceLineRasterizationFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT): + compare_VkPhysicalDeviceLineRasterizationFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor) || defined(VK_EXT_vertex_attribute_divisor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES): + compare_VkPhysicalDeviceVertexAttributeDivisorFeatures(error_list, *reinterpret_cast(supported), 
*reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT): + compare_VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_vertex_attribute_divisor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT): + compare_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES): + compare_VkPhysicalDeviceDynamicRenderingLocalReadFeatures(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR): + compare_VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_performance_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR): + compare_VkPhysicalDevicePerformanceQueryFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_bfloat16)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR): + compare_VkPhysicalDeviceShaderBfloat16FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_KHR_portability_subset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR): + 
compare_VkPhysicalDevicePortabilitySubsetFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_clock)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR): + compare_VkPhysicalDeviceShaderClockFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR): + compare_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_quad_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_QUAD_CONTROL_FEATURES_KHR): + compare_VkPhysicalDeviceShaderQuadControlFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_present_wait)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR): + compare_VkPhysicalDevicePresentWaitFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR): + compare_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_present_id)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR): + compare_VkPhysicalDevicePresentIdFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_fragment_shader_barycentric)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR): + compare_VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif 
(defined(VK_NV_fragment_shader_barycentric)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV): + compare_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_subgroup_uniform_control_flow)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR): + compare_VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_workgroup_memory_explicit_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR): + compare_VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_ray_tracing_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR): + compare_VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_untyped_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_UNTYPED_POINTERS_FEATURES_KHR): + compare_VkPhysicalDeviceShaderUntypedPointersFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_maximal_reconvergence)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MAXIMAL_RECONVERGENCE_FEATURES_KHR): + compare_VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_present_id2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_2_FEATURES_KHR): + compare_VkPhysicalDevicePresentId2FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if 
(defined(VK_KHR_present_wait2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_2_FEATURES_KHR): + compare_VkPhysicalDevicePresentWait2FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_ray_tracing_position_fetch)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR): + compare_VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_pipeline_binary)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_FEATURES_KHR): + compare_VkPhysicalDevicePipelineBinaryFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_swapchain_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_KHR): + compare_VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_EXT_swapchain_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_EXT): + compare_VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_cooperative_matrix)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR): + compare_VkPhysicalDeviceCooperativeMatrixFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_compute_shader_derivatives)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR): + compare_VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_NV_compute_shader_derivatives)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV): + 
compare_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_video_encode_av1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_AV1_FEATURES_KHR): + compare_VkPhysicalDeviceVideoEncodeAV1FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_video_decode_vp9)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_DECODE_VP9_FEATURES_KHR): + compare_VkPhysicalDeviceVideoDecodeVP9FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_video_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_1_FEATURES_KHR): + compare_VkPhysicalDeviceVideoMaintenance1FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_unified_image_layouts)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFIED_IMAGE_LAYOUTS_FEATURES_KHR): + compare_VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_KHR): + compare_VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_video_encode_intra_refresh)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_INTRA_REFRESH_FEATURES_KHR): + compare_VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_video_encode_quantization_map)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_QUANTIZATION_MAP_FEATURES_KHR): + compare_VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR(error_list, *reinterpret_cast(supported), 
*reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_relaxed_extended_instruction)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_RELAXED_EXTENDED_INSTRUCTION_FEATURES_KHR): + compare_VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_maintenance7)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_FEATURES_KHR): + compare_VkPhysicalDeviceMaintenance7FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_maintenance8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_8_FEATURES_KHR): + compare_VkPhysicalDeviceMaintenance8FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_shader_fma)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FMA_FEATURES_KHR): + compare_VkPhysicalDeviceShaderFmaFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_maintenance9)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_9_FEATURES_KHR): + compare_VkPhysicalDeviceMaintenance9FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_video_maintenance2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_2_FEATURES_KHR): + compare_VkPhysicalDeviceVideoMaintenance2FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_depth_clamp_zero_one)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_KHR): + compare_VkPhysicalDeviceDepthClampZeroOneFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_EXT_depth_clamp_zero_one)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT): + 
compare_VkPhysicalDeviceDepthClampZeroOneFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_robustness2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_KHR): + compare_VkPhysicalDeviceRobustness2FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_EXT_robustness2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT): + compare_VkPhysicalDeviceRobustness2FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_present_mode_fifo_latest_ready)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_MODE_FIFO_LATEST_READY_FEATURES_KHR): + compare_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_EXT_present_mode_fifo_latest_ready)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_MODE_FIFO_LATEST_READY_FEATURES_EXT): + compare_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_maintenance10)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_10_FEATURES_KHR): + compare_VkPhysicalDeviceMaintenance10FeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_transform_feedback)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT): + compare_VkPhysicalDeviceTransformFeedbackFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_corner_sampled_image)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV): + compare_VkPhysicalDeviceCornerSampledImageFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if 
(defined(VK_EXT_astc_decode_mode)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT): + compare_VkPhysicalDeviceASTCDecodeFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_conditional_rendering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT): + compare_VkPhysicalDeviceConditionalRenderingFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_depth_clip_enable)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT): + compare_VkPhysicalDeviceDepthClipEnableFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_IMG_relaxed_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RELAXED_LINE_RASTERIZATION_FEATURES_IMG): + compare_VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_shader_enqueue)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_FEATURES_AMDX): + compare_VkPhysicalDeviceShaderEnqueueFeaturesAMDX(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_blend_operation_advanced)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT): + compare_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_shader_sm_builtins)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV): + compare_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_shading_rate_image)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV): + 
compare_VkPhysicalDeviceShadingRateImageFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_representative_fragment_test)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV): + compare_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_mesh_shader)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV): + compare_VkPhysicalDeviceMeshShaderFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_shader_image_footprint)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV): + compare_VkPhysicalDeviceShaderImageFootprintFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_scissor_exclusive)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV): + compare_VkPhysicalDeviceExclusiveScissorFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_INTEL_shader_integer_functions2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL): + compare_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_fragment_density_map)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT): + compare_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_AMD_device_coherent_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD): + compare_VkPhysicalDeviceCoherentMemoryFeaturesAMD(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + 
break; +#endif +#if (defined(VK_EXT_shader_image_atomic_int64)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT): + compare_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_memory_priority)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT): + compare_VkPhysicalDeviceMemoryPriorityFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_dedicated_allocation_image_aliasing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV): + compare_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_buffer_device_address)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT): + compare_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_EXT_buffer_device_address)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT): + compare_VkPhysicalDeviceBufferAddressFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_cooperative_matrix)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV): + compare_VkPhysicalDeviceCooperativeMatrixFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_coverage_reduction_mode)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV): + compare_VkPhysicalDeviceCoverageReductionModeFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_fragment_shader_interlock)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT): + compare_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_ycbcr_image_arrays)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT): + compare_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_provoking_vertex)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT): + compare_VkPhysicalDeviceProvokingVertexFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_atomic_float)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT): + compare_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_extended_dynamic_state)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT): + compare_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_map_memory_placed)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT): + compare_VkPhysicalDeviceMapMemoryPlacedFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_atomic_float2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT): + compare_VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_device_generated_commands)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV): + 
compare_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_inherited_viewport_scissor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV): + compare_VkPhysicalDeviceInheritedViewportScissorFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_texel_buffer_alignment)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT): + compare_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_depth_bias_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_BIAS_CONTROL_FEATURES_EXT): + compare_VkPhysicalDeviceDepthBiasControlFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_device_memory_report)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT): + compare_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_custom_border_color)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT): + compare_VkPhysicalDeviceCustomBorderColorFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_present_barrier)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_BARRIER_FEATURES_NV): + compare_VkPhysicalDevicePresentBarrierFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_device_diagnostics_config)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV): + compare_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + 
break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_cuda_kernel_launch)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_FEATURES_NV): + compare_VkPhysicalDeviceCudaKernelLaunchFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_tile_shading)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_SHADING_FEATURES_QCOM): + compare_VkPhysicalDeviceTileShadingFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_FEATURES_EXT): + compare_VkPhysicalDeviceDescriptorBufferFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_graphics_pipeline_library)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT): + compare_VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_AMD_shader_early_and_late_fragment_tests)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD): + compare_VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_fragment_shading_rate_enums)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV): + compare_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_ray_tracing_motion_blur)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV): + compare_VkPhysicalDeviceRayTracingMotionBlurFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if 
(defined(VK_EXT_ycbcr_2plane_444_formats)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT): + compare_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_fragment_density_map2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT): + compare_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_image_compression_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT): + compare_VkPhysicalDeviceImageCompressionControlFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_attachment_feedback_loop_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT): + compare_VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_4444_formats)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT): + compare_VkPhysicalDevice4444FormatsFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_device_fault)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT): + compare_VkPhysicalDeviceFaultFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_rasterization_order_attachment_access)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT): + compare_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_ARM_rasterization_order_attachment_access)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM): + compare_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_rgba10x6_formats)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT): + compare_VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_mutable_descriptor_type)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT): + compare_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_VALVE_mutable_descriptor_type)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE): + compare_VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_vertex_input_dynamic_state)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT): + compare_VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_device_address_binding_report)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ADDRESS_BINDING_REPORT_FEATURES_EXT): + compare_VkPhysicalDeviceAddressBindingReportFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_depth_clip_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT): + compare_VkPhysicalDeviceDepthClipControlFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_primitive_topology_list_restart)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT): + compare_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI): + compare_VkPhysicalDeviceSubpassShadingFeaturesHUAWEI(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_HUAWEI_invocation_mask)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INVOCATION_MASK_FEATURES_HUAWEI): + compare_VkPhysicalDeviceInvocationMaskFeaturesHUAWEI(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_external_memory_rdma)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_RDMA_FEATURES_NV): + compare_VkPhysicalDeviceExternalMemoryRDMAFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_pipeline_properties)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT): + compare_VkPhysicalDevicePipelinePropertiesFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_frame_boundary)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAME_BOUNDARY_FEATURES_EXT): + compare_VkPhysicalDeviceFrameBoundaryFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_multisampled_render_to_single_sampled)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT): + compare_VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_extended_dynamic_state2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT): + 
compare_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_color_write_enable)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT): + compare_VkPhysicalDeviceColorWriteEnableFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_primitives_generated_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT): + compare_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VALVE_video_encode_rgb_conversion)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_RGB_CONVERSION_FEATURES_VALVE): + compare_VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_image_view_min_lod)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT): + compare_VkPhysicalDeviceImageViewMinLodFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_multi_draw)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT): + compare_VkPhysicalDeviceMultiDrawFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_image_2d_view_of_3d)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT): + compare_VkPhysicalDeviceImage2DViewOf3DFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_tile_image)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT): + compare_VkPhysicalDeviceShaderTileImageFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; 
+#endif +#if (defined(VK_EXT_opacity_micromap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_FEATURES_EXT): + compare_VkPhysicalDeviceOpacityMicromapFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_displacement_micromap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV): + compare_VkPhysicalDeviceDisplacementMicromapFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_HUAWEI_cluster_culling_shader)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI): + compare_VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_border_color_swizzle)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT): + compare_VkPhysicalDeviceBorderColorSwizzleFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_pageable_device_local_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT): + compare_VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_scheduling_controls)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM): + compare_VkPhysicalDeviceSchedulingControlsFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_image_sliced_view_of_3d)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT): + compare_VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if 
(defined(VK_VALVE_descriptor_set_host_mapping)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE): + compare_VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_non_seamless_cube_map)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT): + compare_VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_render_pass_striped)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_FEATURES_ARM): + compare_VkPhysicalDeviceRenderPassStripedFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_fragment_density_map_offset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_EXT): + compare_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_QCOM_fragment_density_map_offset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM): + compare_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_copy_memory_indirect)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV): + compare_VkPhysicalDeviceCopyMemoryIndirectFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_memory_decompression)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_EXT): + compare_VkPhysicalDeviceMemoryDecompressionFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#elif (defined(VK_NV_memory_decompression)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV): + compare_VkPhysicalDeviceMemoryDecompressionFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_COMPUTE_FEATURES_NV): + compare_VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_ray_tracing_linear_swept_spheres)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_LINEAR_SWEPT_SPHERES_FEATURES_NV): + compare_VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_linear_color_attachment)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV): + compare_VkPhysicalDeviceLinearColorAttachmentFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_image_compression_control_swapchain)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT): + compare_VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_image_processing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM): + compare_VkPhysicalDeviceImageProcessingFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_nested_command_buffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_FEATURES_EXT): + compare_VkPhysicalDeviceNestedCommandBufferFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_extended_dynamic_state3)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT): + compare_VkPhysicalDeviceExtendedDynamicState3FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_subpass_merge_feedback)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT): + compare_VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_tensors)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TENSOR_FEATURES_ARM): + compare_VkPhysicalDeviceTensorFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_tensors)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_TENSOR_FEATURES_ARM): + compare_VkPhysicalDeviceDescriptorBufferTensorFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_module_identifier)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT): + compare_VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_optical_flow)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_FEATURES_NV): + compare_VkPhysicalDeviceOpticalFlowFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_legacy_dithering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT): + compare_VkPhysicalDeviceLegacyDitheringFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if defined(VK_USE_PLATFORM_ANDROID_KHR) && (defined(VK_ANDROID_external_format_resolve)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID): + 
compare_VkPhysicalDeviceExternalFormatResolveFeaturesANDROID(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_AMD_anti_lag)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ANTI_LAG_FEATURES_AMD): + compare_VkPhysicalDeviceAntiLagFeaturesAMD(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_dense_geometry_format)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DENSE_GEOMETRY_FORMAT_FEATURES_AMDX): + compare_VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_object)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT): + compare_VkPhysicalDeviceShaderObjectFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_tile_properties)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM): + compare_VkPhysicalDeviceTilePropertiesFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_SEC_amigo_profiling)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC): + compare_VkPhysicalDeviceAmigoProfilingFeaturesSEC(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_multiview_per_view_viewports)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_VIEWPORTS_FEATURES_QCOM): + compare_VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_ray_tracing_invocation_reorder)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_FEATURES_NV): + compare_VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV(error_list, *reinterpret_cast(supported), 
*reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_cooperative_vector)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_VECTOR_FEATURES_NV): + compare_VkPhysicalDeviceCooperativeVectorFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_extended_sparse_address_space)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_FEATURES_NV): + compare_VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_legacy_vertex_attributes)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_FEATURES_EXT): + compare_VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_shader_core_builtins)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM): + compare_VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_pipeline_library_group_handles)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT): + compare_VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_dynamic_rendering_unused_attachments)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT): + compare_VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_data_graph)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DATA_GRAPH_FEATURES_ARM): + compare_VkPhysicalDeviceDataGraphFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if 
(defined(VK_QCOM_multiview_per_view_render_areas)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM): + compare_VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_per_stage_descriptor_set)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PER_STAGE_DESCRIPTOR_SET_FEATURES_NV): + compare_VkPhysicalDevicePerStageDescriptorSetFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_image_processing2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_FEATURES_QCOM): + compare_VkPhysicalDeviceImageProcessing2FeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_filter_cubic_weights)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_WEIGHTS_FEATURES_QCOM): + compare_VkPhysicalDeviceCubicWeightsFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_ycbcr_degamma)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_DEGAMMA_FEATURES_QCOM): + compare_VkPhysicalDeviceYcbcrDegammaFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_filter_cubic_clamp)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM): + compare_VkPhysicalDeviceCubicClampFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_attachment_feedback_loop_dynamic_state)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT): + compare_VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if defined(VK_USE_PLATFORM_SCREEN_QNX) && 
(defined(VK_QNX_external_memory_screen_buffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX): + compare_VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_descriptor_pool_overallocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV): + compare_VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_QCOM_tile_memory_heap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_MEMORY_HEAP_FEATURES_QCOM): + compare_VkPhysicalDeviceTileMemoryHeapFeaturesQCOM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_raw_access_chains)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAW_ACCESS_CHAINS_FEATURES_NV): + compare_VkPhysicalDeviceRawAccessChainsFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_command_buffer_inheritance)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMMAND_BUFFER_INHERITANCE_FEATURES_NV): + compare_VkPhysicalDeviceCommandBufferInheritanceFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_shader_atomic_float16_vector)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV): + compare_VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_replicated_composites)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_REPLICATED_COMPOSITES_FEATURES_EXT): + compare_VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_float8)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT8_FEATURES_EXT): + compare_VkPhysicalDeviceShaderFloat8FeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_ray_tracing_validation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_VALIDATION_FEATURES_NV): + compare_VkPhysicalDeviceRayTracingValidationFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_ACCELERATION_STRUCTURE_FEATURES_NV): + compare_VkPhysicalDeviceClusterAccelerationStructureFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PARTITIONED_ACCELERATION_STRUCTURE_FEATURES_NV): + compare_VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_device_generated_commands)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_EXT): + compare_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_MESA_image_alignment_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_FEATURES_MESA): + compare_VkPhysicalDeviceImageAlignmentControlFeaturesMESA(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_depth_clamp_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_CONTROL_FEATURES_EXT): + compare_VkPhysicalDeviceDepthClampControlFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_HUAWEI_hdr_vivid)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HDR_VIVID_FEATURES_HUAWEI): + compare_VkPhysicalDeviceHdrVividFeaturesHUAWEI(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_NV_cooperative_matrix2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_FEATURES_NV): + compare_VkPhysicalDeviceCooperativeMatrix2FeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_pipeline_opacity_micromap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_OPACITY_MICROMAP_FEATURES_ARM): + compare_VkPhysicalDevicePipelineOpacityMicromapFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_vertex_attribute_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_ROBUSTNESS_FEATURES_EXT): + compare_VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_ARM_format_pack)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FORMAT_PACK_FEATURES_ARM): + compare_VkPhysicalDeviceFormatPackFeaturesARM(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_VALVE_fragment_density_map_layered)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_LAYERED_FEATURES_VALVE): + compare_VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_present_metering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_METERING_FEATURES_NV): + compare_VkPhysicalDevicePresentMeteringFeaturesNV(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_zero_initialize_device_memory)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_DEVICE_MEMORY_FEATURES_EXT): + compare_VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_64bit_indexing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_64_BIT_INDEXING_FEATURES_EXT): + compare_VkPhysicalDeviceShader64BitIndexingFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_SEC_pipeline_cache_incremental_mode)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CACHE_INCREMENTAL_MODE_FEATURES_SEC): + compare_VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_shader_uniform_buffer_unsized_array)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_UNIFORM_BUFFER_UNSIZED_ARRAY_FEATURES_EXT): + compare_VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_acceleration_structure)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR): + compare_VkPhysicalDeviceAccelerationStructureFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR): + compare_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_KHR_ray_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR): + compare_VkPhysicalDeviceRayQueryFeaturesKHR(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif +#if (defined(VK_EXT_mesh_shader)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT): + 
compare_VkPhysicalDeviceMeshShaderFeaturesEXT(error_list, *reinterpret_cast(supported), *reinterpret_cast(requested)); + break; +#endif + default: + break; + } +} +void merge_feature_struct(VkStructureType sType, void* current, const void* merge_in) { + switch (sType) { +#if (defined(VK_VERSION_1_1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES): + merge_VkPhysicalDeviceProtectedMemoryFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES): + merge_VkPhysicalDevice16BitStorageFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_16bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR): + merge_VkPhysicalDevice16BitStorageFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES): + merge_VkPhysicalDeviceVariablePointersFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES): + merge_VkPhysicalDeviceVariablePointerFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES): + merge_VkPhysicalDeviceVariablePointerFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_variable_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES): + merge_VkPhysicalDeviceVariablePointersFeaturesKHR(*reinterpret_cast(current), 
*reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES): + merge_VkPhysicalDeviceSamplerYcbcrConversionFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_sampler_ycbcr_conversion)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR): + merge_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_1) || defined(VK_KHR_multiview)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES): + merge_VkPhysicalDeviceMultiviewFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_1) || defined(VK_KHR_multiview)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR): + merge_VkPhysicalDeviceMultiviewFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES): + merge_VkPhysicalDeviceShaderDrawParametersFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES): + merge_VkPhysicalDeviceShaderDrawParameterFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES): + merge_VkPhysicalDeviceVulkan11Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES): + merge_VkPhysicalDeviceVulkan12Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if 
(defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES): + merge_VkPhysicalDeviceVulkanMemoryModelFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_vulkan_memory_model)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR): + merge_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES): + merge_VkPhysicalDeviceHostQueryResetFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_EXT_host_query_reset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT): + merge_VkPhysicalDeviceHostQueryResetFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES): + merge_VkPhysicalDeviceTimelineSemaphoreFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_timeline_semaphore)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR): + merge_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES): + merge_VkPhysicalDeviceBufferDeviceAddressFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_buffer_device_address)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR): + 
merge_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES): + merge_VkPhysicalDevice8BitStorageFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_8bit_storage)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR): + merge_VkPhysicalDevice8BitStorageFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES): + merge_VkPhysicalDeviceShaderAtomicInt64Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_atomic_int64)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR): + merge_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES): + merge_VkPhysicalDeviceShaderFloat16Int8Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR): + merge_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_float16_int8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR): + merge_VkPhysicalDeviceFloat16Int8FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || 
defined(VK_EXT_descriptor_indexing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES): + merge_VkPhysicalDeviceDescriptorIndexingFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_EXT_descriptor_indexing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT): + merge_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES): + merge_VkPhysicalDeviceScalarBlockLayoutFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_EXT_scalar_block_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT): + merge_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES): + merge_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_uniform_buffer_standard_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR): + merge_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_shader_subgroup_extended_types)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES): + merge_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || 
defined(VK_KHR_shader_subgroup_extended_types)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR): + merge_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES): + merge_VkPhysicalDeviceImagelessFramebufferFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_imageless_framebuffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR): + merge_VkPhysicalDeviceImagelessFramebufferFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES): + merge_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_2) || defined(VK_KHR_separate_depth_stencil_layouts)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR): + merge_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES): + merge_VkPhysicalDeviceVulkan13Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES): + merge_VkPhysicalDevicePrivateDataFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_private_data)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT): + merge_VkPhysicalDevicePrivateDataFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES): + merge_VkPhysicalDeviceSynchronization2Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR): + merge_VkPhysicalDeviceSynchronization2FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES): + merge_VkPhysicalDeviceTextureCompressionASTCHDRFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_texture_compression_astc_hdr)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT): + merge_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES): + merge_VkPhysicalDeviceMaintenance4Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_maintenance4)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR): + merge_VkPhysicalDeviceMaintenance4FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES): + 
merge_VkPhysicalDeviceShaderTerminateInvocationFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_terminate_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR): + merge_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES): + merge_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_shader_demote_to_helper_invocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT): + merge_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES): + merge_VkPhysicalDevicePipelineCreationCacheControlFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_pipeline_creation_cache_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT): + merge_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_zero_initialize_workgroup_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES): + merge_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || 
defined(VK_KHR_zero_initialize_workgroup_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR): + merge_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES): + merge_VkPhysicalDeviceImageRobustnessFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_image_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT): + merge_VkPhysicalDeviceImageRobustnessFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES): + merge_VkPhysicalDeviceSubgroupSizeControlFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_subgroup_size_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT): + merge_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES): + merge_VkPhysicalDeviceInlineUniformBlockFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_EXT_inline_uniform_block)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT): + merge_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES): + merge_VkPhysicalDeviceShaderIntegerDotProductFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_shader_integer_dot_product)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR): + merge_VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES): + merge_VkPhysicalDeviceDynamicRenderingFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_3) || defined(VK_KHR_dynamic_rendering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR): + merge_VkPhysicalDeviceDynamicRenderingFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_FEATURES): + merge_VkPhysicalDeviceVulkan14Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority) || defined(VK_EXT_global_priority_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES): + merge_VkPhysicalDeviceGlobalPriorityQueryFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_global_priority)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR): + merge_VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_global_priority_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR): + 
merge_VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8) || defined(VK_EXT_index_type_uint8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES): + merge_VkPhysicalDeviceIndexTypeUint8Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_index_type_uint8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT): + merge_VkPhysicalDeviceIndexTypeUint8FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_index_type_uint8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT): + merge_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES): + merge_VkPhysicalDeviceMaintenance5Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance5)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR): + merge_VkPhysicalDeviceMaintenance5FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES): + merge_VkPhysicalDeviceMaintenance6Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_maintenance6)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR): + merge_VkPhysicalDeviceMaintenance6FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || 
defined(VK_EXT_host_image_copy)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES): + merge_VkPhysicalDeviceHostImageCopyFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_host_image_copy)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT): + merge_VkPhysicalDeviceHostImageCopyFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES): + merge_VkPhysicalDeviceShaderSubgroupRotateFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_subgroup_rotate)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR): + merge_VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES): + merge_VkPhysicalDeviceShaderFloatControls2Features(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_float_controls2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR): + merge_VkPhysicalDeviceShaderFloatControls2FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES): + merge_VkPhysicalDeviceShaderExpectAssumeFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_shader_expect_assume)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR): + 
merge_VkPhysicalDeviceShaderExpectAssumeFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES): + merge_VkPhysicalDevicePipelineProtectedAccessFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_protected_access)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT): + merge_VkPhysicalDevicePipelineProtectedAccessFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES): + merge_VkPhysicalDevicePipelineRobustnessFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_pipeline_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT): + merge_VkPhysicalDevicePipelineRobustnessFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization) || defined(VK_EXT_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES): + merge_VkPhysicalDeviceLineRasterizationFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT): + merge_VkPhysicalDeviceLineRasterizationFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT): + 
merge_VkPhysicalDeviceLineRasterizationFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor) || defined(VK_EXT_vertex_attribute_divisor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES): + merge_VkPhysicalDeviceVertexAttributeDivisorFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_vertex_attribute_divisor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT): + merge_VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_EXT_vertex_attribute_divisor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT): + merge_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES): + merge_VkPhysicalDeviceDynamicRenderingLocalReadFeatures(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VERSION_1_4) || defined(VK_KHR_dynamic_rendering_local_read)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR): + merge_VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_performance_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR): + merge_VkPhysicalDevicePerformanceQueryFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_bfloat16)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR): + 
merge_VkPhysicalDeviceShaderBfloat16FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_KHR_portability_subset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR): + merge_VkPhysicalDevicePortabilitySubsetFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_clock)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR): + merge_VkPhysicalDeviceShaderClockFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_fragment_shading_rate)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR): + merge_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_quad_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_QUAD_CONTROL_FEATURES_KHR): + merge_VkPhysicalDeviceShaderQuadControlFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_present_wait)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR): + merge_VkPhysicalDevicePresentWaitFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_pipeline_executable_properties)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR): + merge_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_present_id)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR): + merge_VkPhysicalDevicePresentIdFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_fragment_shader_barycentric)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR): + 
merge_VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_NV_fragment_shader_barycentric)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV): + merge_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_subgroup_uniform_control_flow)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR): + merge_VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_workgroup_memory_explicit_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR): + merge_VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_ray_tracing_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR): + merge_VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_untyped_pointers)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_UNTYPED_POINTERS_FEATURES_KHR): + merge_VkPhysicalDeviceShaderUntypedPointersFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_maximal_reconvergence)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MAXIMAL_RECONVERGENCE_FEATURES_KHR): + merge_VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_present_id2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_2_FEATURES_KHR): + merge_VkPhysicalDevicePresentId2FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif 
+#if (defined(VK_KHR_present_wait2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_2_FEATURES_KHR): + merge_VkPhysicalDevicePresentWait2FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_ray_tracing_position_fetch)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR): + merge_VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_pipeline_binary)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_FEATURES_KHR): + merge_VkPhysicalDevicePipelineBinaryFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_swapchain_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_KHR): + merge_VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_EXT_swapchain_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_EXT): + merge_VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_cooperative_matrix)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR): + merge_VkPhysicalDeviceCooperativeMatrixFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_compute_shader_derivatives)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR): + merge_VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_NV_compute_shader_derivatives)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV): + merge_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); 
+ break; +#endif +#if (defined(VK_KHR_video_encode_av1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_AV1_FEATURES_KHR): + merge_VkPhysicalDeviceVideoEncodeAV1FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_video_decode_vp9)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_DECODE_VP9_FEATURES_KHR): + merge_VkPhysicalDeviceVideoDecodeVP9FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_video_maintenance1)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_1_FEATURES_KHR): + merge_VkPhysicalDeviceVideoMaintenance1FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_unified_image_layouts)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFIED_IMAGE_LAYOUTS_FEATURES_KHR): + merge_VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_copy_memory_indirect)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_KHR): + merge_VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_video_encode_intra_refresh)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_INTRA_REFRESH_FEATURES_KHR): + merge_VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_video_encode_quantization_map)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_QUANTIZATION_MAP_FEATURES_KHR): + merge_VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_relaxed_extended_instruction)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_RELAXED_EXTENDED_INSTRUCTION_FEATURES_KHR): + 
merge_VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_maintenance7)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_FEATURES_KHR): + merge_VkPhysicalDeviceMaintenance7FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_maintenance8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_8_FEATURES_KHR): + merge_VkPhysicalDeviceMaintenance8FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_shader_fma)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FMA_FEATURES_KHR): + merge_VkPhysicalDeviceShaderFmaFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_maintenance9)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_9_FEATURES_KHR): + merge_VkPhysicalDeviceMaintenance9FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_video_maintenance2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_2_FEATURES_KHR): + merge_VkPhysicalDeviceVideoMaintenance2FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_depth_clamp_zero_one)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_KHR): + merge_VkPhysicalDeviceDepthClampZeroOneFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_EXT_depth_clamp_zero_one)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT): + merge_VkPhysicalDeviceDepthClampZeroOneFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_robustness2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_KHR): + merge_VkPhysicalDeviceRobustness2FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + 
break; +#elif (defined(VK_EXT_robustness2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT): + merge_VkPhysicalDeviceRobustness2FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_present_mode_fifo_latest_ready)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_MODE_FIFO_LATEST_READY_FEATURES_KHR): + merge_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_EXT_present_mode_fifo_latest_ready)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_MODE_FIFO_LATEST_READY_FEATURES_EXT): + merge_VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_maintenance10)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_10_FEATURES_KHR): + merge_VkPhysicalDeviceMaintenance10FeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_transform_feedback)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT): + merge_VkPhysicalDeviceTransformFeedbackFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_corner_sampled_image)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV): + merge_VkPhysicalDeviceCornerSampledImageFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_astc_decode_mode)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT): + merge_VkPhysicalDeviceASTCDecodeFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_conditional_rendering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT): + merge_VkPhysicalDeviceConditionalRenderingFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if 
(defined(VK_EXT_depth_clip_enable)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT): + merge_VkPhysicalDeviceDepthClipEnableFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_IMG_relaxed_line_rasterization)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RELAXED_LINE_RASTERIZATION_FEATURES_IMG): + merge_VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_shader_enqueue)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_FEATURES_AMDX): + merge_VkPhysicalDeviceShaderEnqueueFeaturesAMDX(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_blend_operation_advanced)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT): + merge_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_shader_sm_builtins)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV): + merge_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_shading_rate_image)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV): + merge_VkPhysicalDeviceShadingRateImageFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_representative_fragment_test)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV): + merge_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_mesh_shader)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV): + merge_VkPhysicalDeviceMeshShaderFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + 
break; +#endif +#if (defined(VK_NV_shader_image_footprint)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV): + merge_VkPhysicalDeviceShaderImageFootprintFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_scissor_exclusive)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV): + merge_VkPhysicalDeviceExclusiveScissorFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_INTEL_shader_integer_functions2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL): + merge_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_fragment_density_map)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT): + merge_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_AMD_device_coherent_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD): + merge_VkPhysicalDeviceCoherentMemoryFeaturesAMD(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_image_atomic_int64)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT): + merge_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_memory_priority)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT): + merge_VkPhysicalDeviceMemoryPriorityFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_dedicated_allocation_image_aliasing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV): + 
merge_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_buffer_device_address)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT): + merge_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_EXT_buffer_device_address)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT): + merge_VkPhysicalDeviceBufferAddressFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_cooperative_matrix)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV): + merge_VkPhysicalDeviceCooperativeMatrixFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_coverage_reduction_mode)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV): + merge_VkPhysicalDeviceCoverageReductionModeFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_fragment_shader_interlock)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT): + merge_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_ycbcr_image_arrays)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT): + merge_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_provoking_vertex)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT): + merge_VkPhysicalDeviceProvokingVertexFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_atomic_float)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT): + 
merge_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_extended_dynamic_state)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT): + merge_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_map_memory_placed)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT): + merge_VkPhysicalDeviceMapMemoryPlacedFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_atomic_float2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT): + merge_VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_device_generated_commands)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV): + merge_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_inherited_viewport_scissor)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV): + merge_VkPhysicalDeviceInheritedViewportScissorFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_texel_buffer_alignment)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT): + merge_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_depth_bias_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_BIAS_CONTROL_FEATURES_EXT): + merge_VkPhysicalDeviceDepthBiasControlFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_device_memory_report)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT): + merge_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_custom_border_color)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT): + merge_VkPhysicalDeviceCustomBorderColorFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_present_barrier)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_BARRIER_FEATURES_NV): + merge_VkPhysicalDevicePresentBarrierFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_device_diagnostics_config)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV): + merge_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_cuda_kernel_launch)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_FEATURES_NV): + merge_VkPhysicalDeviceCudaKernelLaunchFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_tile_shading)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_SHADING_FEATURES_QCOM): + merge_VkPhysicalDeviceTileShadingFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_descriptor_buffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_FEATURES_EXT): + merge_VkPhysicalDeviceDescriptorBufferFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_graphics_pipeline_library)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT): + merge_VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if 
(defined(VK_AMD_shader_early_and_late_fragment_tests)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD): + merge_VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_fragment_shading_rate_enums)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV): + merge_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_ray_tracing_motion_blur)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV): + merge_VkPhysicalDeviceRayTracingMotionBlurFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_ycbcr_2plane_444_formats)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT): + merge_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_fragment_density_map2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT): + merge_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_image_compression_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT): + merge_VkPhysicalDeviceImageCompressionControlFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_attachment_feedback_loop_layout)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT): + merge_VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_4444_formats)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT): + 
merge_VkPhysicalDevice4444FormatsFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_device_fault)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT): + merge_VkPhysicalDeviceFaultFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_rasterization_order_attachment_access)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT): + merge_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_ARM_rasterization_order_attachment_access)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM): + merge_VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_rgba10x6_formats)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT): + merge_VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_mutable_descriptor_type)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT): + merge_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_VALVE_mutable_descriptor_type)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE): + merge_VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_vertex_input_dynamic_state)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT): + merge_VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if 
(defined(VK_EXT_device_address_binding_report)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ADDRESS_BINDING_REPORT_FEATURES_EXT): + merge_VkPhysicalDeviceAddressBindingReportFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_depth_clip_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT): + merge_VkPhysicalDeviceDepthClipControlFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_primitive_topology_list_restart)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT): + merge_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_HUAWEI_subpass_shading)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI): + merge_VkPhysicalDeviceSubpassShadingFeaturesHUAWEI(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_HUAWEI_invocation_mask)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INVOCATION_MASK_FEATURES_HUAWEI): + merge_VkPhysicalDeviceInvocationMaskFeaturesHUAWEI(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_external_memory_rdma)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_RDMA_FEATURES_NV): + merge_VkPhysicalDeviceExternalMemoryRDMAFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_pipeline_properties)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT): + merge_VkPhysicalDevicePipelinePropertiesFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_frame_boundary)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAME_BOUNDARY_FEATURES_EXT): + merge_VkPhysicalDeviceFrameBoundaryFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; 
+#endif +#if (defined(VK_EXT_multisampled_render_to_single_sampled)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT): + merge_VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_extended_dynamic_state2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT): + merge_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_color_write_enable)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT): + merge_VkPhysicalDeviceColorWriteEnableFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_primitives_generated_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT): + merge_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VALVE_video_encode_rgb_conversion)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_RGB_CONVERSION_FEATURES_VALVE): + merge_VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_image_view_min_lod)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT): + merge_VkPhysicalDeviceImageViewMinLodFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_multi_draw)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT): + merge_VkPhysicalDeviceMultiDrawFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_image_2d_view_of_3d)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT): + 
merge_VkPhysicalDeviceImage2DViewOf3DFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_tile_image)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT): + merge_VkPhysicalDeviceShaderTileImageFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_opacity_micromap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_FEATURES_EXT): + merge_VkPhysicalDeviceOpacityMicromapFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_displacement_micromap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV): + merge_VkPhysicalDeviceDisplacementMicromapFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_HUAWEI_cluster_culling_shader)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI): + merge_VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_border_color_swizzle)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT): + merge_VkPhysicalDeviceBorderColorSwizzleFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_pageable_device_local_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT): + merge_VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_scheduling_controls)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM): + merge_VkPhysicalDeviceSchedulingControlsFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_image_sliced_view_of_3d)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT): + merge_VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VALVE_descriptor_set_host_mapping)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE): + merge_VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_non_seamless_cube_map)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT): + merge_VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_render_pass_striped)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_FEATURES_ARM): + merge_VkPhysicalDeviceRenderPassStripedFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_fragment_density_map_offset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_EXT): + merge_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#elif (defined(VK_QCOM_fragment_density_map_offset)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM): + merge_VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_copy_memory_indirect)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV): + merge_VkPhysicalDeviceCopyMemoryIndirectFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_memory_decompression)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_EXT): + merge_VkPhysicalDeviceMemoryDecompressionFeaturesEXT(*reinterpret_cast(current), 
*reinterpret_cast(merge_in)); + break; +#elif (defined(VK_NV_memory_decompression)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV): + merge_VkPhysicalDeviceMemoryDecompressionFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_device_generated_commands_compute)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_COMPUTE_FEATURES_NV): + merge_VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_ray_tracing_linear_swept_spheres)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_LINEAR_SWEPT_SPHERES_FEATURES_NV): + merge_VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_linear_color_attachment)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV): + merge_VkPhysicalDeviceLinearColorAttachmentFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_image_compression_control_swapchain)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT): + merge_VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_image_processing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM): + merge_VkPhysicalDeviceImageProcessingFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_nested_command_buffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_FEATURES_EXT): + merge_VkPhysicalDeviceNestedCommandBufferFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_extended_dynamic_state3)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT): + merge_VkPhysicalDeviceExtendedDynamicState3FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_subpass_merge_feedback)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT): + merge_VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_tensors)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TENSOR_FEATURES_ARM): + merge_VkPhysicalDeviceTensorFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_tensors)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_TENSOR_FEATURES_ARM): + merge_VkPhysicalDeviceDescriptorBufferTensorFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_module_identifier)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT): + merge_VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_optical_flow)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_FEATURES_NV): + merge_VkPhysicalDeviceOpticalFlowFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_legacy_dithering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT): + merge_VkPhysicalDeviceLegacyDitheringFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_USE_PLATFORM_ANDROID_KHR) && (defined(VK_ANDROID_external_format_resolve)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID): + merge_VkPhysicalDeviceExternalFormatResolveFeaturesANDROID(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_AMD_anti_lag)) + 
case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ANTI_LAG_FEATURES_AMD): + merge_VkPhysicalDeviceAntiLagFeaturesAMD(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_AMDX_dense_geometry_format)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DENSE_GEOMETRY_FORMAT_FEATURES_AMDX): + merge_VkPhysicalDeviceDenseGeometryFormatFeaturesAMDX(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_object)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT): + merge_VkPhysicalDeviceShaderObjectFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_tile_properties)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM): + merge_VkPhysicalDeviceTilePropertiesFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_SEC_amigo_profiling)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC): + merge_VkPhysicalDeviceAmigoProfilingFeaturesSEC(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_multiview_per_view_viewports)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_VIEWPORTS_FEATURES_QCOM): + merge_VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_ray_tracing_invocation_reorder)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_FEATURES_NV): + merge_VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_cooperative_vector)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_VECTOR_FEATURES_NV): + merge_VkPhysicalDeviceCooperativeVectorFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if 
(defined(VK_NV_extended_sparse_address_space)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_FEATURES_NV): + merge_VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_legacy_vertex_attributes)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_FEATURES_EXT): + merge_VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_shader_core_builtins)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM): + merge_VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_pipeline_library_group_handles)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT): + merge_VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_dynamic_rendering_unused_attachments)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT): + merge_VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_data_graph)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DATA_GRAPH_FEATURES_ARM): + merge_VkPhysicalDeviceDataGraphFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_multiview_per_view_render_areas)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM): + merge_VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_per_stage_descriptor_set)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PER_STAGE_DESCRIPTOR_SET_FEATURES_NV): + 
merge_VkPhysicalDevicePerStageDescriptorSetFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_image_processing2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_FEATURES_QCOM): + merge_VkPhysicalDeviceImageProcessing2FeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_filter_cubic_weights)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_WEIGHTS_FEATURES_QCOM): + merge_VkPhysicalDeviceCubicWeightsFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_ycbcr_degamma)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_DEGAMMA_FEATURES_QCOM): + merge_VkPhysicalDeviceYcbcrDegammaFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_QCOM_filter_cubic_clamp)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM): + merge_VkPhysicalDeviceCubicClampFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_attachment_feedback_loop_dynamic_state)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT): + merge_VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_USE_PLATFORM_SCREEN_QNX) && (defined(VK_QNX_external_memory_screen_buffer)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX): + merge_VkPhysicalDeviceExternalMemoryScreenBufferFeaturesQNX(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_descriptor_pool_overallocation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV): + merge_VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if 
(defined(VK_QCOM_tile_memory_heap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_MEMORY_HEAP_FEATURES_QCOM): + merge_VkPhysicalDeviceTileMemoryHeapFeaturesQCOM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_raw_access_chains)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAW_ACCESS_CHAINS_FEATURES_NV): + merge_VkPhysicalDeviceRawAccessChainsFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_command_buffer_inheritance)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMMAND_BUFFER_INHERITANCE_FEATURES_NV): + merge_VkPhysicalDeviceCommandBufferInheritanceFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_shader_atomic_float16_vector)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV): + merge_VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_replicated_composites)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_REPLICATED_COMPOSITES_FEATURES_EXT): + merge_VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_float8)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT8_FEATURES_EXT): + merge_VkPhysicalDeviceShaderFloat8FeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_ray_tracing_validation)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_VALIDATION_FEATURES_NV): + merge_VkPhysicalDeviceRayTracingValidationFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_cluster_acceleration_structure)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_ACCELERATION_STRUCTURE_FEATURES_NV): + merge_VkPhysicalDeviceClusterAccelerationStructureFeaturesNV(*reinterpret_cast(current), 
*reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_partitioned_acceleration_structure)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PARTITIONED_ACCELERATION_STRUCTURE_FEATURES_NV): + merge_VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_device_generated_commands)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_EXT): + merge_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_MESA_image_alignment_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_FEATURES_MESA): + merge_VkPhysicalDeviceImageAlignmentControlFeaturesMESA(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_depth_clamp_control)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_CONTROL_FEATURES_EXT): + merge_VkPhysicalDeviceDepthClampControlFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_HUAWEI_hdr_vivid)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HDR_VIVID_FEATURES_HUAWEI): + merge_VkPhysicalDeviceHdrVividFeaturesHUAWEI(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_NV_cooperative_matrix2)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_FEATURES_NV): + merge_VkPhysicalDeviceCooperativeMatrix2FeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_pipeline_opacity_micromap)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_OPACITY_MICROMAP_FEATURES_ARM): + merge_VkPhysicalDevicePipelineOpacityMicromapFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_vertex_attribute_robustness)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_ROBUSTNESS_FEATURES_EXT): + 
merge_VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_ARM_format_pack)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FORMAT_PACK_FEATURES_ARM): + merge_VkPhysicalDeviceFormatPackFeaturesARM(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_VALVE_fragment_density_map_layered)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_LAYERED_FEATURES_VALVE): + merge_VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if defined(VK_ENABLE_BETA_EXTENSIONS) && (defined(VK_NV_present_metering)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_METERING_FEATURES_NV): + merge_VkPhysicalDevicePresentMeteringFeaturesNV(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_zero_initialize_device_memory)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_DEVICE_MEMORY_FEATURES_EXT): + merge_VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_64bit_indexing)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_64_BIT_INDEXING_FEATURES_EXT): + merge_VkPhysicalDeviceShader64BitIndexingFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_SEC_pipeline_cache_incremental_mode)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CACHE_INCREMENTAL_MODE_FEATURES_SEC): + merge_VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_shader_uniform_buffer_unsized_array)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_UNIFORM_BUFFER_UNSIZED_ARRAY_FEATURES_EXT): + merge_VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); 
+ break; +#endif +#if (defined(VK_KHR_acceleration_structure)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR): + merge_VkPhysicalDeviceAccelerationStructureFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_ray_tracing_pipeline)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR): + merge_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_KHR_ray_query)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR): + merge_VkPhysicalDeviceRayQueryFeaturesKHR(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif +#if (defined(VK_EXT_mesh_shader)) + case(VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT): + merge_VkPhysicalDeviceMeshShaderFeaturesEXT(*reinterpret_cast(current), *reinterpret_cast(merge_in)); + break; +#endif + default: + break; + } +} +} // namespace vkb \ No newline at end of file diff --git a/src/Etterna/Actor/Base/Actor.cpp b/src/Etterna/Actor/Base/Actor.cpp index 086a410c37..82a956669f 100644 --- a/src/Etterna/Actor/Base/Actor.cpp +++ b/src/Etterna/Actor/Base/Actor.cpp @@ -7,6 +7,7 @@ #include "Etterna/Singletons/MessageManager.h" #include "Etterna/Models/Misc/Preference.h" #include "RageUtil/Graphics/RageDisplay.h" +#include "RageUtil/Graphics/RageTexture.h" #include "RageUtil/Misc/RageMath.h" #include "RageUtil/Misc/RageTimer.h" #include "RageUtil/Utils/RageUtil.h" @@ -17,6 +18,7 @@ #include #include #include +#include static Preference g_bShowMasks("ShowMasks", false); static const float default_effect_period = 1.0f; @@ -517,7 +519,11 @@ Actor::Draw() ASSERT(m_pTempState != nullptr); if (PartiallyOpaque()) { this->BeginDraw(); + + DISPLAY->SetGraphicsPipeline(m_CustomShaders, m_VertexShaderArgs, m_FragmentShaderArgs, m_ShaderPersistence); this->DrawPrimitives(); + DISPLAY->SetGraphicsPipeline(0, {}, {}, 
m_ShaderPersistence); + this->EndDraw(); } this->PostDraw(); @@ -1648,6 +1654,14 @@ Actor::HandleMessage(const Message& msg) PlayCommandNoRecurse(msg); } +void +Actor::SetShaders(const std::string& vertexShaderPath, + const std::string& fragmentShaderPath) +{ + m_CustomShaders = DISPLAY->CreateGraphicsPipeline( + FILEMAN->ResolvePath(vertexShaderPath), FILEMAN->ResolvePath(fragmentShaderPath)); +} + void Actor::PlayCommandNoRecurse(const Message& msg) { @@ -2728,6 +2742,177 @@ class LunaActor : public Luna return 0; return 1; } + static int SetShaders(T* p, lua_State* L) + { + p->SetShaders(SArg(1), SArg(2)); + COMMON_RETURN_SELF; + } + static int ResetShaders(T* p, lua_State* L) + { + p->ResetShaders(); + COMMON_RETURN_SELF; + } + static int SetShaderPersistence(T* p, lua_State* L) + { + p->SetShaderPersistence(BArg(1)); + COMMON_RETURN_SELF; + } + static int GetShaderPersistence(T* p, lua_State* L) + { + lua_pushboolean(L,p->GetShaderPersistence()); + return 1; + } + static int SetShaderParameters(T* p, lua_State* L) + { + int argCount = lua_gettop(L); + if (argCount != 2) { + luaL_error( + L, + "Expected shader type and shader parameter table as arguments"); + } + + auto shaderType = Enum::Check(L, 1); + if (shaderType == ShaderType_Invalid) { + luaL_error(L, "Invalid shader type passed"); + } + + if (!lua_istable(L, 2)) { + luaL_error(L, + "expected shader parameter table to be... a table"); + } + + std::vector& scratchBuffer = shaderType == ShaderType_Vertex + ? 
p->m_VertexShaderArgs + : p->m_FragmentShaderArgs; + scratchBuffer.clear(); + size_t scratchOffset = 0; + + // turn the table into key-value pairs and gather up all the keys + // (because iterating through the table with lua_objlen is yucky sometimes) + std::vector keys; + lua_pushnil(L); + while (lua_next(L, 2) != 0) { + if (lua_type(L, -2) == LUA_TNUMBER) { + int key = (int)lua_tonumber(L, -2); + keys.push_back(key); + } + lua_pop(L, 1); + } + std::sort(keys.begin(), keys.end()); + + for (const auto& key : keys) { + lua_rawgeti(L, 2, key); + if (!lua_istable(L, -1)) { + luaL_error( + L, + "Shader parameter at key %d is not a table (type: %s)", + key, + lua_typename(L, lua_type(L, -1))); + } + + lua_rawgeti(L, -1, 1); + auto paramType = Enum::Check(L, -1); + lua_pop(L, 1); + + lua_rawgeti(L, -1, 2); + + switch (paramType) { + case ShaderParamType_Int: { + int arg = IArg(-1); + scratchBuffer.resize(scratchOffset + sizeof(int)); + std::memcpy( + &scratchBuffer[scratchOffset], &arg, sizeof(int)); + scratchOffset += sizeof(int); + + lua_pop(L, 1); + break; + } + case ShaderParamType_IntArray: { + int arrayLength = IArg(-1); + if (arrayLength < 1) { + luaL_error( + L, + "Invalid array length for shader parameter at key %d", + key); + } + lua_pop(L, 1); + + std::vector array(arrayLength); + for (int j = 0; j < arrayLength; j++) { + lua_rawgeti(L, -1, 3 + j); + array[j] = IArg(-1); + lua_pop(L, 1); + } + + scratchBuffer.resize(scratchOffset + + sizeof(int) * arrayLength); + std::memcpy(&scratchBuffer[scratchOffset], + array.data(), + sizeof(int) * arrayLength); + scratchOffset += sizeof(int) * arrayLength; + break; + } + case ShaderParamType_Float: { + float arg = FArg(-1); + scratchBuffer.resize(scratchOffset + sizeof(float)); + std::memcpy( + &scratchBuffer[scratchOffset], &arg, sizeof(float)); + scratchOffset += sizeof(float); + + lua_pop(L, 1); + break; + } + case ShaderParamType_FloatArray: { + int arrayLength = IArg(-1); + if (arrayLength < 1) { + luaL_error( + L, 
+ "Invalid array length for shader parameter at key %d", + key); + } + lua_pop(L, 1); + + std::vector array(arrayLength); + for (int j = 0; j < arrayLength; j++) { + lua_rawgeti(L, -1, 3 + j); + array[j] = FArg(-1); + lua_pop(L, 1); + } + + scratchBuffer.resize(scratchOffset + + sizeof(float) * arrayLength); + std::memcpy(&scratchBuffer[scratchOffset], + array.data(), + sizeof(float) * arrayLength); + scratchOffset += sizeof(float) * arrayLength; + break; + } + case ShaderParamType_Texture: { + auto* texture = Luna::check(L, -1); + int arg = texture->GetTexHandle(); + scratchBuffer.resize(scratchOffset + sizeof(int)); + std::memcpy( + &scratchBuffer[scratchOffset], &arg, sizeof(int)); + scratchOffset += sizeof(int); + lua_pop(L, 1); + break; + } + default: + luaL_error( + L, "Invalid shader parameter type at key %d", key); + } + + lua_pop(L, 1); + } + + COMMON_RETURN_SELF; + } + static int ResetShaderParameters(T* p, lua_State* L) + { + p->m_VertexShaderArgs.clear(); + p->m_FragmentShaderArgs.clear(); + COMMON_RETURN_SELF; + } DEFINE_METHOD(GetTrueX, GetTrueX()); DEFINE_METHOD(GetTrueY, GetTrueY()); DEFINE_METHOD(GetTrueZ, GetTrueZ()); @@ -2927,6 +3112,12 @@ class LunaActor : public Luna ADD_METHOD(GetTrueRotationZ); ADD_METHOD(IsVisible); ADD_METHOD(IsOver); + ADD_METHOD(SetShaders); + ADD_METHOD(ResetShaders); + ADD_METHOD(SetShaderPersistence); + ADD_METHOD(GetShaderPersistence); + ADD_METHOD(SetShaderParameters); + ADD_METHOD(ResetShaderParameters); } }; diff --git a/src/Etterna/Actor/Base/Actor.h b/src/Etterna/Actor/Base/Actor.h index 097dabe856..4bacf38ef4 100644 --- a/src/Etterna/Actor/Base/Actor.h +++ b/src/Etterna/Actor/Base/Actor.h @@ -808,6 +808,13 @@ class Actor : public MessageSubscriber virtual void SetUpdateRate(float /*unused*/) {} virtual auto GetUpdateRate() -> float { return 1.0F; } + void SetShaders(const std::string& vertexShaderPath, + const std::string& fragmentShaderPath); + void ResetShaders() { m_CustomShaders = 0; } + bool 
GetShaderPersistence() const { return m_ShaderPersistence; } + void SetShaderPersistence(bool persist) { m_ShaderPersistence = persist; } + std::vector m_VertexShaderArgs, m_FragmentShaderArgs; + std::unique_ptr m_pLuaInstance; protected: @@ -939,6 +946,8 @@ class Actor : public MessageSubscriber static std::vector g_vfCurrentBGMBeatPlayer; static std::vector g_vfCurrentBGMBeatPlayerNoOffset; + intptr_t m_CustomShaders = 0; + bool m_ShaderPersistence = false; private: // commands std::map m_mapNameToCommands; diff --git a/src/Etterna/Globals/StepMania.cpp b/src/Etterna/Globals/StepMania.cpp index 41af771eba..8e94f111b4 100644 --- a/src/Etterna/Globals/StepMania.cpp +++ b/src/Etterna/Globals/StepMania.cpp @@ -29,6 +29,11 @@ #include "RageUtil/Graphics/RageSurface_Load.h" #include "Etterna/Screen/Others/Screen.h" #include "Etterna/Globals/GameLoop.h" +#include "RageUtil/Graphics/Display/Display.h" + +#if defined(WITH_VULKAN) +#include "RageUtil/Graphics/RendererVK/RendererVK.h" +#endif #if !defined(SUPPORT_OPENGL) && !defined(SUPPORT_D3D) #define SUPPORT_OPENGL @@ -385,9 +390,10 @@ AdjustForChangedSystemCapabilities() if (g_iLastSeenMemory == Memory) return; - Locator::getLogger()->trace("Memory changed from {} to {}; settings changed", - g_iLastSeenMemory.Get(), - Memory); + Locator::getLogger()->trace( + "Memory changed from {} to {}; settings changed", + g_iLastSeenMemory.Get(), + Memory); g_iLastSeenMemory.Set(Memory); // is this assumption outdated? -aj @@ -639,7 +645,7 @@ struct VideoCardDefaults // Default graphics settings used for all cards that don't match above. // This must be the very last entry! 
"", - "opengl,d3d", + "opengl,d3d,vulkan", 640, 480, 32, @@ -669,7 +675,7 @@ CheckVideoDefaultSettings() std::string sVideoDriver = GetVideoDriverName(); Locator::getLogger()->info("Last seen video driver: {}", - PREFSMAN->m_sLastSeenVideoDriver.Get().c_str()); + PREFSMAN->m_sLastSeenVideoDriver.Get().c_str()); // allow players to opt out of the forced reset when a new video card is // detected - mina @@ -684,7 +690,9 @@ CheckVideoDefaultSettings() std::string sDriverRegex = defaults.sDriverRegex; Regex regex(sDriverRegex); if (regex.Compare(sVideoDriver)) { - Locator::getLogger()->trace("Card matches '{}'.", sDriverRegex.size() ? sDriverRegex.c_str() : "(unknown card)"); + Locator::getLogger()->trace( + "Card matches '{}'.", + sDriverRegex.size() ? sDriverRegex.c_str() : "(unknown card)"); break; } } @@ -695,10 +703,12 @@ CheckVideoDefaultSettings() bool bSetDefaultVideoParams = false; if (PREFSMAN->m_sVideoRenderers.Get().empty()) { bSetDefaultVideoParams = true; - Locator::getLogger()->trace("Applying defaults for {}.", sVideoDriver.c_str()); + Locator::getLogger()->trace("Applying defaults for {}.", + sVideoDriver.c_str()); } else if (PREFSMAN->m_sLastSeenVideoDriver.Get() != sVideoDriver) { bSetDefaultVideoParams = true; - Locator::getLogger()->trace("Video card has changed from {} to {}. Applying new defaults.", + Locator::getLogger()->trace( + "Video card has changed from {} to {}. 
Applying new defaults.", PREFSMAN->m_sLastSeenVideoDriver.Get().c_str(), sVideoDriver.c_str()); } @@ -721,11 +731,14 @@ CheckVideoDefaultSettings() PREFSMAN->m_sLastSeenVideoDriver.Set(GetVideoDriverName()); } else if (CompareNoCase(PREFSMAN->m_sVideoRenderers.Get(), defaults.sVideoRenderers)) { - Locator::getLogger()->warn("Video renderer list has been changed from '{}' to '{}'", - defaults.sVideoRenderers.c_str(), PREFSMAN->m_sVideoRenderers.Get().c_str()); + Locator::getLogger()->warn( + "Video renderer list has been changed from '{}' to '{}'", + defaults.sVideoRenderers.c_str(), + PREFSMAN->m_sVideoRenderers.Get().c_str()); } - Locator::getLogger()->info("Video renderers: '{}'", PREFSMAN->m_sVideoRenderers.Get().c_str()); + Locator::getLogger()->info("Video renderers: '{}'", + PREFSMAN->m_sVideoRenderers.Get().c_str()); return bSetDefaultVideoParams; } @@ -808,7 +821,16 @@ CreateDisplay() #if defined(SUPPORT_D3D) pRet = new RageDisplay_D3D; #endif - } else if (CompareNoCase(sRenderer, "null") == 0) { + } +#if defined(WITH_VULKAN) +#if !defined(__APPLE__) + else if (CompareNoCase(sRenderer, "vulkan") == 0) { + pRet = + new DisplayAdapter::Display(std::make_unique()); + } +#endif +#endif + else if (CompareNoCase(sRenderer, "null") == 0) { return new RageDisplay_Null; } else { RageException::Throw( @@ -819,8 +841,8 @@ CreateDisplay() if (pRet == nullptr) continue; - std::string sError = - pRet->Init(std::move(params), PREFSMAN->m_bAllowUnacceleratedRenderer); + std::string sError = pRet->Init( + std::move(params), PREFSMAN->m_bAllowUnacceleratedRenderer); if (!sError.empty()) { error += ssprintf(ERROR_INITIALIZING.GetValue(), sRenderer.c_str()) + @@ -851,8 +873,10 @@ SwitchToLastPlayedGame() if (!GAMEMAN->IsGameEnabled(pGame) && pGame != GAMEMAN->GetDefaultGame()) { pGame = GAMEMAN->GetDefaultGame(); - Locator::getLogger()->warn(R"(Default NoteSkin for "{}" missing, reverting to "{}")", - pGame->m_szName, GAMEMAN->GetDefaultGame()->m_szName); + 
Locator::getLogger()->warn( + R"(Default NoteSkin for "{}" missing, reverting to "{}")", + pGame->m_szName, + GAMEMAN->GetDefaultGame()->m_szName); } ASSERT(GAMEMAN->IsGameEnabled(pGame)); @@ -883,7 +907,8 @@ StepMania::InitializeCurrentGame(const Game* g) argCurGame != sGametype) { Game const* new_game = GAMEMAN->StringToGame(argCurGame); if (new_game == nullptr) { - Locator::getLogger()->warn("{} is not a known game type, ignoring.", argCurGame.c_str()); + Locator::getLogger()->warn("{} is not a known game type, ignoring.", + argCurGame.c_str()); } else { PREFSMAN->SetCurrentGame(sGametype); GAMESTATE->SetCurGame(new_game); @@ -935,7 +960,8 @@ WriteLogHeader() // params. args += ssprintf("[[%s]]", g_argv[i]); } - Locator::getLogger()->info("Command line args (count={}): {}", (g_argc - 1), args.c_str()); + Locator::getLogger()->info( + "Command line args (count={}): {}", (g_argc - 1), args.c_str()); } } @@ -945,8 +971,8 @@ static LocalizedString COULDNT_OPEN_LOADING_WINDOW( static void MountAdditionalDirs(const std::string& sDirList, - const std::string& sDelimiter, - const std::string& sMountPoint) + const std::string& sDelimiter, + const std::string& sMountPoint) { std::vector dirs; split(sDirList, sDelimiter, dirs, true); @@ -961,25 +987,28 @@ sm_main(int argc, char* argv[]) seed_lua_prng(); // Initialize Logging - Locator::provide(std::make_unique()); + Locator::provide(std::make_unique()); - // Init Crash Handling + // Init Crash Handling bool success = Core::Crash::initCrashpad(); - if(!success) - Locator::getLogger()->warn("Crash Handler could not be initialized. 
Crash reports will not be created."); - - // Log App and System Information - Locator::getLogger()->info("{} v{} - Build {}", - Core::AppInfo::APP_TITLE, - Core::AppInfo::APP_VERSION, - Core::AppInfo::GIT_HASH); - Locator::getLogger()->info("System: {}", Core::Platform::getSystem()); - Locator::getLogger()->info("CPU: {}", Core::Platform::getSystemCPU()); - Locator::getLogger()->info("System Architecture: {}", Core::Platform::getArchitecture()); - Locator::getLogger()->info("Total Memory: {}GB", Core::Platform::getSystemMemory() / pow(1024, 3)); - - // Run Platform Initialization - Core::Platform::init(); + if (!success) + Locator::getLogger()->warn("Crash Handler could not be initialized. " + "Crash reports will not be created."); + + // Log App and System Information + Locator::getLogger()->info("{} v{} - Build {}", + Core::AppInfo::APP_TITLE, + Core::AppInfo::APP_VERSION, + Core::AppInfo::GIT_HASH); + Locator::getLogger()->info("System: {}", Core::Platform::getSystem()); + Locator::getLogger()->info("CPU: {}", Core::Platform::getSystemCPU()); + Locator::getLogger()->info("System Architecture: {}", + Core::Platform::getArchitecture()); + Locator::getLogger()->info( + "Total Memory: {}GB", Core::Platform::getSystemMemory() / pow(1024, 3)); + + // Run Platform Initialization + Core::Platform::init(); RageThreadRegister thread("Main thread"); RageException::SetCleanupHandler(HandleException); @@ -998,23 +1027,27 @@ sm_main(int argc, char* argv[]) FILEMAN = new RageFileManager(argv[0]); const char* envRootDir = std::getenv("ETTERNA_ROOT_DIR"); std::string rootDir = (envRootDir && std::strlen(envRootDir) > 0) - ? envRootDir : Core::Platform::getAppDirectory(); + ? 
envRootDir + : Core::Platform::getAppDirectory(); if (!FILEMAN->Mount("dir", rootDir, "/")) { - Locator::getLogger()->error("Failed to mount root directory: {}", rootDir); + Locator::getLogger()->error("Failed to mount root directory: {}", + rootDir); return 1; } // load preferences and mount any alternative trees. PREFSMAN = new PrefsManager; - /* Allow ArchHooks to check for multiple instances. We need to do this after - * PREFS is initialized, so ArchHooks can use a preference to turn this off. - * We want to do this before ApplyLogPreferences, so if we exit because of - * another instance, we don't try to clobber its log. We also want to do - * this before opening the loading window, so if we give focus away, we - * don't flash the window. */ - if (!g_bAllowMultipleInstances.Get() && Core::Platform::isOtherInstanceRunning(argc, argv)) { - Locator::getLogger()->warn("Multiple instances are disabled. Other instance detected. Shutting down..."); + /* Allow ArchHooks to check for multiple instances. We need to do this + * after PREFS is initialized, so ArchHooks can use a preference to turn + * this off. We want to do this before ApplyLogPreferences, so if we exit + * because of another instance, we don't try to clobber its log. We also + * want to do this before opening the loading window, so if we give focus + * away, we don't flash the window. */ + if (!g_bAllowMultipleInstances.Get() && + Core::Platform::isOtherInstanceRunning(argc, argv)) { + Locator::getLogger()->warn("Multiple instances are disabled. Other " + "instance detected. Shutting down..."); ShutdownGame(); return 0; } @@ -1024,24 +1057,28 @@ sm_main(int argc, char* argv[]) // Set up alternative filesystem trees. 
if (!PREFSMAN->m_sAdditionalFolders.Get().empty()) MountAdditionalDirs(PREFSMAN->m_sAdditionalFolders, ",", "/"); - const char* envAdditionalFolders = std::getenv("ETTERNA_ADDITIONAL_ROOT_DIRS"); + const char* envAdditionalFolders = + std::getenv("ETTERNA_ADDITIONAL_ROOT_DIRS"); if (envAdditionalFolders && std::strlen(envAdditionalFolders) > 0) MountAdditionalDirs(envAdditionalFolders, PATH_SEPARATOR, "/"); if (!PREFSMAN->m_sAdditionalSongFolders.Get().empty()) - MountAdditionalDirs(PREFSMAN->m_sAdditionalSongFolders, ",", "/AdditionalSongs"); - const char* envAdditionalSongFolders = std::getenv("ETTERNA_ADDITIONAL_SONG_DIRS"); + MountAdditionalDirs( + PREFSMAN->m_sAdditionalSongFolders, ",", "/AdditionalSongs"); + const char* envAdditionalSongFolders = + std::getenv("ETTERNA_ADDITIONAL_SONG_DIRS"); if (envAdditionalSongFolders && std::strlen(envAdditionalSongFolders) > 0) - MountAdditionalDirs(envAdditionalSongFolders, PATH_SEPARATOR, "/AdditionalSongs"); + MountAdditionalDirs( + envAdditionalSongFolders, PATH_SEPARATOR, "/AdditionalSongs"); /* One of the above filesystems might contain files that affect preferences * (e.g. Data/Static.ini). Re-read preferences. 
*/ PREFSMAN->ReadPrefsFromDisk(); - // Setup options that require preference variables - // Used to be contents of ApplyLogPreferences - Core::Crash::setShouldUpload(PREFSMAN->m_bEnableCrashUpload); - Core::Platform::setConsoleEnabled(PREFSMAN->m_bShowLogOutput); + // Setup options that require preference variables + // Used to be contents of ApplyLogPreferences + Core::Crash::setShouldUpload(PREFSMAN->m_bEnableCrashUpload); + Core::Platform::setConsoleEnabled(PREFSMAN->m_bShowLogOutput); Locator::getLogger()->info("Logging level {} (0 - TRACE | 5 - FATAL)", PREFSMAN->m_logging_level.Get()); Locator::getLogger()->setLogLevel( @@ -1058,9 +1095,10 @@ sm_main(int argc, char* argv[]) BinaryToHex(CryptManager::GetSHA256ForFileWithoutRageFile(argv[0])); std::vector arguments(argv + 1, argv + argc); - noWindow = std::any_of(arguments.begin(), arguments.end(), [](std::string str) { - return str == "notedataCache"; - }); + noWindow = + std::any_of(arguments.begin(), arguments.end(), [](std::string str) { + return str == "notedataCache"; + }); // This requires PREFSMAN, for PREFSMAN->m_bShowLoadingWindow. LoadingWindow* pLoadingWindow = nullptr; @@ -1072,7 +1110,8 @@ sm_main(int argc, char* argv[]) } #if defined(HAVE_TLS) - Locator::getLogger()->info("TLS is {}available", RageThread::GetSupportsTLS() ? "" : "not "); + Locator::getLogger()->info("TLS is {}available", + RageThread::GetSupportsTLS() ? 
"" : "not "); #endif AdjustForChangedSystemCapabilities(); @@ -1104,7 +1143,8 @@ sm_main(int argc, char* argv[]) } if (PREFSMAN->m_iSoundWriteAhead) - Locator::getLogger()->info("Sound writeahead has been overridden to {}", PREFSMAN->m_iSoundWriteAhead.Get()); + Locator::getLogger()->info("Sound writeahead has been overridden to {}", + PREFSMAN->m_iSoundWriteAhead.Get()); SONGINDEX = new SongCacheIndex; SOUNDMAN = new RageSoundManager; @@ -1381,7 +1421,8 @@ HandleGlobalInputs(const InputEventPlus& input) bool bSaveCompressed = bHoldingShift; RageTimer timer; StepMania::SaveScreenshot("Screenshots/", bSaveCompressed, "", ""); - Locator::getLogger()->debug("Screenshot took {} seconds.", timer.GetDeltaTime()); + Locator::getLogger()->debug("Screenshot took {} seconds.", + timer.GetDeltaTime()); return true; // handled } @@ -1406,7 +1447,9 @@ HandleGlobalInputs(const InputEventPlus& input) return false; } -void StepMania::HandleInputEvents(float fDeltaTime) { +void +StepMania::HandleInputEvents(float fDeltaTime) +{ INPUTFILTER->Update(fDeltaTime); /* Hack: If the topmost screen hasn't been updated yet, don't process input, diff --git a/src/RageUtil/CMakeLists.txt b/src/RageUtil/CMakeLists.txt index 2a2e5a0621..8c15e9d022 100644 --- a/src/RageUtil/CMakeLists.txt +++ b/src/RageUtil/CMakeLists.txt @@ -65,6 +65,19 @@ list(APPEND SMDATA_RAGE_GRAPHICS_SRC "Graphics/RageTextureManager.cpp" "Graphics/RageTextureRenderTarget.cpp" ) + +if(WITH_VULKAN) + list(APPEND SMDATA_RAGE_GRAPHICS_SRC + "Graphics/Display/CommandBatcher.cpp" + "Graphics/Display/CompiledGeometry.cpp" + "Graphics/Display/Display.cpp" + "Graphics/RendererVK/RendererVK.cpp" + "Graphics/RendererVK/VkUtils.cpp" + "Graphics/RendererVK/PersistentBuffer.cpp" + "Graphics/RendererVK/RenderTargetVK.cpp" + "Graphics/RendererVK/PlatformUtils.cpp") +endif() + list(APPEND SMDATA_RAGE_GRAPHICS_HPP "Graphics/RageBitmapTexture.h" "Graphics/RageDisplay.h" @@ -86,8 +99,28 @@ list(APPEND SMDATA_RAGE_GRAPHICS_HPP 
"Graphics/RageTextureID.h" "Graphics/RageTextureManager.h" "Graphics/RageTextureRenderTarget.h" + + "Graphics/Display/CommandBatcher.h" + "Graphics/Display/DrawMode.h" + "Graphics/Display/MatrixState.h" + "Graphics/Display/RenderState.h" + "Graphics/Display/Display.h" + "Graphics/Display/Renderer.h" + "Graphics/Display/CompiledGeometry.h" + "Graphics/Display/RenderNode.h" + "Graphics/Display/Vertex.h" ) +if(WITH_VULKAN) + list(APPEND SMDATA_RAGE_GRAPHICS_HPP + "Graphics/RendererVK/RendererVK.h" + "Graphics/RendererVK/VkUtils.h" + "Graphics/RendererVK/PersistentBuffer.h" + "Graphics/RendererVK/Texture.h" + "Graphics/RendererVK/RenderTargetVK.h" + "Graphics/RendererVK/PlatformUtils.h") +endif() + if(WIN32) list(APPEND SMDATA_RAGE_GRAPHICS_SRC "Graphics/RageDisplay_D3D.cpp") list(APPEND SMDATA_RAGE_GRAPHICS_HPP "Graphics/RageDisplay_D3D.h") @@ -199,4 +232,4 @@ list(APPEND SMDATA_ALL_RAGE_HPP ${SMDATA_RAGE_UTILS_HPP} ) -target_sources(Etterna PUBLIC ${SMDATA_ALL_RAGE_SRC}) \ No newline at end of file +target_sources(Etterna PUBLIC ${SMDATA_ALL_RAGE_SRC} ${SMDATA_ALL_RAGE_HPP}) \ No newline at end of file diff --git a/src/RageUtil/Graphics/Display/CommandBatcher.cpp b/src/RageUtil/Graphics/Display/CommandBatcher.cpp new file mode 100644 index 0000000000..bc9ebab565 --- /dev/null +++ b/src/RageUtil/Graphics/Display/CommandBatcher.cpp @@ -0,0 +1,373 @@ +#include "CommandBatcher.h" +#include "CompiledGeometry.h" +#include +#include + +void +DisplayAdapter::CommandBatcher::InsertPipelineChangeCommand( + intptr_t pipeline, + const std::vector& vertexShaderArgs, + const std::vector& fragShaderArgs, + bool persist) +{ + uint64_t vertexShaderInfo = UINT64_MAX; + uint64_t fragShaderInfo = UINT64_MAX; + + if (vertexShaderArgs.size()) { + vertexShaderInfo = m_ShaderScratchBuffer.size(); + m_ShaderScratchBuffer.resize(m_ShaderScratchBuffer.size() + + vertexShaderArgs.size()); + std::memcpy(&m_ShaderScratchBuffer[vertexShaderInfo], + vertexShaderArgs.data(), + 
vertexShaderArgs.size() * sizeof(uint8_t)); + } + + if (fragShaderArgs.size()) { + fragShaderInfo = m_ShaderScratchBuffer.size(); + m_ShaderScratchBuffer.resize(m_ShaderScratchBuffer.size() + + fragShaderArgs.size()); + std::memcpy(&m_ShaderScratchBuffer[fragShaderInfo], + fragShaderArgs.data(), + fragShaderArgs.size() * sizeof(uint8_t)); + } + + PipelineSettings settings = {}; + if (persist) { + if (pipeline) { + m_PipelineStack.push( + { pipeline, vertexShaderInfo, fragShaderInfo }); + } else { + m_PipelineStack.pop(); + } + settings = m_PipelineStack.top(); + } else { + if (m_PipelineStack.size()) { + return; + } + settings = { pipeline, vertexShaderInfo, fragShaderInfo }; + } + + if (!m_RenderNodes.size()) { + InsertRenderTargetCommand(0, false); + } + + if (!m_CurrentPipeline.has_value() || + std::tie(pipeline, vertexShaderInfo, fragShaderInfo) != + std::tie(m_CurrentPipeline->GraphicsPipeline, + m_CurrentPipeline->VertexShaderArg, + m_CurrentPipeline->FragShaderArg)) { + m_RenderNodes[m_CurrentNodeIndex].DrawCalls.push_back( + { settings, m_IndexBuffer.size(), (size_t)0 }); + } + + m_CurrentPipeline = settings; +} + +void +DisplayAdapter::CommandBatcher::InsertRenderTargetCommand(intptr_t renderTarget, + bool preserveTexture) +{ + if (renderTarget == 0) { + if (!m_SwapchainNodeIndex.has_value()) { + m_RenderNodes.push_back( + { renderTarget, preserveTexture, std::vector() }); + m_SwapchainNodeIndex = m_RenderNodes.size() - 1; + } + m_CurrentNodeIndex = *m_SwapchainNodeIndex; + m_RenderNodes[m_CurrentNodeIndex].PreserveRenderTarget = + preserveTexture; + } else { + m_RenderNodes.push_back( + { renderTarget, preserveTexture, std::vector() }); + m_CurrentNodeIndex = m_RenderNodes.size() - 1; + } +} + +uint32_t +GetSamplerFlagsFromRenderState(const DisplayAdapter::RenderState& state) +{ + return (uint8_t)state.textureWrapping | + ((uint8_t)state.textureFiltering << 1); +} + +void +DisplayAdapter::CommandBatcher::InsertSpriteDrawCommand( + DrawMode drawMode, + 
MatrixState&& matrixState, + const RageSpriteVertex* vertexData, + int vertexCount, + const RenderState& renderState) +{ + assert(drawMode != DrawMode::Invalid); + assert(drawMode != DrawMode::CompiledGeometry); + + m_MatrixStateBuffer.push_back(matrixState); + + const auto previousVertexCount = m_VertexBuffer.size(); + for (int i = 0; i < vertexCount; i++) { + m_VertexBuffer.push_back( + { vertexData[i], + (uint32_t)m_MatrixStateBuffer.size() - 1, + (uint32_t)renderState.textureHandle, + GetSamplerFlagsFromRenderState(renderState) }); + } + + const auto prevCount = m_IndexBuffer.size(); + switch (drawMode) { + case DrawMode::Triangles: { + m_IndexBuffer.resize(prevCount + vertexCount); + for (size_t i = 0; i < vertexCount / 3; i++) { + m_IndexBuffer[prevCount + 3 * i] = previousVertexCount + 3 * i; + m_IndexBuffer[prevCount + 3 * i + 1] = + previousVertexCount + 3 * i + 1; + m_IndexBuffer[prevCount + 3 * i + 2] = + previousVertexCount + 3 * i + 2; + } + break; + } + case DrawMode::Quads: { + m_IndexBuffer.resize(prevCount + 6 * vertexCount / 4); + for (size_t i = 0; i < vertexCount / 4; i++) { + m_IndexBuffer[prevCount + i * 6 + 0] = + previousVertexCount + i * 4 + 0; + m_IndexBuffer[prevCount + i * 6 + 1] = + previousVertexCount + i * 4 + 1; + m_IndexBuffer[prevCount + i * 6 + 2] = + previousVertexCount + i * 4 + 2; + m_IndexBuffer[prevCount + i * 6 + 3] = + previousVertexCount + i * 4 + 2; + m_IndexBuffer[prevCount + i * 6 + 4] = + previousVertexCount + i * 4 + 3; + m_IndexBuffer[prevCount + i * 6 + 5] = + previousVertexCount + i * 4 + 0; + } + + break; + } + case DrawMode::QuadStrip: { + m_IndexBuffer.resize(prevCount + 6 * (vertexCount - 2) / 2); + for (size_t i = 0; i < (vertexCount - 2) / 2; i++) { + m_IndexBuffer[prevCount + i * 6 + 0] = + previousVertexCount + i * 2 + 0; + m_IndexBuffer[prevCount + i * 6 + 1] = + previousVertexCount + i * 2 + 1; + m_IndexBuffer[prevCount + i * 6 + 2] = + previousVertexCount + i * 2 + 2; + m_IndexBuffer[prevCount + i * 6 + 
3] = + previousVertexCount + i * 2 + 1; + m_IndexBuffer[prevCount + i * 6 + 4] = + previousVertexCount + i * 2 + 2; + m_IndexBuffer[prevCount + i * 6 + 5] = + previousVertexCount + i * 2 + 3; + } + + break; + } + case DrawMode::Fan: { + assert(vertexCount >= 3); + m_IndexBuffer.resize(prevCount + 3 * (vertexCount - 1)); + for (size_t i = 1; i < vertexCount - 1; i++) { + m_IndexBuffer[prevCount + 3 * i] = previousVertexCount; + m_IndexBuffer[prevCount + 3 * i + 1] = previousVertexCount + i; + m_IndexBuffer[prevCount + 3 * i + 2] = + previousVertexCount + i + 1; + } + + break; + } + case DrawMode::Strip: { + assert(vertexCount >= 3); + m_IndexBuffer.resize(prevCount + 3 * (vertexCount - 2)); + + for (size_t i = 0; i < vertexCount - 2; i++) { + if (i % 2 == 0) { + m_IndexBuffer[prevCount + 3 * i] = previousVertexCount + i; + m_IndexBuffer[prevCount + 3 * i + 1] = + previousVertexCount + i + 1; + m_IndexBuffer[prevCount + 3 * i + 2] = + previousVertexCount + i + 2; + } else { + m_IndexBuffer[prevCount + 3 * i] = + previousVertexCount + i + 1; + m_IndexBuffer[prevCount + 3 * i + 1] = + previousVertexCount + i; + m_IndexBuffer[prevCount + 3 * i + 2] = + previousVertexCount + i + 2; + } + } + + break; + } + case DrawMode::SymmetricQuadStrip: { + m_IndexBuffer.resize(prevCount + 12 * (vertexCount - 3) / 3); + for (size_t i = 0; i < (vertexCount - 3) / 3; i++) { + m_IndexBuffer[prevCount + i * 12 + 0] = + previousVertexCount + i * 3 + 3; + m_IndexBuffer[prevCount + i * 12 + 1] = + previousVertexCount + i * 3 + 1; + m_IndexBuffer[prevCount + i * 12 + 2] = + previousVertexCount + i * 3 + 0; + + m_IndexBuffer[prevCount + i * 12 + 3] = + previousVertexCount + i * 3 + 4; + m_IndexBuffer[prevCount + i * 12 + 4] = + previousVertexCount + i * 3 + 1; + m_IndexBuffer[prevCount + i * 12 + 5] = + previousVertexCount + i * 3 + 3; + + m_IndexBuffer[prevCount + i * 12 + 6] = + previousVertexCount + i * 3 + 5; + m_IndexBuffer[prevCount + i * 12 + 7] = + previousVertexCount + i * 3 + 1; + 
m_IndexBuffer[prevCount + i * 12 + 8] = + previousVertexCount + i * 3 + 4; + + m_IndexBuffer[prevCount + i * 12 + 9] = + previousVertexCount + i * 3 + 2; + m_IndexBuffer[prevCount + i * 12 + 10] = + previousVertexCount + i * 3 + 1; + m_IndexBuffer[prevCount + i * 12 + 11] = + previousVertexCount + i * 3 + 5; + } + + break; + } + default: + throw std::runtime_error("Unknown draw command type"); + } + + HandleDrawCommand(prevCount, m_IndexBuffer.size() - prevCount, renderState); +} + +void +DisplayAdapter::CommandBatcher::InsertCompiledGeometryDrawCommand( + MatrixState&& matrixState, + const RageCompiledGeometry* p, + int iMeshIndex, + const RenderState& renderState) +{ + const auto geometry = reinterpret_cast(p); + const auto& meshInfo = geometry->m_vMeshInfo[iMeshIndex]; + + m_MatrixStateBuffer.push_back(matrixState); + if (meshInfo.m_bNeedsTextureMatrixScale) { + m_MatrixStateBuffer.back().texture.m[3][0] = 0; + m_MatrixStateBuffer.back().texture.m[3][1] = 0; + } + + RageVColor whiteVColor = {}; + whiteVColor.r = UINT8_MAX; + whiteVColor.g = UINT8_MAX; + whiteVColor.b = UINT8_MAX; + whiteVColor.a = UINT8_MAX; + + const auto previousVertexCount = m_VertexBuffer.size(); + for (int i = 0; i < meshInfo.iVertexCount; i++) { + const auto& vertex = geometry->m_Vertices[meshInfo.iVertexStart + i]; + m_VertexBuffer.push_back( + { { vertex.p, vertex.n, whiteVColor, vertex.t }, + (uint32_t)m_MatrixStateBuffer.size() - 1, + (uint32_t)renderState.textureHandle, + GetSamplerFlagsFromRenderState(renderState) }); + } + + const auto prevIndexCount = m_IndexBuffer.size(); + + for (int i = meshInfo.iTriangleStart; + i < meshInfo.iTriangleStart + meshInfo.iTriangleCount; + i++) { + for (int j = 0; j < 3; j++) { + m_IndexBuffer.push_back(previousVertexCount + + geometry->m_Triangles[i].nVertexIndices[j] - + meshInfo.iVertexStart); + } + } + + HandleDrawCommand( + prevIndexCount, m_IndexBuffer.size() - prevIndexCount, renderState); +} + +void 
+DisplayAdapter::CommandBatcher::HandleDrawCommand( + int indexOffset, + int indexCount, + const RenderState& renderState) +{ + assert(indexCount > 0); + assert(m_CurrentPipeline.has_value()); + if (!m_RenderNodes.size()) { + InsertRenderTargetCommand(0, false); + } + + auto& node = m_RenderNodes[m_CurrentNodeIndex]; + if (!node.DrawCalls.size()) { + m_RenderNodes[m_CurrentNodeIndex].DrawCalls.push_back( + { *m_CurrentPipeline, + (size_t)indexOffset, + (size_t)0, + renderState.blendingMode, + renderState.depthTestMode, + renderState.depthWriteEnabled }); + } + + // if we previously filled in a different draw call, we should create a new + // one + // or if we changed render state stuffs + + bool filledPreviousCall = + node.DrawCalls[node.DrawCalls.size() - 1].IndexCount != 0 && + node.DrawCalls[node.DrawCalls.size() - 1].IndexCount + + node.DrawCalls[node.DrawCalls.size() - 1].IndexOffset != + indexOffset; + bool differentRenderState = + !filledPreviousCall && + std::tie(node.DrawCalls[node.DrawCalls.size() - 1].BlendingMode, + node.DrawCalls[node.DrawCalls.size() - 1].DepthTestMode, + node.DrawCalls[node.DrawCalls.size() - 1].DepthWriteEnabled) != + std::tie(renderState.blendingMode, + renderState.depthTestMode, + renderState.depthWriteEnabled); + + if (filledPreviousCall || differentRenderState) { + node.DrawCalls.push_back({ *m_CurrentPipeline, + (size_t)indexOffset, + (size_t)0, + renderState.blendingMode, + renderState.depthTestMode, + renderState.depthWriteEnabled }); + } + + node.DrawCalls[node.DrawCalls.size() - 1].IndexCount += indexCount; +} + +void +DisplayAdapter::CommandBatcher::Clear() +{ + m_VertexBuffer.clear(); + m_IndexBuffer.clear(); + m_MatrixStateBuffer.clear(); + m_RenderNodes.clear(); + + // std::stack has no .clear() :| + while (m_PipelineStack.size()) { + m_PipelineStack.pop(); + } + + m_CurrentPipeline = std::nullopt; + m_SwapchainNodeIndex = std::nullopt; + m_CurrentNodeIndex = 0; + + m_ShaderScratchBuffer.clear(); +} + +void 
+DisplayAdapter::CommandBatcher::FixRenderNodeOrder() +{ + if (!m_RenderNodes.size()) { + return; + } + + auto node = m_RenderNodes[*m_SwapchainNodeIndex]; + m_RenderNodes.erase(m_RenderNodes.begin() + *m_SwapchainNodeIndex); + m_RenderNodes.push_back(node); +} diff --git a/src/RageUtil/Graphics/Display/CommandBatcher.h b/src/RageUtil/Graphics/Display/CommandBatcher.h new file mode 100644 index 0000000000..50799d4230 --- /dev/null +++ b/src/RageUtil/Graphics/Display/CommandBatcher.h @@ -0,0 +1,56 @@ +#ifndef DISPLAY_COMMAND_BATCHER_H +#define DISPLAY_COMMAND_BATCHER_H + +#include +#include +#include +#include +#include +#include "DrawMode.h" +#include "MatrixState.h" +#include "RenderState.h" +#include "RenderNode.h" +#include "Vertex.h" + +namespace DisplayAdapter { + +class CommandBatcher +{ + public: + void InsertPipelineChangeCommand( + intptr_t pipeline, + const std::vector& vertexShaderArgs, + const std::vector& fragShaderArgs, + bool persist); + void InsertRenderTargetCommand(intptr_t renderTarget, bool preserveTexture); + void InsertSpriteDrawCommand(DrawMode drawMode, + MatrixState&& matrixState, + const RageSpriteVertex* vertexData, + int vertexCount, + const RenderState& renderState); + void InsertCompiledGeometryDrawCommand(MatrixState&& matrixState, + const RageCompiledGeometry* p, + int iMeshIndex, + const RenderState& renderState); + void HandleDrawCommand(int indexOffset, + int indexCount, + const RenderState& renderState); + void Clear(); + void FixRenderNodeOrder(); + + std::vector m_VertexBuffer; + std::vector m_IndexBuffer; + std::vector m_MatrixStateBuffer; + std::vector m_RenderNodes; + std::stack m_PipelineStack; + std::vector m_ShaderScratchBuffer; + + std::optional m_SwapchainNodeIndex; + size_t m_CurrentNodeIndex = 0; + + std::optional m_CurrentPipeline = std::nullopt; +}; + +} // namespace Display + +#endif diff --git a/src/RageUtil/Graphics/Display/CompiledGeometry.cpp b/src/RageUtil/Graphics/Display/CompiledGeometry.cpp new file mode 
100644 index 0000000000..f2cbfede20 --- /dev/null +++ b/src/RageUtil/Graphics/Display/CompiledGeometry.cpp @@ -0,0 +1,40 @@ +#include "CompiledGeometry.h" +#include + +void +DisplayAdapter::CompiledGeometry::Allocate(const std::vector& vMeshes) +{ + m_Vertices.resize(std::max(1U, static_cast(GetTotalVertices()))); + m_Triangles.resize( + std::max(1U, static_cast(GetTotalTriangles()))); +} + +void +DisplayAdapter::CompiledGeometry::Change(const std::vector& vMeshes) +{ + for (unsigned i = 0; i < vMeshes.size(); i++) { + const auto& meshInfo = m_vMeshInfo[i]; + const auto& mesh = vMeshes[i]; + const auto& Vertices = mesh.Vertices; + const auto& Triangles = mesh.Triangles; + + for (unsigned j = 0; j < Vertices.size(); j++) { + m_Vertices[meshInfo.iVertexStart + j] = Vertices[j]; + } + + for (unsigned j = 0; j < Triangles.size(); j++) { + for (unsigned k = 0; k < 3; k++) { + m_Triangles[meshInfo.iTriangleStart + j].nVertexIndices[k] = + meshInfo.iVertexStart + Triangles[j].nVertexIndices[k]; + } + } + } +} + +void +DisplayAdapter::CompiledGeometry::Draw(int iMeshIndex) const +{ + assert(false && "This should never be called, " + "CommandBatcher::InsertCompiledGeometryDrawCommand should " + "handle the drawing thingy"); +} diff --git a/src/RageUtil/Graphics/Display/CompiledGeometry.h b/src/RageUtil/Graphics/Display/CompiledGeometry.h new file mode 100644 index 0000000000..55965dca51 --- /dev/null +++ b/src/RageUtil/Graphics/Display/CompiledGeometry.h @@ -0,0 +1,21 @@ +#ifndef DISPLAY_COMPILED_GEOMETRY_H +#define DISPLAY_COMPILED_GEOMETRY_H + +#include "RageUtil/Graphics/RageDisplay.h" + +namespace DisplayAdapter { + +struct CompiledGeometry : public RageCompiledGeometry +{ + void Allocate(const std::vector& vMeshes) override; + void Change(const std::vector& vMeshes) override; + void Draw(int iMeshIndex) const override; + + std::vector m_Vertices; + std::vector m_Triangles; + friend class CommandBatcher; +}; + +} + +#endif diff --git 
a/src/RageUtil/Graphics/Display/Display.cpp b/src/RageUtil/Graphics/Display/Display.cpp new file mode 100644 index 0000000000..63dd6359d1 --- /dev/null +++ b/src/RageUtil/Graphics/Display/Display.cpp @@ -0,0 +1,435 @@ +#include "Display.h" +#include "CompiledGeometry.h" +#include "Core/Services/Locator.hpp" +#include +#include +#include + +DisplayAdapter::Display::Display(std::unique_ptr renderer) + : m_Renderer(std::move(renderer)) + , m_RenderState() +{ +} + +std::string +DisplayAdapter::Display::Init(VideoModeParams&& p, + bool bAllowUnacceleratedRenderer) +{ + Locator::getLogger()->info("DisplayAdapter::Display::Init()"); + Locator::getLogger()->info("Current renderer: UnstableDisplay - {}", + m_Renderer->GetApiDescription()); + + m_Window = LowLevelWindowVK::Create(); + + bool ignored = false; + return SetVideoMode(std::move(p), ignored); +} + +void +DisplayAdapter::Display::GetDisplaySpecs(DisplaySpecs& out) const +{ + m_Window->GetDisplaySpecs(out); +} + +void +DisplayAdapter::Display::ResolutionChanged() +{ + m_Renderer->ResolutionChanged(); + RageDisplay::ResolutionChanged(); +} + +bool +DisplayAdapter::Display::BeginFrame() +{ + m_Window->Update(); + + m_Batcher.Clear(); + m_RenderState.textureFiltering = true; + m_RenderState.textureWrapping = false; + + return m_IsInitDone && RageDisplay::BeginFrame(); +} + +void +DisplayAdapter::Display::EndFrame() +{ + m_Batcher.FixRenderNodeOrder(); + m_Renderer->OnRender(GetActualVideoModeParams(), m_Batcher); + RageDisplay::EndFrame(); +} + +const ActualVideoModeParams* +DisplayAdapter::Display::GetActualVideoModeParams() const +{ + return m_Window->GetActualVideoModeParams(); +} + +std::string +DisplayAdapter::Display::TryVideoMode(const VideoModeParams& p, + bool& bNewDeviceOut) +{ + m_Window->TryVideoMode(p, bNewDeviceOut); + + if (!m_IsInitDone) { + m_Renderer->InitializeRenderer(p); + } else { + m_Renderer->TryVideoMode(p); + } + + ResolutionChanged(); + + m_Renderer->OnRender(GetActualVideoModeParams(), 
m_Batcher); + + m_IsInitDone = true; + return std::string(); +} + +#pragma region Texture handling + +const RageDisplay::RagePixelFormatDesc* +DisplayAdapter::Display::GetPixelFormatDesc(RagePixelFormat pf) const +{ + assert(pf == RagePixelFormat_RGBA8 || pf == RagePixelFormat_BGRA8); + static auto rgba8 = + RagePixelFormatDesc{ 32, + { 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000 } }; + static auto bgra8 = + RagePixelFormatDesc{ 32, + { 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000 } }; + return pf == RagePixelFormat_RGBA8 ? &rgba8 : &bgra8; +} + +bool +DisplayAdapter::Display::SupportsTextureFormat(RagePixelFormat pixfmt, + bool realtime) +{ + return pixfmt == RagePixelFormat_RGBA8 || pixfmt == RagePixelFormat_BGRA8; +} + +intptr_t +DisplayAdapter::Display::CreateTexture(RagePixelFormat pixfmt, + RageSurface* img, + bool bGenerateMipMaps) +{ + assert(SupportsTextureFormat(pixfmt)); + + return m_Renderer->CreateTexture(img, pixfmt == RagePixelFormat_RGBA8); +} + +void +DisplayAdapter::Display::UpdateTexture(intptr_t uTexHandle, + RageSurface* img, + int xoffset, + int yoffset, + int width, + int height) +{ + m_Renderer->UpdateTexture(uTexHandle, img, xoffset, yoffset, width, height); +} + +void +DisplayAdapter::Display::DeleteTexture(intptr_t iTexHandle) +{ + m_Renderer->DeleteTexture(iTexHandle); +} + +void +DisplayAdapter::Display::ClearAllTextures() +{ + m_Renderer->ClearAllTextures(); +} + +int +DisplayAdapter::Display::GetNumTextureUnits() +{ + return 1; +} + +int +DisplayAdapter::Display::GetMaxTextureSize() const +{ + return DisplayAdapter::Display::MaxTextureSize; +} + +#pragma endregion + +#pragma region RenderState handling + +void +DisplayAdapter::Display::SetTexture(TextureUnit tu, intptr_t iTexture) +{ + assert(tu == TextureUnit_1); + m_RenderState.textureHandle = iTexture; +} + +void +DisplayAdapter::Display::SetTextureWrapping(TextureUnit tu, bool b) +{ + assert(tu == TextureUnit_1); + m_RenderState.textureWrapping = b; +} + +void 
+DisplayAdapter::Display::SetTextureFiltering(TextureUnit tu, bool b) +{ + assert(tu == TextureUnit_1); + m_RenderState.textureFiltering = b; +} + +void +DisplayAdapter::Display::SetBlendMode(BlendMode mode) +{ + m_RenderState.blendingMode = mode; +} + +void +DisplayAdapter::Display::SetZWrite(bool b) +{ + m_RenderState.depthWriteEnabled = b; +} + +void +DisplayAdapter::Display::SetZTestMode(ZTestMode mode) +{ + m_RenderState.depthTestMode = mode; +} + +bool +DisplayAdapter::Display::IsZWriteEnabled() const +{ + return m_RenderState.depthWriteEnabled; +} + +bool +DisplayAdapter::Display::IsZTestEnabled() const +{ + return m_RenderState.depthTestMode != ZTEST_OFF; +} + +#pragma endregion + +#pragma region Draw queueing + +void +DisplayAdapter::Display::DrawQuadsInternal(const RageSpriteVertex v[], + int iNumVerts) +{ + m_Batcher.InsertSpriteDrawCommand( + DrawMode::Quads, GetCurrentMatrixState(), v, iNumVerts, m_RenderState); +} + +void +DisplayAdapter::Display::DrawQuadStripInternal(const RageSpriteVertex v[], + int iNumVerts) +{ + m_Batcher.InsertSpriteDrawCommand(DrawMode::QuadStrip, + GetCurrentMatrixState(), + v, + iNumVerts, + m_RenderState); +} + +void +DisplayAdapter::Display::DrawFanInternal(const RageSpriteVertex v[], + int iNumVerts) +{ + m_Batcher.InsertSpriteDrawCommand( + DrawMode::Fan, GetCurrentMatrixState(), v, iNumVerts, m_RenderState); +} + +void +DisplayAdapter::Display::DrawStripInternal(const RageSpriteVertex v[], + int iNumVerts) +{ + m_Batcher.InsertSpriteDrawCommand( + DrawMode::Strip, GetCurrentMatrixState(), v, iNumVerts, m_RenderState); +} + +void +DisplayAdapter::Display::DrawTrianglesInternal(const RageSpriteVertex v[], + int iNumVerts) +{ + m_Batcher.InsertSpriteDrawCommand(DrawMode::Triangles, + GetCurrentMatrixState(), + v, + iNumVerts, + m_RenderState); +} + +void +DisplayAdapter::Display::DrawSymmetricQuadStripInternal( + const RageSpriteVertex v[], + int iNumVerts) +{ + 
m_Batcher.InsertSpriteDrawCommand(DrawMode::SymmetricQuadStrip, + GetCurrentMatrixState(), + v, + iNumVerts, + m_RenderState); +} + +void +DisplayAdapter::Display::DrawCompiledGeometryInternal( + const RageCompiledGeometry* p, + int iMeshIndex) +{ + m_Batcher.InsertCompiledGeometryDrawCommand( + GetCurrentMatrixState(), p, iMeshIndex, m_RenderState); +} + +#pragma endregion + +intptr_t +DisplayAdapter::Display::CreateRenderTarget(const RenderTargetParam& param, + int& iTextureWidthOut, + int& iTextureHeightOut) +{ + return m_Renderer->CreateRenderTarget( + param, iTextureWidthOut, iTextureHeightOut); +} + +intptr_t +DisplayAdapter::Display::GetRenderTarget() +{ + return m_CurrentRenderTarget; +} + +void +DisplayAdapter::Display::SetRenderTarget(intptr_t uTexHandle, + bool bPreserveTexture) +{ + m_Batcher.InsertRenderTargetCommand(uTexHandle, bPreserveTexture); + m_CurrentRenderTarget = uTexHandle; +} + +RageCompiledGeometry* +DisplayAdapter::Display::CreateCompiledGeometry() +{ + return new CompiledGeometry; +} + +void +DisplayAdapter::Display::DeleteCompiledGeometry(RageCompiledGeometry* p) +{ + assert(p != nullptr); + delete p; +} + +RageSurface* +DisplayAdapter::Display::CreateScreenshot() +{ + return m_Renderer->CreateScreenshot(); +} + +bool +DisplayAdapter::Display::SupportsThreadedRendering() +{ + return false; +} + +bool +DisplayAdapter::Display::SupportsPerVertexMatrixScale() +{ + return false; +} + +DisplayAdapter::MatrixState +DisplayAdapter::Display::GetCurrentMatrixState() +{ + MatrixState m = {}; + m.texture = *GetTextureTop(); + + RageMatrix temp = {}; + + RageMatrixMultiply(&temp, GetViewTop(), GetWorldTop()); + RageMatrixMultiply(&m.wvp, GetProjectionTop(), &temp); + + return m; +} + +intptr_t +DisplayAdapter::Display::CreateGraphicsPipeline( + const std::string& vertexShaderPath, + const std::string& fragmentShaderPath) +{ + return m_Renderer->CreateGraphicsPipeline(vertexShaderPath, + fragmentShaderPath); +} + +void 
+DisplayAdapter::Display::SetGraphicsPipeline( + intptr_t pipeline, + const std::vector& vertexShaderArgs, + const std::vector& fragShaderArgs, + bool persist) +{ + m_Batcher.InsertPipelineChangeCommand( + pipeline, vertexShaderArgs, fragShaderArgs, persist); +} + +#pragma region Unsupported / old graphics API functions + +void +DisplayAdapter::Display::SetTextureMode(TextureUnit tu, TextureMode tm) +{ +} + +void +DisplayAdapter::Display::SetZBias(float f) +{ +} + +void +DisplayAdapter::Display::SetCullMode(CullMode mode) +{ +} + +void +DisplayAdapter::Display::SetAlphaTest(bool b) +{ +} + +void +DisplayAdapter::Display::ClearZBuffer() +{ +} + +void +DisplayAdapter::Display::SetMaterial(const RageColor& emissive, + const RageColor& ambient, + const RageColor& diffuse, + const RageColor& specular, + float shininess) +{ +} + +void +DisplayAdapter::Display::SetLighting(bool b) +{ +} + +void +DisplayAdapter::Display::SetLightOff(int index) +{ +} + +void +DisplayAdapter::Display::SetLightDirectional(int index, + const RageColor& ambient, + const RageColor& diffuse, + const RageColor& specular, + const RageVector3& dir) +{ +} + +void +DisplayAdapter::Display::SetSphereEnvironmentMapping(TextureUnit tu, bool b) +{ +} + +void +DisplayAdapter::Display::SetCelShaded(int stage) +{ +} + +#pragma endregion diff --git a/src/RageUtil/Graphics/Display/Display.h b/src/RageUtil/Graphics/Display/Display.h new file mode 100644 index 0000000000..463d3e237f --- /dev/null +++ b/src/RageUtil/Graphics/Display/Display.h @@ -0,0 +1,135 @@ +/* Display - RageDisplay wrapper for renderer implementations */ +#ifndef DISPLAY_H +#define DISPLAY_H + +#include "RageUtil/Graphics/Display/CommandBatcher.h" +#include "RageUtil/Graphics/RageDisplay.h" +#include "Renderer.h" +#include "RenderState.h" +#include +#include "arch/LowLevelWindowVK/LowLevelWindowVK.h" + +namespace DisplayAdapter { +class Display : public RageDisplay +{ + public: + static constexpr size_t TexturePixelSize = 4; + static 
constexpr size_t MaxTextureSize = 4096; + + Display(std::unique_ptr renderer); + ~Display() override {} + + std::string Init(VideoModeParams&& p, + bool bAllowUnacceleratedRenderer) override; + [[nodiscard]] std::string GetApiDescription() const override + { + return m_Renderer->GetApiDescription(); + } + void GetDisplaySpecs(DisplaySpecs& out) const override; + void ResolutionChanged() override; + [[nodiscard]] const RagePixelFormatDesc* GetPixelFormatDesc( + RagePixelFormat pf) const override; + + bool BeginFrame() override; + void EndFrame() override; + [[nodiscard]] const ActualVideoModeParams* GetActualVideoModeParams() + const override; + void SetBlendMode(BlendMode mode) override; + bool SupportsTextureFormat(RagePixelFormat pixfmt, + bool realtime = false) override; + bool SupportsThreadedRendering() override; + bool SupportsPerVertexMatrixScale() override; + + intptr_t CreateTexture(RagePixelFormat pixfmt, + RageSurface* img, + bool bGenerateMipMaps) override; + void UpdateTexture(intptr_t uTexHandle, + RageSurface* img, + int xoffset, + int yoffset, + int width, + int height) override; + void DeleteTexture(intptr_t iTexHandle) override; + void ClearAllTextures() override; + [[nodiscard]] int GetMaxTextureSize() const override; + + int GetNumTextureUnits() override; + void SetTexture(TextureUnit tu, intptr_t iTexture) override; + void SetTextureMode(TextureUnit tu, TextureMode tm) override; + void SetTextureWrapping(TextureUnit tu, bool b) override; + void SetTextureFiltering(TextureUnit tu, bool b) override; + [[nodiscard]] bool IsZWriteEnabled() const override; + [[nodiscard]] bool IsZTestEnabled() const override; + void SetZWrite(bool b) override; + void SetZBias(float f) override; + void SetZTestMode(ZTestMode mode) override; + void ClearZBuffer() override; + void SetCullMode(CullMode mode) override; + void SetAlphaTest(bool b) override; + void SetMaterial(const RageColor& emissive, + const RageColor& ambient, + const RageColor& diffuse, + const 
RageColor& specular, + float shininess) override; + void SetLighting(bool b) override; + void SetLightOff(int index) override; + void SetLightDirectional(int index, + const RageColor& ambient, + const RageColor& diffuse, + const RageColor& specular, + const RageVector3& dir) override; + intptr_t CreateGraphicsPipeline( + const std::string& vertexShaderPath, + const std::string& fragmentShaderPath) override; + void SetGraphicsPipeline(intptr_t pipeline, + const std::vector& vertexShaderArgs, + const std::vector& fragShaderArgs, + bool persist) override; + intptr_t CreateRenderTarget(const RenderTargetParam& param, + int& iTextureWidthOut, + int& iTextureHeightOut) override; + intptr_t GetRenderTarget() override; + void SetRenderTarget(intptr_t uTexHandle, bool bPreserveTexture) override; + + void SetSphereEnvironmentMapping(TextureUnit tu, bool b) override; + void SetCelShaded(int stage) override; + + bool IsD3DInternal() override { return m_Renderer->IsD3DInternal(); } + [[nodiscard]] bool SupportsFullscreenBorderlessWindow() const override + { + return true; + } + + RageCompiledGeometry* CreateCompiledGeometry() override; + void DeleteCompiledGeometry(RageCompiledGeometry* p) override; + + protected: + void DrawQuadsInternal(const RageSpriteVertex v[], int iNumVerts) override; + void DrawQuadStripInternal(const RageSpriteVertex v[], + int iNumVerts) override; + void DrawFanInternal(const RageSpriteVertex v[], int iNumVerts) override; + void DrawStripInternal(const RageSpriteVertex v[], int iNumVerts) override; + void DrawTrianglesInternal(const RageSpriteVertex v[], + int iNumVerts) override; + void DrawSymmetricQuadStripInternal(const RageSpriteVertex v[], + int iNumVerts) override; + void DrawCompiledGeometryInternal(const RageCompiledGeometry* p, + int iMeshIndex) override; + + std::string TryVideoMode(const VideoModeParams& p, + bool& bNewDeviceOut) override; + RageSurface* CreateScreenshot() override; + MatrixState GetCurrentMatrixState(); + + private: + 
#ifndef DISPLAY_DRAW_MODE_H
#define DISPLAY_DRAW_MODE_H

namespace DisplayAdapter {

// Primitive/topology tag attached to each queued draw command; mirrors the
// RageDisplay Draw*Internal entry points that produce them.
enum class DrawMode
{
	Invalid,            // default / unset
	Quads,
	QuadStrip,
	Fan,
	Strip,
	Triangles,
	SymmetricQuadStrip,
	CompiledGeometry,   // indexed model geometry (see CompiledGeometry)
};

}

#endif
#ifndef DISPLAY_RENDER_STATE_H
#define DISPLAY_RENDER_STATE_H

#include "RageUtil/Misc/RageTypes.h"
#include "RageUtil/Graphics/RageDisplay.h"

namespace DisplayAdapter {

// Snapshot of the fixed-function state active when a draw command is queued.
// Display's Set* methods mutate one instance of this, and each queued sprite /
// compiled-geometry command captures a copy (see Display::Draw*Internal).
struct RenderState
{
	bool textureWrapping = false;   // reset each frame by Display::BeginFrame
	bool textureFiltering = false;  // BeginFrame sets this to true each frame
	intptr_t textureHandle = 0;     // renderer-issued handle; 0 = no texture
	BlendMode blendingMode = BLEND_NORMAL;
	ZTestMode depthTestMode = ZTEST_OFF;
	bool depthWriteEnabled = false;
};

} // namespace DisplayAdapter

#endif
iTextureHeightOut) = 0; + virtual intptr_t CreateGraphicsPipeline( + const std::string& vertexShaderPath, + const std::string& fragmentShaderPath) = 0; + virtual void TryVideoMode(const VideoModeParams& params) = 0; +}; +} + +#endif diff --git a/src/RageUtil/Graphics/Display/Vertex.h b/src/RageUtil/Graphics/Display/Vertex.h new file mode 100644 index 0000000000..36c065c1b3 --- /dev/null +++ b/src/RageUtil/Graphics/Display/Vertex.h @@ -0,0 +1,15 @@ +#include +#ifndef DISPLAY_VERTEX_H +#define DISPLAY_VERTEX_H + +namespace DisplayAdapter { +struct Vertex +{ + RageSpriteVertex VertexData; + uint32_t MatrixIndex; + uint32_t TextureIndex; + uint32_t SamplerIndex; +}; +} + +#endif diff --git a/src/RageUtil/Graphics/RageBitmapTexture.cpp b/src/RageUtil/Graphics/RageBitmapTexture.cpp index ce300310e4..8781083299 100644 --- a/src/RageUtil/Graphics/RageBitmapTexture.cpp +++ b/src/RageUtil/Graphics/RageBitmapTexture.cpp @@ -81,7 +81,10 @@ RageBitmapTexture::Create() ASSERT(!actualID.filename.empty()); - delete m_pSurface; + if (m_pSurface) { + delete m_pSurface; + m_pSurface = nullptr; + } /* Load the image into a RageSurface. 
*/ std::string error; diff --git a/src/RageUtil/Graphics/RageDisplay.h b/src/RageUtil/Graphics/RageDisplay.h index b29d1fa87d..155a3a716b 100644 --- a/src/RageUtil/Graphics/RageDisplay.h +++ b/src/RageUtil/Graphics/RageDisplay.h @@ -268,6 +268,9 @@ struct RageTextureLock class RageDisplay { friend class RageTexture; +#ifdef WITH_VULKAN + friend class RendererVK; +#endif public: struct RagePixelFormatDesc @@ -416,6 +419,17 @@ class RageDisplay virtual void SetSphereEnvironmentMapping(TextureUnit tu, bool b) = 0; virtual void SetCelShaded(int stage) = 0; + virtual intptr_t CreateGraphicsPipeline( + const std::string& vertexShaderPath, + const std::string& fragmentShaderPath) { + return 0; + } + virtual void SetGraphicsPipeline( + intptr_t pipeline, + const std::vector& vertexShaderArgs, + const std::vector& fragShaderArgs, + bool persist) {} + virtual auto CreateCompiledGeometry() -> RageCompiledGeometry* = 0; virtual void DeleteCompiledGeometry(RageCompiledGeometry* p) = 0; @@ -464,7 +478,7 @@ class RageDisplay return nullptr; } // allocates a surface. Caller must delete it. 
- protected: +protected: virtual void DrawQuadsInternal(const RageSpriteVertex v[], int iNumVerts) = 0; virtual void DrawQuadStripInternal(const RageSpriteVertex v[], diff --git a/src/RageUtil/Graphics/RendererVK/PersistentBuffer.cpp b/src/RageUtil/Graphics/RendererVK/PersistentBuffer.cpp new file mode 100644 index 0000000000..9d7ed4369c --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/PersistentBuffer.cpp @@ -0,0 +1,36 @@ +#ifdef _WIN32 +#define VK_USE_PLATFORM_WIN32_KHR +#endif +#ifdef __unix__ +#define VK_USE_PLATFORM_XLIB_KHR +#endif + +#include "PersistentBuffer.h" +#include "VkUtils.h" + +void +PersistentBuffer::Init(VmaAllocator allocator, + const vk::BufferCreateInfo& createInfo, + const VmaAllocationCreateInfo& allocInfo) +{ + this->allocator = allocator; + ThrowIfFail( + vmaCreateBuffer(allocator, + &static_cast(createInfo), + &allocInfo, + &this->buffer, + &this->allocation, + &this->allocInfo)); +} + +vk::Buffer +PersistentBuffer::Get() const +{ + return vk::Buffer(buffer); +} + +void* +PersistentBuffer::GetMappedData() const +{ + return allocInfo.pMappedData; +} diff --git a/src/RageUtil/Graphics/RendererVK/PersistentBuffer.h b/src/RageUtil/Graphics/RendererVK/PersistentBuffer.h new file mode 100644 index 0000000000..5d0ec6dc6f --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/PersistentBuffer.h @@ -0,0 +1,24 @@ +#ifndef RENDERER_VK_BUFFER_HELPER_H +#define RENDERER_VK_BUFFER_HELPER_H + +#include +#include + +struct PersistentBuffer +{ + VkBuffer buffer = VK_NULL_HANDLE; + VmaAllocation allocation = VK_NULL_HANDLE; + VmaAllocationInfo allocInfo = {}; + VmaAllocator allocator = nullptr; + uint64_t gpuAddress = 0; + + void Init(VmaAllocator allocator, + const vk::BufferCreateInfo& createInfo, + const VmaAllocationCreateInfo& allocInfo); + + vk::Buffer Get() const; + + void* GetMappedData() const; +}; + +#endif diff --git a/src/RageUtil/Graphics/RendererVK/PlatformUtils.cpp b/src/RageUtil/Graphics/RendererVK/PlatformUtils.cpp new file mode 100644 
index 0000000000..69738978b6 --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/PlatformUtils.cpp @@ -0,0 +1,74 @@ +#ifdef _WIN32 +#define VK_USE_PLATFORM_WIN32_KHR +#endif +#ifdef __unix__ +#define VK_USE_PLATFORM_XLIB_KHR +#endif +#include "PlatformUtils.h" + +#ifdef _WIN32 +#include "archutils/Win32/GraphicsWindow.h" +#endif +#ifdef __unix__ +#include "archutils/Unix/X11Helper.h" +#endif + +vkb::Result +CreateInstance(PFN_vkDebugUtilsMessengerCallbackEXT debugCallback) +{ + vkb::InstanceBuilder builder; + auto instanceResult = + builder +#ifdef VKDEBUG + .request_validation_layers(true) + .use_default_debug_messenger() + .add_validation_feature_enable( + VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT) + .add_validation_feature_enable( + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT) + .add_validation_feature_enable( + VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT) + .set_debug_callback(debugCallback) +#endif + .require_api_version(1, 3, 0) + .enable_extension(VK_KHR_SURFACE_EXTENSION_NAME) +#ifdef _WIN32 + .enable_extension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME) + .enable_extension(VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME) +#endif +#ifdef __unix__ + .enable_extension(VK_KHR_XLIB_SURFACE_EXTENSION_NAME) +#endif +#ifdef __APPLE__ + .enable_extension(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) + .enable_extension( + VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME) +#endif + .build(); + + return instanceResult; +} + +vk::raii::SurfaceKHR +CreateSurfaceKHR(const vk::raii::Instance& instance) +{ +#ifdef _WIN32 + VkWin32SurfaceCreateInfoKHR createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; + createInfo.hwnd = GraphicsWindow::GetHwnd(); + createInfo.hinstance = GetModuleHandle(nullptr); + return instance.createWin32SurfaceKHR(createInfo); +#endif +#ifdef __unix__ + VkXlibSurfaceCreateInfoKHR createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR; + createInfo.dpy = 
X11Helper::Dpy; + createInfo.window = X11Helper::Win; + + return instance.createXlibSurfaceKHR(createInfo); +#endif +#ifdef __APPLE__ + // TODO: use vkCreateMacOSSurfaceMVK or vkCreateMetalSurfaceEXT? + return nullptr; +#endif +} diff --git a/src/RageUtil/Graphics/RendererVK/PlatformUtils.h b/src/RageUtil/Graphics/RendererVK/PlatformUtils.h new file mode 100644 index 0000000000..758f1cd360 --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/PlatformUtils.h @@ -0,0 +1,20 @@ +#ifndef RENDERER_VK_PLATFORM_UTILS_H +#define RENDERER_VK_PLATFORM_UTILS_H + +#ifdef DEBUG +#define VKDEBUG 1 +#endif +#ifdef _DEBUG +#define VKDEBUG 1 +#endif + +#include +#include + +vk::raii::SurfaceKHR +CreateSurfaceKHR(const vk::raii::Instance& instance); + +vkb::Result +CreateInstance(PFN_vkDebugUtilsMessengerCallbackEXT debugCallback); + +#endif \ No newline at end of file diff --git a/src/RageUtil/Graphics/RendererVK/RenderTargetVK.cpp b/src/RageUtil/Graphics/RendererVK/RenderTargetVK.cpp new file mode 100644 index 0000000000..eaea725751 --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/RenderTargetVK.cpp @@ -0,0 +1,43 @@ +#ifdef _WIN32 +#define VK_USE_PLATFORM_WIN32_KHR +#endif +#ifdef __unix__ +#define VK_USE_PLATFORM_XLIB_KHR +#endif + +#include "RenderTargetVK.h" +#include + +void +RenderTargetVK::Create(const RenderTargetParam& param, + int& iTextureWidthOut, + int& iTextureHeightOut) +{ + m_Param = param; + const auto width = param.iWidth; + const auto height = param.iHeight; + + iTextureWidthOut = width; + iTextureHeightOut = height; + + m_Texture = 0; +} + +auto +RenderTargetVK::GetTexture() const -> intptr_t +{ + assert(false && "Should not be called"); + return m_Texture; +} + +void +RenderTargetVK::StartRenderingTo() +{ + assert(false && "Should not be called"); +} + +void +RenderTargetVK::FinishRenderingTo() +{ + assert(false && "Should not be called"); +} diff --git a/src/RageUtil/Graphics/RendererVK/RenderTargetVK.h b/src/RageUtil/Graphics/RendererVK/RenderTargetVK.h 
new file mode 100644 index 0000000000..2623df9c84 --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/RenderTargetVK.h @@ -0,0 +1,49 @@ +#ifndef RENDERER_VK_RENDER_TARGET_VK_H +#define RENDERER_VK_RENDER_TARGET_VK_H + +#include "RageUtil/Graphics/RageDisplay.h" + +// TODO: extract to a separate header? OGL/D3D/VK use the same class... +class RenderTarget +{ + public: + virtual ~RenderTarget() = default; + + virtual void Create(const RenderTargetParam& param, + int& iTextureWidthOut, + int& iTextureHeightOut) = 0; + + [[nodiscard]] virtual auto GetTexture() const -> intptr_t = 0; + + /* Render to this RenderTarget. */ + virtual void StartRenderingTo() = 0; + + /* Stop rendering to this RenderTarget. Update the texture, if necessary, + * and make it available. */ + virtual void FinishRenderingTo() = 0; + + [[nodiscard]] virtual auto InvertY() const -> bool { return false; } + + [[nodiscard]] auto GetParam() const -> const RenderTargetParam& + { + return m_Param; + } + + protected: + RenderTargetParam m_Param; +}; + +class RenderTargetVK : public RenderTarget +{ + public: + void Create(const RenderTargetParam& param, + int& iTextureWidthOut, + int& iTextureHeightOut) override; + auto GetTexture() const -> intptr_t override; + void StartRenderingTo() override; + void FinishRenderingTo() override; + + intptr_t m_Texture; +}; + +#endif diff --git a/src/RageUtil/Graphics/RendererVK/RendererVK.cpp b/src/RageUtil/Graphics/RendererVK/RendererVK.cpp new file mode 100644 index 0000000000..fe352459ad --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/RendererVK.cpp @@ -0,0 +1,1704 @@ +#ifndef NOMINMAX // >:3 +#define NOMINMAX +#endif +#ifdef _WIN32 +#define VK_USE_PLATFORM_WIN32_KHR +#endif +#ifdef __unix__ +#define VK_USE_PLATFORM_XLIB_KHR +#endif + +#define VMA_IMPLEMENTATION +#include "RendererVK.h" + +#include +#include +#include +#include +#include "RenderTargetVK.h" +#include "PlatformUtils.h" + +constexpr uint64_t Timeout = 1000'000'000; + +RendererVK::RendererVK() + 
: m_Samplers{ nullptr, nullptr, nullptr, nullptr } +{ +} + +std::string +RendererVK::GetApiDescription() const +{ + return "Vulkan"; +} + +void +RendererVK::InitializeRenderer(const VideoModeParams& p) +{ + InitVulkanState(); + InitSwapchain(p); + InitImageViews(); + InitGraphicsPipeline(); + InitCommandPool(); + InitBatchBuffers(); + InitCommandBuffers(); + InitSyncStructures(); + InitTextures(); +} + +/// ---------------------------------------- +/// here be hazards and unsignaled fences... +/// ---------------------------------------- +void +RendererVK::OnRender(const ActualVideoModeParams* p, + const DisplayAdapter::CommandBatcher& batcher) +{ + ThrowIfFail(m_Device.waitForFences( + *m_InFlightFence[m_CurrentFrame], vk::True, Timeout)); + + UpdateBatchBuffers(batcher); + + auto [result, imageIndex] = m_Swapchain.acquireNextImage( + Timeout, *m_PresentCompleteSemaphore[m_CurrentFrame], nullptr); + + if (result == vk::Result::eErrorOutOfDateKHR || m_SwapchainIsInvalid) { + RecreateSwapchain(*p); + m_SwapchainIsInvalid = false; + return; + } + ThrowIfFail(result); + + m_Device.resetFences(*m_InFlightFence[m_CurrentFrame]); + m_CommandBuffers[m_CurrentFrame].reset(); + RecordCommands(imageIndex, batcher); + + vk::PipelineStageFlags waitDestinationStageMask( + vk::PipelineStageFlagBits::eColorAttachmentOutput); + + vk::SubmitInfo submitInfo{}; + submitInfo.waitSemaphoreCount = 1; + submitInfo.pWaitSemaphores = &*m_PresentCompleteSemaphore[m_CurrentFrame]; + submitInfo.pWaitDstStageMask = &waitDestinationStageMask; + submitInfo.commandBufferCount = 1; + submitInfo.pCommandBuffers = &*m_CommandBuffers[m_CurrentFrame]; + submitInfo.signalSemaphoreCount = 1; + submitInfo.pSignalSemaphores = &*m_RenderFinishedSemaphore[imageIndex]; + m_GraphicsQueue.submit(submitInfo, *m_InFlightFence[m_CurrentFrame]); + + vk::PresentInfoKHR presentInfoKHR{}; + presentInfoKHR.waitSemaphoreCount = 1; + presentInfoKHR.pWaitSemaphores = &*m_RenderFinishedSemaphore[imageIndex]; + 
presentInfoKHR.swapchainCount = 1; + presentInfoKHR.pSwapchains = &*m_Swapchain; + presentInfoKHR.pImageIndices = &imageIndex; + + try { + DISPLAY->FrameLimitBeforeVsync(); + + const auto beforePresent = std::chrono::steady_clock::now(); + result = m_PresentQueue.presentKHR(presentInfoKHR); + const auto afterPresent = std::chrono::steady_clock::now(); + DISPLAY->SetPresentTime(afterPresent - beforePresent); + + DISPLAY->FrameLimitAfterVsync( + DISPLAY->GetActualVideoModeParams()->rate); + } catch (vk::OutOfDateKHRError error) { + RecreateSwapchain(*p); + return; + } + + if (result == vk::Result::eSuboptimalKHR) { + RecreateSwapchain(*p); + return; + } + + m_CurrentFrame = (m_CurrentFrame + 1) % FramesInFlight; +} + +bool +RendererVK::IsD3DInternal() +{ + return false; +} + +intptr_t +RendererVK::CreateTexture(RageSurface* img, bool RGBA8) +{ + assert(m_EmptyTextureSlots.size()); + intptr_t currentHandle = *m_EmptyTextureSlots.begin(); + m_EmptyTextureSlots.erase(currentHandle); + + Texture texture = {}; + texture.width = img->w; + texture.height = img->h; + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + + VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; + imageInfo.imageType = VK_IMAGE_TYPE_2D; + imageInfo.format = + RGBA8 ? VK_FORMAT_R8G8B8A8_UNORM : VK_FORMAT_B8G8R8A8_UNORM; + imageInfo.extent = { texture.width, texture.height, 1 }; + imageInfo.mipLevels = 1; + imageInfo.arrayLayers = 1; + imageInfo.samples = VK_SAMPLE_COUNT_1_BIT; + imageInfo.usage = + VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; + + VkImage imagePtr = nullptr; + VmaAllocationInfo allocInfo = {}; + ThrowIfFail(vmaCreateImage(m_Allocator, + &imageInfo, + &allocCreateInfo, + &imagePtr, + &texture.allocation, + &allocInfo)); + texture.image = imagePtr; + + vk::ImageViewCreateInfo viewInfo; + viewInfo.image = texture.image; + viewInfo.viewType = vk::ImageViewType::e2D; + viewInfo.format = + RGBA8 ? 
vk::Format::eR8G8B8A8Unorm : vk::Format::eB8G8R8A8Unorm; + viewInfo.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor; + viewInfo.subresourceRange.levelCount = 1; + viewInfo.subresourceRange.layerCount = 1; + texture.view = (*m_Device).createImageView(viewInfo); + texture.currentLayout = vk::ImageLayout::eUndefined; + m_Textures.insert({ currentHandle, texture }); + + UpdateTexture(currentHandle, img, 0, 0, img->w, img->h); + + for (int i = 0; i < FramesInFlight; i++) { + m_PendingTextureUpdates[i] = true; + } + return currentHandle; +} + +void +RendererVK::UpdateTexture(intptr_t textureHandle, + RageSurface* img, + int xOffset, + int yOffset, + int width, + int height) +{ + assert(xOffset == 0); + assert(yOffset == 0); + assert(width == img->w); + assert(height == img->h); + assert(img->pitch == width * sizeof(uint32_t)); + assert(m_Textures.contains(textureHandle)); + + vk::CommandBufferAllocateInfo bufferInfo = {}; + bufferInfo.level = vk::CommandBufferLevel::ePrimary; + bufferInfo.commandPool = m_CommandPool; + bufferInfo.commandBufferCount = 1; + + auto buffers = m_Device.allocateCommandBuffers(bufferInfo); + assert(buffers.size() == 1); + auto& copyBuffer = buffers[0]; + + vk::CommandBufferBeginInfo beginInfo = {}; + beginInfo.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit; + copyBuffer.begin(beginInfo); + + auto& texture = m_Textures[textureHandle]; + std::memcpy(m_TextureBuffer.GetMappedData(), + img->pixels, + static_cast(img->h) * img->w * sizeof(uint32_t)); + + vk::ImageMemoryBarrier barrier = {}; + if (texture.initialized) { + barrier.oldLayout = vk::ImageLayout::eShaderReadOnlyOptimal; + barrier.newLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.srcAccessMask = vk::AccessFlagBits::eShaderRead; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + } else { + barrier.srcAccessMask = vk::AccessFlags(); + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.oldLayout = vk::ImageLayout::eUndefined; + 
barrier.newLayout = vk::ImageLayout::eTransferDstOptimal; + } + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.image = texture.image; + barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.levelCount = 1; + barrier.subresourceRange.layerCount = 1; + copyBuffer.pipelineBarrier(texture.initialized + ? vk::PipelineStageFlagBits::eAllGraphics + : vk::PipelineStageFlagBits::eTopOfPipe, + vk::PipelineStageFlagBits::eTransfer, + {}, + {}, + {}, + { barrier }); + + vk::BufferImageCopy imageCopy = {}; + imageCopy.imageExtent = + vk::Extent3D{ (uint32_t)width, (uint32_t)height, 1 }; + imageCopy.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor; + imageCopy.imageSubresource.mipLevel = 0; + imageCopy.imageSubresource.baseArrayLayer = 0; + imageCopy.imageSubresource.layerCount = 1; + copyBuffer.copyBufferToImage(m_TextureBuffer.buffer, + texture.image, + vk::ImageLayout::eTransferDstOptimal, + { imageCopy }); + + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal; + copyBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eAllGraphics, + {}, + {}, + {}, + { barrier }); + + copyBuffer.end(); + + vk::SubmitInfo submitInfo = {}; + submitInfo.commandBufferCount = 1; + submitInfo.pCommandBuffers = &(*copyBuffer); + + vk::FenceCreateInfo fenceInfo; + vk::raii::Fence fence(m_Device, fenceInfo); + m_GraphicsQueue.submit({ submitInfo }, fence); + ThrowIfFail(m_Device.waitForFences({ fence }, VK_TRUE, Timeout)); + + texture.initialized = true; + texture.currentLayout = vk::ImageLayout::eShaderReadOnlyOptimal; +} + +void +RendererVK::DeleteTexture(intptr_t handle) +{ + 
m_GraphicsQueue.waitIdle(); + + DestroyTexture(m_Textures[handle]); + m_Textures.erase(handle); + m_EmptyTextureSlots.insert(handle); + + for (int i = 0; i < FramesInFlight; i++) { + m_PendingTextureUpdates[i] = true; + } +} + +void +RendererVK::ClearAllTextures() +{ + m_GraphicsQueue.waitIdle(); + + Texture emptyTexture = m_Textures[0]; + for (auto& [handle, texture] : m_Textures) { + if (handle == 0) { + continue; + } + + DestroyTexture(texture); + m_Textures.erase(handle); + m_EmptyTextureSlots.insert(handle); + } + + m_Textures.clear(); + + m_Textures[0] = emptyTexture; + for (int i = 0; i < FramesInFlight; i++) { + m_PendingTextureUpdates[i] = true; + } +} + +RageSurface* +RendererVK::CreateScreenshot() +{ + auto props = + m_PhysicalDevice.getFormatProperties(vk::Format::eR8G8B8A8Unorm); + + bool supportsBlitting = + (props.optimalTilingFeatures & vk::FormatFeatureFlagBits::eBlitSrc) && + (props.linearTilingFeatures & vk::FormatFeatureFlagBits::eBlitDst); + + auto sourceImage = + m_SwapchainImages[((int)m_CurrentFrame - 1 + FramesInFlight) % + FramesInFlight]; + + vk::ImageCreateInfo destImageInfo = {}; + destImageInfo.imageType = vk::ImageType::e2D; + destImageInfo.format = vk::Format::eR8G8B8A8Unorm; + destImageInfo.extent.width = m_SwapchainExtent.width; + destImageInfo.extent.height = m_SwapchainExtent.height; + destImageInfo.extent.depth = 1; + destImageInfo.arrayLayers = 1; + destImageInfo.mipLevels = 1; + destImageInfo.initialLayout = vk::ImageLayout::eUndefined; + destImageInfo.samples = vk::SampleCountFlagBits::e1; + destImageInfo.tiling = vk::ImageTiling::eLinear; + destImageInfo.usage = vk::ImageUsageFlagBits::eTransferDst; + + vk::raii::Image destImage(m_Device, destImageInfo); + vk::MemoryRequirements memoryReqs = destImage.getMemoryRequirements(); + vk::MemoryAllocateInfo memoryAllocInfo = {}; + memoryAllocInfo.allocationSize = memoryReqs.size; + + auto memoryTypeIndex = + GetMemoryType(memoryReqs.memoryTypeBits, + 
vk::MemoryPropertyFlagBits::eHostVisible | + vk::MemoryPropertyFlagBits::eHostCoherent, + m_PhysicalDevice.getMemoryProperties()); + if (!memoryTypeIndex.has_value()) { + Locator::getLogger()->error("RendererVK: failed to screenshot (can't " + "find memory type for image creation)"); + Fail(); + } + + memoryAllocInfo.memoryTypeIndex = *memoryTypeIndex; + auto destImageMemory = m_Device.allocateMemory(memoryAllocInfo); + destImage.bindMemory(destImageMemory, 0); + + vk::CommandBufferAllocateInfo copyBufferInfo = {}; + copyBufferInfo.level = vk::CommandBufferLevel::ePrimary; + copyBufferInfo.commandPool = m_CommandPool; + copyBufferInfo.commandBufferCount = 1; + vk::raii::CommandBuffer copyBuffer = + std::move(m_Device.allocateCommandBuffers(copyBufferInfo)[0]); + + copyBuffer.begin({}); + + vk::ImageMemoryBarrier barrier = {}; + barrier.srcAccessMask = vk::AccessFlagBits::eNone; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.oldLayout = vk::ImageLayout::eUndefined; + barrier.newLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.image = destImage; + barrier.subresourceRange = + vk::ImageSubresourceRange{ vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 }; + + copyBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTransfer, + {}, + {}, + {}, + { barrier }); + + barrier.srcAccessMask = vk::AccessFlagBits::eMemoryRead; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead; + barrier.oldLayout = vk::ImageLayout::ePresentSrcKHR; + barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal; + barrier.image = sourceImage; + + copyBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTransfer, + {}, + {}, + {}, + { barrier }); + + if (supportsBlitting) { + vk::Offset3D blitSize = {}; + blitSize.x = m_SwapchainExtent.width; + blitSize.y = m_SwapchainExtent.height; + blitSize.z = 1; + + vk::ImageBlit blitRegion = {}; + blitRegion.srcSubresource.aspectMask = 
vk::ImageAspectFlagBits::eColor; + blitRegion.srcSubresource.layerCount = 1; + blitRegion.srcOffsets[1] = blitSize; + blitRegion.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor; + blitRegion.dstSubresource.layerCount = 1; + blitRegion.dstOffsets[1] = blitSize; + + copyBuffer.blitImage(sourceImage, + vk::ImageLayout::eTransferSrcOptimal, + destImage, + vk::ImageLayout::eTransferDstOptimal, + { blitRegion }, + vk::Filter::eNearest); + } else { + vk::ImageCopy copyRegion = {}; + copyRegion.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor; + copyRegion.srcSubresource.layerCount = 1; + copyRegion.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor; + copyRegion.dstSubresource.layerCount = 1; + copyRegion.extent.width = m_SwapchainExtent.width; + copyRegion.extent.height = m_SwapchainExtent.height; + copyRegion.extent.depth = 1; + + copyBuffer.copyImage(sourceImage, + vk::ImageLayout::eTransferSrcOptimal, + destImage, + vk::ImageLayout::eTransferDstOptimal, + { copyRegion }); + } + + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eMemoryRead; + barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal; + barrier.image = destImage; + + copyBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTransfer, + {}, + {}, + {}, + { barrier }); + + barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead; + barrier.dstAccessMask = vk::AccessFlagBits::eMemoryRead; + barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal; + barrier.newLayout = vk::ImageLayout::ePresentSrcKHR; + barrier.image = sourceImage; + + copyBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTransfer, + {}, + {}, + {}, + { barrier }); + copyBuffer.end(); + + vk::SubmitInfo submitInfo = {}; + submitInfo.commandBufferCount = 1; + submitInfo.pCommandBuffers = &(*copyBuffer); + + 
vk::FenceCreateInfo fenceInfo = {}; + vk::raii::Fence fence(m_Device, fenceInfo); + m_GraphicsQueue.submit({ submitInfo }, fence); + ThrowIfFail(m_Device.waitForFences({ fence }, VK_TRUE, Timeout)); + + vk::ImageSubresource subresource{ vk::ImageAspectFlagBits::eColor, 0, 0 }; + vk::SubresourceLayout subresourceLayout = + destImage.getSubresourceLayout(subresource); + + vk::MemoryMapInfo memoryMapInfo = {}; + memoryMapInfo.memory = destImageMemory; + memoryMapInfo.size = VK_WHOLE_SIZE; + + uint8_t* data = nullptr; + ThrowIfFail(vkMapMemory(*m_Device, + *destImageMemory, + 0, + VK_WHOLE_SIZE, + 0, + reinterpret_cast(&data))); + + RageSurface* surface = CreateSurface(m_SwapchainExtent.width, + m_SwapchainExtent.height, + 32, + 0x000000ff, + 0x0000ff00, + 0x00ff0000, + 0xff000000); + + for (size_t i = 0; i < 4LLU * surface->w * surface->h; i++) { + // set alpha to 255 because it broke otherwise for some reason :( + surface->pixels[i] = ((i + 1) % 4) ? data[i] : 255; + } + + vkUnmapMemory(*m_Device, *destImageMemory); + + return surface; +} + +intptr_t +RendererVK::CreateRenderTarget(const RenderTargetParam& param, + int& iTextureWidthOut, + int& iTextureHeightOut) +{ + RenderTargetVK target = {}; + target.Create(param, iTextureWidthOut, iTextureHeightOut); + target.m_Texture = CreateRenderTargetTexture(target.GetParam().iWidth, + target.GetParam().iHeight); + return target.m_Texture; +} + +RendererVK::~RendererVK() +{ + if (m_Device != nullptr) { + m_Device.waitIdle(); + } + + for (auto& [handle, texture] : m_Textures) { + DestroyTexture(texture); + } + + if (m_DepthImage != nullptr) { + m_DepthView = nullptr; + vmaDestroyImage(m_Allocator, m_DepthImage, m_DepthAllocation); + m_DepthImage = nullptr; + m_DepthAllocation = nullptr; + } + + if (m_TextureBuffer.buffer != VK_NULL_HANDLE) { + vmaDestroyBuffer( + m_Allocator, m_TextureBuffer.buffer, m_TextureBuffer.allocation); + m_TextureBuffer.buffer = VK_NULL_HANDLE; + } + + for (int i = 0; i < FramesInFlight; i++) { + if 
(m_VertexBuffer[i].buffer != VK_NULL_HANDLE) { + vmaDestroyBuffer(m_Allocator, + m_VertexBuffer[i].buffer, + m_VertexBuffer[i].allocation); + m_VertexBuffer[i].buffer = VK_NULL_HANDLE; + } + if (m_IndexBuffer[i].buffer != VK_NULL_HANDLE) { + vmaDestroyBuffer(m_Allocator, + m_IndexBuffer[i].buffer, + m_IndexBuffer[i].allocation); + m_IndexBuffer[i].buffer = VK_NULL_HANDLE; + } + if (m_MatrixStateBuffer[i].buffer != VK_NULL_HANDLE) { + vmaDestroyBuffer(m_Allocator, + m_MatrixStateBuffer[i].buffer, + m_MatrixStateBuffer[i].allocation); + m_MatrixStateBuffer[i].buffer = VK_NULL_HANDLE; + } + if (m_ShaderScratchBuffer[i].buffer != VK_NULL_HANDLE) { + vmaDestroyBuffer(m_Allocator, + m_ShaderScratchBuffer[i].buffer, + m_ShaderScratchBuffer[i].allocation); + m_ShaderScratchBuffer[i].buffer = VK_NULL_HANDLE; + } + } + + // likely the only thing that needs to be cleaned up manually (because + // descriptor pool should exist on descriptor sets' deletion) + m_DescriptorSets.clear(); + + if (m_Allocator != nullptr) { + vmaDestroyAllocator(m_Allocator); + } +} + +static VkBool32 +VulkanDebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData) +{ + switch (messageSeverity) { + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT: { + Locator::getLogger()->trace("RendererVK debug callback: {}", + pCallbackData->pMessage); + break; + } + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT: { + Locator::getLogger()->warn("RendererVK debug callback: {}", + pCallbackData->pMessage); + break; + } + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT: { + Locator::getLogger()->error("RendererVK debug callback: {}", + pCallbackData->pMessage); + break; + } + default: { + Locator::getLogger()->info("RendererVK debug callback: {}", + pCallbackData->pMessage); + break; + } + } + + return VK_FALSE; +} + +void +RendererVK::InitVulkanState() +{ + auto 
instanceResult = CreateInstance(VulkanDebugCallback); + if (!instanceResult) { + Locator::getLogger()->fatal("RendererVK: instance creation failed - {}", + GetDetailedErrorString(instanceResult)); + Fail(); + } + + m_Instance = vk::raii::Instance(m_Context, instanceResult->instance); +#ifdef VKDEBUG + m_DebugMessenger = vk::raii::DebugUtilsMessengerEXT( + m_Instance, instanceResult->debug_messenger); +#endif + + m_Surface = CreateSurfaceKHR(m_Instance); + + VkPhysicalDeviceVulkan13Features vk13Features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES + }; + vk13Features.dynamicRendering = vk::True; + vk13Features.synchronization2 = vk::True; + + VkPhysicalDeviceVulkan12Features vk12Features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES + }; + vk12Features.bufferDeviceAddress = vk::True; + vk12Features.descriptorIndexing = vk::True; + vk12Features.runtimeDescriptorArray = vk::True; + vk12Features.shaderSampledImageArrayNonUniformIndexing = vk::True; + vk12Features.scalarBlockLayout = vk::True; + + VkPhysicalDeviceFeatures vkFeatures = {}; + vkFeatures.samplerAnisotropy = vk::True; + vkFeatures.logicOp = vk::True; + vkFeatures.shaderInt64 = vk::True; + + vkb::PhysicalDeviceSelector selector(*instanceResult); + auto physicalDeviceResult = + selector.set_minimum_version(1, 3) + .set_required_features_13(vk13Features) + .set_required_features_12(vk12Features) + .set_required_features(vkFeatures) + .set_surface(static_cast(m_Surface)) + .add_required_extension(VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME) +#ifdef __APPLE__ + .add_required_extension(VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME) +#endif + .select(); + if (!physicalDeviceResult) { + Locator::getLogger()->fatal( + "RendererVK: physical device creation failed - {}", + GetDetailedErrorString(physicalDeviceResult)); + Fail(); + } + + VkPhysicalDeviceExtendedDynamicState3FeaturesEXT dynamicState3Features{}; + dynamicState3Features.sType = + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT; + dynamicState3Features.extendedDynamicState3ColorBlendEnable = VK_TRUE; + dynamicState3Features.extendedDynamicState3ColorBlendEquation = VK_TRUE; + dynamicState3Features.extendedDynamicState3ColorWriteMask = VK_TRUE; + + vkb::DeviceBuilder deviceBuilder(*physicalDeviceResult); + auto deviceResult = deviceBuilder.add_pNext(&dynamicState3Features).build(); + if (!deviceResult) { + Locator::getLogger()->fatal( + "RendererVK: device creation failed - {}", + GetDetailedErrorString(physicalDeviceResult)); + Fail(); + } + + m_PhysicalDevice = vk::raii::PhysicalDevice( + m_Instance, physicalDeviceResult->physical_device); + m_Device = vk::raii::Device(m_PhysicalDevice, deviceResult->device); + + Locator::getLogger()->debug("RendererVK: selected GPU: {}", + physicalDeviceResult->name); + + m_GraphicsQueue = vk::raii::Queue( + m_Device, deviceResult->get_queue(vkb::QueueType::graphics).value()); + m_GraphicsQueueFamily = + deviceResult->get_queue_index(vkb::QueueType::graphics).value(); + + m_PresentQueue = vk::raii::Queue( + m_Device, deviceResult->get_queue(vkb::QueueType::present).value()); + m_PresentQueueFamily = + deviceResult->get_queue_index(vkb::QueueType::present).value(); + + VmaAllocatorCreateInfo allocatorInfo = {}; + allocatorInfo.physicalDevice = + static_cast(m_PhysicalDevice); + allocatorInfo.device = static_cast(m_Device); + allocatorInfo.instance = static_cast(m_Instance); + allocatorInfo.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT; + + ThrowIfFail(vmaCreateAllocator(&allocatorInfo, &m_Allocator)); + + const std::vector depthFormats{ vk::Format::eD32SfloatS8Uint, + vk::Format::eD24UnormS8Uint }; + for (const auto& format : depthFormats) { + auto props = m_PhysicalDevice.getFormatProperties2(format); + if (props.formatProperties.optimalTilingFeatures & + vk::FormatFeatureFlagBits::eDepthStencilAttachment) { + m_DepthFormat = format; + break; + } + } +} + +void 
+RendererVK::InitSwapchain(const VideoModeParams& p) +{ + vkb::SwapchainBuilder swapchainBuilder( + *m_PhysicalDevice, *m_Device, *m_Surface); + + swapchainBuilder.set_desired_min_image_count(FramesInFlight) + .set_desired_format( + { VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR }) + .set_desired_format( + { VK_FORMAT_R8G8B8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR }) + .set_desired_present_mode(VK_PRESENT_MODE_IMMEDIATE_KHR) + .set_desired_extent(p.width, p.height) + .set_image_usage_flags(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | + VK_IMAGE_USAGE_TRANSFER_SRC_BIT) + .set_clipped(true); + + auto caps = *m_PhysicalDevice.getSurfaceCapabilitiesKHR(*m_Surface); + swapchainBuilder.set_pre_transform_flags(caps.currentTransform); + + VkCompositeAlphaFlagBitsKHR compositeAlpha = + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; + if (!(caps.supportedCompositeAlpha & compositeAlpha)) { + if (caps.supportedCompositeAlpha & + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR) + compositeAlpha = VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR; + else + compositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR; + } + swapchainBuilder.set_composite_alpha_flags(compositeAlpha); + +#ifdef _WIN32 + VkSurfaceFullScreenExclusiveInfoEXT fullScreenInfo = { + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT + }; + fullScreenInfo.fullScreenExclusive = + p.bWindowIsFullscreenBorderless ? 
VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT + : VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT; + swapchainBuilder.add_pNext(&fullScreenInfo); +#endif + + auto swapchain_ret = swapchainBuilder.build(); + if (!swapchain_ret) { + Locator::getLogger()->fatal( + "RendererVK: swapchain creation failed - {}", + GetDetailedErrorString(swapchain_ret)); + Fail(); + } + + vkb::Swapchain vkbSwapchain = swapchain_ret.value(); + + m_Swapchain = vk::raii::SwapchainKHR(m_Device, vkbSwapchain.swapchain); + m_SwapchainImages = m_Swapchain.getImages(); + m_ImageFormat = static_cast(vkbSwapchain.image_format); + m_SwapchainExtent = + vk::Extent2D(vkbSwapchain.extent.width, vkbSwapchain.extent.height); + + vk::ImageCreateInfo depthImageInfo = {}; + depthImageInfo.imageType = vk::ImageType::e2D; + depthImageInfo.format = m_DepthFormat; + depthImageInfo.extent = + vk::Extent3D(vkbSwapchain.extent.width, vkbSwapchain.extent.height, 1); + depthImageInfo.mipLevels = 1; + depthImageInfo.arrayLayers = 1; + depthImageInfo.samples = vk::SampleCountFlagBits::e1; + depthImageInfo.tiling = vk::ImageTiling::eOptimal; + depthImageInfo.usage = vk::ImageUsageFlagBits::eDepthStencilAttachment; + depthImageInfo.initialLayout = vk::ImageLayout::eUndefined; + + VmaAllocationCreateInfo depthAllocInfo = {}; + depthAllocInfo.usage = VMA_MEMORY_USAGE_AUTO; + depthAllocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + ThrowIfFail(vmaCreateImage(m_Allocator, + &*depthImageInfo, + &depthAllocInfo, + &m_DepthImage, + &m_DepthAllocation, + nullptr)); + + vk::ImageViewCreateInfo depthViewInfo = {}; + depthViewInfo.image = m_DepthImage; + depthViewInfo.viewType = vk::ImageViewType::e2D; + depthViewInfo.format = m_DepthFormat; + vk::ImageSubresourceRange subRange = {}; + subRange.aspectMask = vk::ImageAspectFlagBits::eDepth; + subRange.levelCount = 1; + subRange.layerCount = 1; + depthViewInfo.subresourceRange = subRange; + m_DepthView = vk::raii::ImageView(m_Device, depthViewInfo); +} + +void 
+RendererVK::RecreateSwapchain(const VideoModeParams& p) +{ + m_Device.waitIdle(); + + CleanupSwapchain(); + InitSwapchain(p); + InitImageViews(); + InitSyncStructures(); +} + +void +RendererVK::CleanupSwapchain() +{ + m_SwapchainImageViews.clear(); + m_Swapchain = nullptr; + + if (m_DepthImage != nullptr) { + m_DepthView = nullptr; + vmaDestroyImage(m_Allocator, m_DepthImage, m_DepthAllocation); + m_DepthImage = nullptr; + m_DepthAllocation = nullptr; + } +} + +void +RendererVK::InitImageViews() +{ + m_SwapchainImageViews.clear(); + vk::ImageViewCreateInfo createInfo{}; + createInfo.viewType = vk::ImageViewType::e2D; + createInfo.format = m_ImageFormat; + createInfo.subresourceRange = { + vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 + }; + + for (auto& image : m_SwapchainImages) { + createInfo.image = image; + m_SwapchainImageViews.emplace_back(m_Device, createInfo); + } +} + +void +RendererVK::InitGraphicsPipeline() +{ + CreateGraphicsPipeline( + FILEMAN->ResolvePath("Data/Shaders/Vulkan/vertex.glsl"), + FILEMAN->ResolvePath("Data/Shaders/Vulkan/fragment.glsl")); +} + +std::vector +RendererVK::GetDescriptorBindings() +{ + return { + vk::DescriptorSetLayoutBinding(0, + vk::DescriptorType::eStorageBuffer, + 1, + vk::ShaderStageFlagBits::eVertex), + vk::DescriptorSetLayoutBinding(1, + vk::DescriptorType::eStorageBuffer, + 1, + vk::ShaderStageFlagBits::eVertex), + vk::DescriptorSetLayoutBinding(2, + vk::DescriptorType::eSampledImage, + GetMaxTextureCount(), + vk::ShaderStageFlagBits::eAllGraphics), + vk::DescriptorSetLayoutBinding(3, + vk::DescriptorType::eSampler, + Texture::PossibleSamplerCount, + vk::ShaderStageFlagBits::eAllGraphics) + }; +} + +void +RendererVK::InitCommandPool() +{ + vk::CommandPoolCreateInfo poolInfo{}; + poolInfo.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer; + poolInfo.queueFamilyIndex = m_GraphicsQueueFamily; + + m_CommandPool = vk::raii::CommandPool(m_Device, poolInfo); +} + +void +RendererVK::InitCommandBuffers() +{ + 
m_CommandBuffers.clear(); + vk::CommandBufferAllocateInfo allocInfo{}; + allocInfo.commandPool = m_CommandPool; + allocInfo.level = vk::CommandBufferLevel::ePrimary; + allocInfo.commandBufferCount = FramesInFlight; + + m_CommandBuffers = vk::raii::CommandBuffers(m_Device, allocInfo); +} + +void +RendererVK::TransitionImageLayout(vk::Image& image, + vk::ImageLayout oldLayout, + vk::ImageLayout newLayout, + vk::AccessFlags2 srcAccessMask, + vk::AccessFlags2 dstAccessMask, + vk::PipelineStageFlags2 srcStageMask, + vk::PipelineStageFlags2 dstStageMask, + vk::raii::CommandBuffer& commandBuffer) +{ + if (oldLayout == newLayout) { + return; + } + + vk::ImageMemoryBarrier2 barrier{}; + barrier.srcStageMask = srcStageMask; + barrier.srcAccessMask = srcAccessMask; + barrier.dstStageMask = dstStageMask; + barrier.dstAccessMask = dstAccessMask; + barrier.oldLayout = oldLayout; + barrier.newLayout = newLayout; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + + vk::ImageSubresourceRange range{}; + range.aspectMask = vk::ImageAspectFlagBits::eColor; + range.baseMipLevel = 0; + range.levelCount = 1; + range.baseArrayLayer = 0; + range.layerCount = 1; + + barrier.subresourceRange = range; + + vk::DependencyInfo dependencyInfo{}; + dependencyInfo.dependencyFlags = {}; + dependencyInfo.imageMemoryBarrierCount = 1; + dependencyInfo.pImageMemoryBarriers = &barrier; + + commandBuffer.pipelineBarrier2(dependencyInfo); +} + +void +RendererVK::InitSyncStructures() +{ + m_PresentCompleteSemaphore.clear(); + m_RenderFinishedSemaphore.clear(); + m_InFlightFence.clear(); + + for (size_t i = 0; i < FramesInFlight; i++) { + m_PresentCompleteSemaphore.emplace_back(m_Device, + vk::SemaphoreCreateInfo()); + m_InFlightFence.emplace_back( + m_Device, vk::FenceCreateInfo(vk::FenceCreateFlagBits::eSignaled)); + } + for (size_t i = 0; i < m_SwapchainImages.size(); i++) { + 
m_RenderFinishedSemaphore.emplace_back(m_Device, + vk::SemaphoreCreateInfo()); + } +} + +void +RendererVK::RecordCommands(uint32_t imageIndex, + const DisplayAdapter::CommandBatcher& batcher) +{ + auto& buffer = m_CommandBuffers[m_CurrentFrame]; + buffer.begin({}); + + buffer.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, + *m_Pipelines[0].PipelineLayout, + 0, + { *m_DescriptorSets[m_CurrentFrame] }, + nullptr); + buffer.bindIndexBuffer( + m_IndexBuffer[m_CurrentFrame].Get(), 0, vk::IndexType::eUint32); + + intptr_t currentPipeline = -1; + + for (auto& node : batcher.m_RenderNodes) { + bool swapchain = node.RenderTarget == 0; + + auto image = swapchain ? m_SwapchainImages[imageIndex] + : m_Textures[node.RenderTarget].image; + auto view = swapchain ? m_SwapchainImageViews[imageIndex] + : m_Textures[node.RenderTarget].view; + auto extent = swapchain + ? m_SwapchainExtent + : vk::Extent2D(m_Textures[node.RenderTarget].width, + m_Textures[node.RenderTarget].height); + + TransitionImageLayout( + image, + swapchain ? vk::ImageLayout::eUndefined + : m_Textures[node.RenderTarget].currentLayout, + vk::ImageLayout::eColorAttachmentOptimal, + swapchain ? vk::AccessFlags2() : vk::AccessFlagBits2::eShaderRead, + vk::AccessFlagBits2::eColorAttachmentWrite | + vk::AccessFlagBits2::eColorAttachmentRead, + swapchain ? 
vk::PipelineStageFlagBits2::eColorAttachmentOutput + : vk::PipelineStageFlagBits2::eAllGraphics, + vk::PipelineStageFlagBits2::eColorAttachmentOutput, + buffer); + + if (!swapchain) { + m_Textures[node.RenderTarget].currentLayout = + vk::ImageLayout::eColorAttachmentOptimal; + } + + vk::RenderingAttachmentInfo colorInfo = {}; + colorInfo.imageView = view; + colorInfo.imageLayout = vk::ImageLayout::eColorAttachmentOptimal; + colorInfo.storeOp = vk::AttachmentStoreOp::eStore; + if (node.PreserveRenderTarget && !swapchain) { + colorInfo.loadOp = vk::AttachmentLoadOp::eLoad; + } else { + colorInfo.loadOp = vk::AttachmentLoadOp::eClear; + colorInfo.clearValue = vk::ClearColorValue(0.0f, 0.0f, 0.0f, 0.0f); + } + + vk::RenderingAttachmentInfo depthInfo = {}; + depthInfo.imageView = m_DepthView; + depthInfo.imageLayout = vk::ImageLayout::eAttachmentOptimal; + depthInfo.loadOp = vk::AttachmentLoadOp::eClear; + depthInfo.storeOp = vk::AttachmentStoreOp::eDontCare; + depthInfo.clearValue = vk::ClearDepthStencilValue(1.0f, 0); + + vk::RenderingInfo renderInfo = {}; + renderInfo.renderArea = + vk::Rect2D{ { 0, 0 }, { extent.width, extent.height } }; + renderInfo.layerCount = 1; + renderInfo.colorAttachmentCount = 1; + renderInfo.pColorAttachments = &colorInfo; + renderInfo.pDepthAttachment = &depthInfo; + + buffer.setScissor( + 0, + vk::Rect2D(vk::Offset2D(0, 0), + vk::Extent2D(extent.width, extent.height))); + buffer.setViewport(0, + vk::Viewport(0.0f, + static_cast(extent.height), + static_cast(extent.width), + -static_cast(extent.height), + 0.0f, + 1.0f)); + + buffer.beginRendering(renderInfo); + + for (auto& call : node.DrawCalls) { + buffer.setDepthTestEnable( + call.DepthTestMode != ZTEST_OFF ? VK_TRUE : VK_FALSE); + buffer.setDepthWriteEnable( + call.DepthWriteEnabled ? 
VK_TRUE : VK_FALSE); + + vk::CompareOp depthCompareOp = vk::CompareOp::eAlways; + switch (call.DepthTestMode) { + case ZTEST_OFF: { + break; + } + case ZTEST_WRITE_ON_PASS: { + depthCompareOp = vk::CompareOp::eLessOrEqual; + break; + } + case ZTEST_WRITE_ON_FAIL: { + depthCompareOp = vk::CompareOp::eGreater; + break; + } + default: { + Locator::getLogger()->error( + "Invalid ZTestMode encountered: {}", + call.DepthTestMode); + Fail(); + } + } + buffer.setDepthCompareOp(vk::CompareOp::eLessOrEqual); + + SetBlendMode(call.BlendingMode, buffer); + + if (call.Settings.GraphicsPipeline != currentPipeline) { + currentPipeline = call.Settings.GraphicsPipeline; + buffer.bindPipeline( + vk::PipelineBindPoint::eGraphics, + m_Pipelines[currentPipeline].GraphicsPipeline); + } + + uint64_t vertexArg = + call.Settings.VertexShaderArg == UINT64_MAX + ? 0 + : m_ShaderScratchBuffer[m_CurrentFrame].gpuAddress + + call.Settings.VertexShaderArg; + + buffer.pushConstants(*m_Pipelines[0].PipelineLayout, + vk::ShaderStageFlagBits::eVertex | + vk::ShaderStageFlagBits::eFragment, + 0, + { vertexArg }); + + uint64_t fragArg = + call.Settings.FragShaderArg == UINT64_MAX + ? 0 + : m_ShaderScratchBuffer[m_CurrentFrame].gpuAddress + + call.Settings.FragShaderArg; + + buffer.pushConstants(*m_Pipelines[0].PipelineLayout, + vk::ShaderStageFlagBits::eVertex | + vk::ShaderStageFlagBits::eFragment, + sizeof(uint64_t), + { fragArg }); + + buffer.drawIndexed(call.IndexCount, 1, call.IndexOffset, 0, 0); + } + + buffer.endRendering(); + + TransitionImageLayout( + image, + swapchain ? vk::ImageLayout::eColorAttachmentOptimal + : m_Textures[node.RenderTarget].currentLayout, + swapchain ? vk::ImageLayout::ePresentSrcKHR + : vk::ImageLayout::eShaderReadOnlyOptimal, + vk::AccessFlagBits2::eColorAttachmentWrite, + swapchain ? vk::AccessFlags2() : vk::AccessFlagBits2::eShaderRead, + vk::PipelineStageFlagBits2::eColorAttachmentOutput, + swapchain ? 
vk::PipelineStageFlagBits2::eBottomOfPipe + : vk::PipelineStageFlagBits2::eAllGraphics, + buffer); + + if (!swapchain) { + m_Textures[node.RenderTarget].currentLayout = + vk::ImageLayout::eShaderReadOnlyOptimal; + } + } + + buffer.end(); +} + +void +RendererVK::SetBlendMode(BlendMode mode, vk::raii::CommandBuffer& buffer) +{ + vk::Bool32 enableBlending = VK_TRUE; + vk::ColorComponentFlags colorWriteMask = + vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | + vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA; + + vk::ColorBlendEquationEXT blendEquation{}; + blendEquation.colorBlendOp = vk::BlendOp::eAdd; + blendEquation.alphaBlendOp = vk::BlendOp::eAdd; + + switch (mode) { + case BLEND_NORMAL: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstColorBlendFactor = + vk::BlendFactor::eOneMinusSrcAlpha; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstAlphaBlendFactor = + vk::BlendFactor::eOneMinusSrcAlpha; + break; + } + + case BLEND_ADD: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eOne; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eOne; + break; + } + + case BLEND_SUBTRACT: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eZero; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eZero; + break; + } + + case BLEND_MODULATE: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eZero; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eSrcColor; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eZero; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eSrcColor; + break; + } + + case BLEND_COPY_SRC: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eOne; 
+ blendEquation.dstColorBlendFactor = vk::BlendFactor::eZero; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eOne; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eZero; + break; + } + + case BLEND_ALPHA_MASK: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eZero; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eOne; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eZero; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eSrcAlpha; + break; + } + + case BLEND_ALPHA_KNOCK_OUT: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eZero; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eOne; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eZero; + blendEquation.dstAlphaBlendFactor = + vk::BlendFactor::eOneMinusSrcAlpha; + break; + } + + case BLEND_ALPHA_MULTIPLY: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eZero; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eZero; + break; + } + + case BLEND_WEIGHTED_MULTIPLY: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eDstColor; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eSrcColor; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eDstColor; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eSrcColor; + break; + } + + case BLEND_INVERT_DEST: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eOne; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eOne; + blendEquation.colorBlendOp = vk::BlendOp::eSubtract; + blendEquation.srcAlphaBlendFactor = vk::BlendFactor::eOne; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eOne; + blendEquation.alphaBlendOp = vk::BlendOp::eSubtract; + break; + } + + case BLEND_NO_EFFECT: { + blendEquation.srcColorBlendFactor = vk::BlendFactor::eZero; + blendEquation.dstColorBlendFactor = vk::BlendFactor::eOne; + blendEquation.srcAlphaBlendFactor = 
vk::BlendFactor::eZero; + blendEquation.dstAlphaBlendFactor = vk::BlendFactor::eOne; + colorWriteMask = vk::ColorComponentFlags(0); + break; + } + + default: { + Locator::getLogger()->error("Invalid BlendMode: {}", mode); + Fail(); + } + } + + buffer.setColorBlendEnableEXT(0, { enableBlending }); + buffer.setColorWriteMaskEXT(0, { colorWriteMask }); + buffer.setColorBlendEquationEXT(0, { blendEquation }); +} + +void +RendererVK::InitBatchBuffers() +{ + uint32_t textureDims = GetMaxTextureSize(); + + VmaAllocationCreateInfo textureAllocInfo = {}; + textureAllocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + textureAllocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; + textureAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + + vk::BufferCreateInfo textureInfo = {}; + textureInfo.size = + (vk::DeviceSize)textureDims * textureDims * sizeof(uint32_t); + textureInfo.usage = vk::BufferUsageFlagBits::eTransferSrc; + m_TextureBuffer.Init(m_Allocator, textureInfo, textureAllocInfo); + + vk::DescriptorPoolSize poolSizes[3] = {}; + poolSizes[0].type = vk::DescriptorType::eStorageBuffer; + poolSizes[0].descriptorCount = 2 * FramesInFlight; + poolSizes[1].type = vk::DescriptorType::eSampledImage; + poolSizes[1].descriptorCount = GetMaxTextureCount() * FramesInFlight; + poolSizes[2].type = vk::DescriptorType::eSampler; + poolSizes[2].descriptorCount = + Texture::PossibleSamplerCount * FramesInFlight; + + vk::DescriptorPoolCreateInfo poolInfo = {}; + poolInfo.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet; + poolInfo.poolSizeCount = 3; + poolInfo.pPoolSizes = poolSizes; + poolInfo.maxSets = FramesInFlight; + m_DescriptorPool = vk::raii::DescriptorPool(m_Device, poolInfo); + + std::vector bindings = + GetDescriptorBindings(); + vk::DescriptorSetLayoutCreateInfo layoutInfo({}, bindings); + + m_DescriptorSetLayout = vk::raii::DescriptorSetLayout(m_Device, layoutInfo); + + std::vector layouts(FramesInFlight, + *m_DescriptorSetLayout); + + 
vk::DescriptorSetAllocateInfo allocInfo( + *m_DescriptorPool, FramesInFlight, layouts.data()); + m_DescriptorSets = m_Device.allocateDescriptorSets(allocInfo); + + for (int i = 0; i < FramesInFlight; i++) { + vk::BufferCreateInfo vertexBufferInfo{}; + vertexBufferInfo.size = sizeof(DisplayAdapter::Vertex) * MaxDrawCount; + vertexBufferInfo.usage = vk::BufferUsageFlagBits::eStorageBuffer; + VmaAllocationCreateInfo vertexAllocInfo = {}; + vertexAllocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; + vertexAllocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + vertexAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + m_VertexBuffer[i].Init(m_Allocator, vertexBufferInfo, vertexAllocInfo); + + vk::BufferCreateInfo indexBufferInfo{}; + indexBufferInfo.size = sizeof(uint32_t) * 5 * MaxDrawCount; + indexBufferInfo.usage = vk::BufferUsageFlagBits::eIndexBuffer; + VmaAllocationCreateInfo indexAllocInfo = {}; + indexAllocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; + indexAllocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + indexAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + m_IndexBuffer[i].Init(m_Allocator, indexBufferInfo, indexAllocInfo); + + vk::BufferCreateInfo matrixBufferInfo{}; + matrixBufferInfo.size = + sizeof(DisplayAdapter::MatrixState) * MaxDrawCount; + matrixBufferInfo.usage = vk::BufferUsageFlagBits::eStorageBuffer; + VmaAllocationCreateInfo matrixAllocInfo = {}; + matrixAllocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; + matrixAllocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + matrixAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + m_MatrixStateBuffer[i].Init( + m_Allocator, matrixBufferInfo, matrixAllocInfo); + + vk::BufferCreateInfo scratchBufferInfo{}; + scratchBufferInfo.size = sizeof(uint8_t) * 1'000'000; + scratchBufferInfo.usage = vk::BufferUsageFlagBits::eStorageBuffer | + vk::BufferUsageFlagBits::eShaderDeviceAddress; + VmaAllocationCreateInfo scratchAllocInfo = {}; + scratchAllocInfo.usage = 
VMA_MEMORY_USAGE_CPU_TO_GPU; + scratchAllocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + scratchAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + m_ShaderScratchBuffer[i].Init( + m_Allocator, scratchBufferInfo, scratchAllocInfo); + vk::BufferDeviceAddressInfo scratchAddressInfo = {}; + scratchAddressInfo.buffer = m_ShaderScratchBuffer[i].buffer; + m_ShaderScratchBuffer[i].gpuAddress = + m_Device.getBufferAddress(scratchAddressInfo); + + vk::DescriptorBufferInfo triangleInfo( + m_VertexBuffer[i].Get(), 0, VK_WHOLE_SIZE); + vk::DescriptorBufferInfo matrixInfo( + m_MatrixStateBuffer[i].Get(), 0, VK_WHOLE_SIZE); + + std::vector writes = { + vk::WriteDescriptorSet(m_DescriptorSets[i], + 0, + 0, + 1, + vk::DescriptorType::eStorageBuffer, + nullptr, + &triangleInfo, + nullptr), + vk::WriteDescriptorSet(m_DescriptorSets[i], + 1, + 0, + 1, + vk::DescriptorType::eStorageBuffer, + nullptr, + &matrixInfo, + nullptr) + }; + + m_Device.updateDescriptorSets(writes, nullptr); + } +} + +void +RendererVK::UpdateBatchBuffers(const DisplayAdapter::CommandBatcher& batcher) +{ + if (m_PendingTextureUpdates[m_CurrentFrame]) { + m_PendingTextureUpdates[m_CurrentFrame] = false; + + std::vector textureInfo(GetMaxTextureCount()); + for (int i = 0; i < textureInfo.size(); i++) { + textureInfo[i].imageLayout = + vk::ImageLayout::eShaderReadOnlyOptimal; + + if (m_EmptyTextureSlots.contains(i)) { + textureInfo[i].imageView = m_Textures[0].view; + } else { + textureInfo[i].imageView = m_Textures[i].view; + } + } + + vk::WriteDescriptorSet writeDescriptor = {}; + writeDescriptor.dstSet = m_DescriptorSets[m_CurrentFrame]; + writeDescriptor.dstBinding = 2; + writeDescriptor.descriptorCount = textureInfo.size(); + writeDescriptor.descriptorType = vk::DescriptorType::eSampledImage; + writeDescriptor.pImageInfo = textureInfo.data(); + + m_Device.updateDescriptorSets({ writeDescriptor }, {}); + } + + if (batcher.m_VertexBuffer.empty()) { + return; + } + + 
std::memcpy(m_VertexBuffer[m_CurrentFrame].GetMappedData(), + batcher.m_VertexBuffer.data(), + sizeof(DisplayAdapter::Vertex) * batcher.m_VertexBuffer.size()); + + std::memcpy(m_IndexBuffer[m_CurrentFrame].GetMappedData(), + batcher.m_IndexBuffer.data(), + sizeof(uint32_t) * batcher.m_IndexBuffer.size()); + + std::memcpy(m_ShaderScratchBuffer[m_CurrentFrame].GetMappedData(), + batcher.m_ShaderScratchBuffer.data(), + sizeof(uint8_t) * batcher.m_ShaderScratchBuffer.size()); + + std::memcpy(m_MatrixStateBuffer[m_CurrentFrame].GetMappedData(), + batcher.m_MatrixStateBuffer.data(), + sizeof(DisplayAdapter::MatrixState) * + batcher.m_MatrixStateBuffer.size()); +} + +int +RendererVK::GetMaxTextureSize() +{ + return std::min( + 4096u, m_PhysicalDevice.getProperties().limits.maxImageDimension2D); +} + +int +RendererVK::GetMaxTextureCount() +{ + return std::min( + static_cast(Texture::MaxTextures), + m_PhysicalDevice.getProperties().limits.maxDescriptorSetSampledImages / + FramesInFlight); +} + +void +RendererVK::DestroyTexture(Texture& texture) +{ + if (texture.image) { + vmaDestroyImage(m_Allocator, texture.image, texture.allocation); + } + if (texture.view) { + vkDestroyImageView(*m_Device, texture.view, nullptr); + } + + texture = {}; +} + +void +RendererVK::InitTextures() +{ + vk::PhysicalDeviceProperties properties = m_PhysicalDevice.getProperties(); + vk::SamplerCreateInfo samplerInfo = {}; + samplerInfo.mipmapMode = vk::SamplerMipmapMode::eLinear; + samplerInfo.anisotropyEnable = vk::True; + samplerInfo.maxAnisotropy = properties.limits.maxSamplerAnisotropy; + samplerInfo.compareEnable = vk::False; + samplerInfo.compareOp = vk::CompareOp::eAlways; + std::array + samplerImageInfo; + + for (size_t i = 0; i < m_Samplers.size(); i++) { + samplerInfo.magFilter = + (i & Texture::Filtering) ? vk::Filter::eLinear : vk::Filter::eNearest; + samplerInfo.minFilter = + (i & Texture::Filtering) ? 
vk::Filter::eLinear : vk::Filter::eNearest; + samplerInfo.addressModeU = (i & Texture::Wrapping) + ? vk::SamplerAddressMode::eRepeat + : vk::SamplerAddressMode::eClampToBorder; + samplerInfo.addressModeV = (i & Texture::Wrapping) + ? vk::SamplerAddressMode::eRepeat + : vk::SamplerAddressMode::eClampToBorder; + m_Samplers[i] = vk::raii::Sampler(m_Device, samplerInfo); + samplerImageInfo[i].sampler = m_Samplers[i]; + } + + for (int i = 0; i < FramesInFlight; i++) { + vk::WriteDescriptorSet writeDescriptor = {}; + writeDescriptor.dstSet = m_DescriptorSets[i]; + writeDescriptor.dstBinding = 3; + writeDescriptor.descriptorCount = Texture::PossibleSamplerCount; + writeDescriptor.descriptorType = vk::DescriptorType::eSampler; + writeDescriptor.pImageInfo = samplerImageInfo.data(); + + m_Device.updateDescriptorSets({ writeDescriptor }, nullptr); + } + + for (int i = 0; i < GetMaxTextureCount(); i++) { + m_EmptyTextureSlots.insert(i); + } + + RageSurface* img = + CreateSurface(1, 1, 32, 0x000000ff, 0x0000ff00, 0x00ff0000, 0xff000000); + CreateTexture(img, true); +} + +void +RendererVK::ResolutionChanged() +{ + m_SwapchainIsInvalid = true; +} + +intptr_t +RendererVK::CreateRenderTargetTexture(int width, int height) +{ + assert(m_EmptyTextureSlots.size()); + intptr_t currentHandle = *m_EmptyTextureSlots.begin(); + m_EmptyTextureSlots.erase(currentHandle); + + Texture texture = {}; + texture.width = width; + texture.height = height; + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + + VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; + imageInfo.imageType = VK_IMAGE_TYPE_2D; + imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM; + imageInfo.extent = { texture.width, texture.height, 1 }; + imageInfo.mipLevels = 1; + imageInfo.arrayLayers = 1; + imageInfo.samples = VK_SAMPLE_COUNT_1_BIT; + imageInfo.usage = + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; + + VkImage imagePtr = nullptr; + 
VmaAllocationInfo allocInfo = {}; + ThrowIfFail(vmaCreateImage(m_Allocator, + &imageInfo, + &allocCreateInfo, + &imagePtr, + &texture.allocation, + &allocInfo)); + texture.image = imagePtr; + + vk::ImageViewCreateInfo viewInfo; + viewInfo.image = texture.image; + viewInfo.viewType = vk::ImageViewType::e2D; + viewInfo.format = vk::Format::eR8G8B8A8Unorm; + viewInfo.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor; + viewInfo.subresourceRange.levelCount = 1; + viewInfo.subresourceRange.layerCount = 1; + texture.view = (*m_Device).createImageView(viewInfo); + texture.currentLayout = vk::ImageLayout::eUndefined; + + m_Textures.insert({ currentHandle, texture }); + + return currentHandle; +} + +intptr_t +RendererVK::CreateGraphicsPipeline(const std::string& vertexShaderPath, + const std::string& fragmentShaderPath) +{ + auto previousPipeline = + m_PipelineLookup.find({ vertexShaderPath, fragmentShaderPath }); + + if (previousPipeline != m_PipelineLookup.end()) { + return previousPipeline->second; + } else { + m_PipelineLookup[{ vertexShaderPath, fragmentShaderPath }] = + m_Pipelines.size(); + } + + PipelineInfo info = {}; + + auto fragmentShader = + LoadShaderFromFile(fragmentShaderPath, m_Device, ShaderType_Fragment); + auto vertexShader = + LoadShaderFromFile(vertexShaderPath, m_Device, ShaderType_Vertex); + + vk::PipelineShaderStageCreateInfo vertexShaderStageInfo{}; + vertexShaderStageInfo.stage = vk::ShaderStageFlagBits::eVertex; + vertexShaderStageInfo.module = vertexShader, + vertexShaderStageInfo.pName = "main"; + + vk::PipelineShaderStageCreateInfo fragmentShaderStageInfo{}; + fragmentShaderStageInfo.stage = vk::ShaderStageFlagBits::eFragment; + fragmentShaderStageInfo.module = fragmentShader, + fragmentShaderStageInfo.pName = "main"; + + vk::PipelineShaderStageCreateInfo shaderStages[] = { + vertexShaderStageInfo, fragmentShaderStageInfo + }; + + std::vector dynamicStates = { vk::DynamicState::eViewport, + vk::DynamicState::eScissor, + 
vk::DynamicState::eDepthWriteEnable, + vk::DynamicState::eDepthTestEnable, + vk::DynamicState::eDepthCompareOp, + vk::DynamicState::eColorBlendEnableEXT, + vk::DynamicState::eColorBlendEquationEXT, + vk::DynamicState::eColorWriteMaskEXT }; + + vk::PipelineDynamicStateCreateInfo dynamicState{}; + dynamicState.dynamicStateCount = + static_cast(dynamicStates.size()); + dynamicState.pDynamicStates = dynamicStates.data(); + + vk::PipelineInputAssemblyStateCreateInfo inputAssembly{}; + inputAssembly.topology = vk::PrimitiveTopology::eTriangleList; + + vk::PipelineViewportStateCreateInfo viewportState({}, 1, {}, 1); + + vk::PipelineRasterizationStateCreateInfo rasterizer{}; + rasterizer.depthClampEnable = vk::False; + rasterizer.rasterizerDiscardEnable = vk::False; + rasterizer.polygonMode = vk::PolygonMode::eFill; + rasterizer.cullMode = vk::CullModeFlagBits::eBack; + rasterizer.frontFace = vk::FrontFace::eCounterClockwise; + rasterizer.depthBiasEnable = vk::False; + rasterizer.depthBiasSlopeFactor = 1.0f; + rasterizer.lineWidth = 1.0f; + + vk::PipelineMultisampleStateCreateInfo multisampling{}; + multisampling.rasterizationSamples = vk::SampleCountFlagBits::e1; + multisampling.sampleShadingEnable = vk::False; + + if (m_DescriptorSetLayout == nullptr) { + std::vector bindings = + GetDescriptorBindings(); + vk::DescriptorSetLayoutCreateInfo layoutInfo({}, bindings); + + m_DescriptorSetLayout = + vk::raii::DescriptorSetLayout(m_Device, layoutInfo); + } + + std::array pushConstants = {}; + pushConstants[0].size = sizeof(uint64_t) * 2; + pushConstants[0].stageFlags = + vk::ShaderStageFlagBits::eVertex | vk::ShaderStageFlagBits::eFragment; + + vk::PipelineLayoutCreateInfo pipelineLayoutInfo = {}; + pipelineLayoutInfo.setLayoutCount = 1; + pipelineLayoutInfo.pSetLayouts = &*m_DescriptorSetLayout; + pipelineLayoutInfo.pushConstantRangeCount = 1; + pipelineLayoutInfo.pPushConstantRanges = pushConstants.data(); + + info.PipelineLayout = + vk::raii::PipelineLayout(m_Device, 
pipelineLayoutInfo); + + vk::PipelineColorBlendAttachmentState colorBlendAttachment{}; + colorBlendAttachment.blendEnable = VK_FALSE; + colorBlendAttachment.colorWriteMask = + vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | + vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA; + + vk::PipelineColorBlendStateCreateInfo colorBlending{}; + colorBlending.attachmentCount = 1; + colorBlending.pAttachments = &colorBlendAttachment; + + vk::PipelineDepthStencilStateCreateInfo depthStencil{}; + depthStencil.depthTestEnable = VK_TRUE; + depthStencil.depthWriteEnable = VK_TRUE; + depthStencil.depthCompareOp = vk::CompareOp::eLessOrEqual; + + vk::PipelineRenderingCreateInfo pipelineRenderingCreateInfo{}; + pipelineRenderingCreateInfo.colorAttachmentCount = 1; + pipelineRenderingCreateInfo.pColorAttachmentFormats = &m_ImageFormat; + pipelineRenderingCreateInfo.depthAttachmentFormat = m_DepthFormat; + + // we don't actually need any vertex info since we're reading stuffs from + // the storage buffer + vk::PipelineVertexInputStateCreateInfo vertexInfo = {}; + + vk::GraphicsPipelineCreateInfo pipelineInfo = {}; + pipelineInfo.pNext = &pipelineRenderingCreateInfo; + pipelineInfo.stageCount = 2; + pipelineInfo.pStages = shaderStages; + pipelineInfo.pInputAssemblyState = &inputAssembly; + pipelineInfo.pViewportState = &viewportState; + pipelineInfo.pRasterizationState = &rasterizer; + pipelineInfo.pMultisampleState = &multisampling; + pipelineInfo.pDynamicState = &dynamicState; + pipelineInfo.pVertexInputState = &vertexInfo; + pipelineInfo.layout = info.PipelineLayout; + pipelineInfo.renderPass = nullptr; + pipelineInfo.pDepthStencilState = &depthStencil; + pipelineInfo.pColorBlendState = &colorBlending; + + info.GraphicsPipeline = vk::raii::Pipeline(m_Device, nullptr, pipelineInfo); + + m_Pipelines.push_back(std::move(info)); + return static_cast(m_Pipelines.size() - 1); +} + +void +RendererVK::TryVideoMode(const VideoModeParams& params) +{ + 
m_Device.waitIdle(); + + m_Surface = CreateSurfaceKHR(m_Instance); + RecreateSwapchain(params); +} diff --git a/src/RageUtil/Graphics/RendererVK/RendererVK.h b/src/RageUtil/Graphics/RendererVK/RendererVK.h new file mode 100644 index 0000000000..66799f920d --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/RendererVK.h @@ -0,0 +1,162 @@ +#ifndef RENDERER_VULKAN_H +#define RENDERER_VULKAN_H + +#include "RageUtil/Graphics/Display/Renderer.h" +#include "Core/Services/Locator.hpp" + +#ifdef DEBUG +#define VKDEBUG 1 +#endif +#ifdef _DEBUG +#define VKDEBUG 1 +#endif + +#ifdef VKDEBUG +#define VMA_DEBUG_LOG +#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 +#define VMA_DEBUG_LOG_FORMAT(format, ...) \ + do { \ + char buffer[256]; \ + snprintf(buffer, sizeof(buffer), format, __VA_ARGS__); \ + std::string str(buffer); \ + Locator::getLogger()->debug("VulkanMemoryAllocator: " + str); \ + } while (false) +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include "VkUtils.h" +#include "Texture.h" +#include "PersistentBuffer.h" + +struct PipelineInfo +{ + vk::raii::PipelineLayout PipelineLayout = nullptr; + vk::raii::Pipeline GraphicsPipeline = nullptr; +}; + +class RendererVK : public DisplayAdapter::Renderer +{ + public: + RendererVK(); + std::string GetApiDescription() const override; + void InitializeRenderer(const VideoModeParams& p) override; + void OnRender(const ActualVideoModeParams* p, + const DisplayAdapter::CommandBatcher& batcher) override; + bool IsD3DInternal() override; + intptr_t CreateTexture(RageSurface* img, bool RGBA8) override; + void UpdateTexture(intptr_t textureHandle, + RageSurface* img, + int xOffset, + int yOffset, + int width, + int height) override; + void DeleteTexture(intptr_t handle) override; + void ClearAllTextures() override; + RageSurface* CreateScreenshot() override; + intptr_t CreateRenderTarget(const RenderTargetParam& param, + int& iTextureWidthOut, + int& iTextureHeightOut) override; + intptr_t 
CreateGraphicsPipeline( + const std::string& vertexShaderPath, + const std::string& fragmentShaderPath) override; + void TryVideoMode(const VideoModeParams& params) override; + ~RendererVK() override; + + private: + vk::raii::Context m_Context; + vk::raii::Instance m_Instance = nullptr; + vk::raii::DebugUtilsMessengerEXT m_DebugMessenger = nullptr; + vk::raii::PhysicalDevice m_PhysicalDevice = nullptr; + vk::raii::Device m_Device = nullptr; + vk::raii::SurfaceKHR m_Surface = nullptr; + vk::raii::Queue m_GraphicsQueue = nullptr; + uint32_t m_GraphicsQueueFamily = 0; + vk::raii::Queue m_PresentQueue = nullptr; + uint32_t m_PresentQueueFamily = 0; + VmaAllocator m_Allocator = nullptr; + vk::Format m_DepthFormat = {}; + void InitVulkanState(); + + vk::raii::SwapchainKHR m_Swapchain = nullptr; + vk::Extent2D m_SwapchainExtent; + std::vector m_SwapchainImages; + vk::Format m_ImageFormat = {}; + VkImage m_DepthImage = nullptr; + VmaAllocation m_DepthAllocation = nullptr; + vk::raii::ImageView m_DepthView = nullptr; + + bool m_SwapchainIsInvalid = false; + void InitSwapchain(const VideoModeParams& p); + void RecreateSwapchain(const VideoModeParams& p); + void CleanupSwapchain(); + + std::vector m_SwapchainImageViews; + void InitImageViews(); + + std::vector m_Pipelines; + vk::raii::DescriptorSetLayout m_DescriptorSetLayout = nullptr; + std::map, intptr_t> m_PipelineLookup; + void InitGraphicsPipeline(); + std::vector GetDescriptorBindings(); + + vk::raii::CommandPool m_CommandPool = nullptr; + void InitCommandPool(); + + std::vector m_CommandBuffers; + void InitCommandBuffers(); + + void TransitionImageLayout(vk::Image& image, + vk::ImageLayout oldLayout, + vk::ImageLayout newLayout, + vk::AccessFlags2 srcAccessMask, + vk::AccessFlags2 dstAccessMask, + vk::PipelineStageFlags2 srcStageMask, + vk::PipelineStageFlags2 dstStageMask, + vk::raii::CommandBuffer& commandBuffer); + + std::vector m_PresentCompleteSemaphore; + std::vector m_RenderFinishedSemaphore; + std::vector 
m_InFlightFence; + uint32_t m_CurrentFrame = 0; + void InitSyncStructures(); + void RecordCommands(uint32_t imageIndex, + const DisplayAdapter::CommandBatcher& batcher); + void SetBlendMode(BlendMode mode, vk::raii::CommandBuffer& buffer); + + constexpr static size_t FramesInFlight = 3; + constexpr static size_t MaxDrawCount = 25'000; + + std::array m_VertexBuffer; + std::array m_IndexBuffer; + std::array m_MatrixStateBuffer; + std::array m_ShaderScratchBuffer; + PersistentBuffer m_TextureBuffer; + + std::vector m_DescriptorSets; + vk::raii::DescriptorPool m_DescriptorPool = nullptr; + + void InitBatchBuffers(); + void UpdateBatchBuffers(const DisplayAdapter::CommandBatcher& batcher); + + std::map m_Textures; + std::set m_EmptyTextureSlots; + int GetMaxTextureSize(); + int GetMaxTextureCount(); + void DestroyTexture(Texture& texture); + + std::array m_Samplers; + std::array m_PendingTextureUpdates; + void InitTextures(); + void ResolutionChanged() override; + + intptr_t CreateRenderTargetTexture(int width, int height); +}; + +#endif diff --git a/src/RageUtil/Graphics/RendererVK/Texture.h b/src/RageUtil/Graphics/RendererVK/Texture.h new file mode 100644 index 0000000000..f43507293a --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/Texture.h @@ -0,0 +1,27 @@ +#ifndef RENDERER_VK_TEXTURE_H +#define RENDERER_VK_TEXTURE_H + +#include +#include +#include + +struct Texture +{ + enum + { + Wrapping = 0b01, + Filtering = 0b10, + PossibleSamplerCount = 4, + MaxTextures = 1024, + }; + VmaAllocation allocation = nullptr; + VmaAllocator allocator = nullptr; + vk::Image image = {}; + vk::ImageView view = nullptr; + uint32_t width = 0; + uint32_t height = 0; + bool initialized = false; + vk::ImageLayout currentLayout = vk::ImageLayout::eUndefined; +}; + +#endif diff --git a/src/RageUtil/Graphics/RendererVK/VkUtils.cpp b/src/RageUtil/Graphics/RendererVK/VkUtils.cpp new file mode 100644 index 0000000000..30a1586884 --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/VkUtils.cpp 
@@ -0,0 +1,125 @@ +#ifdef _WIN32 +#define VK_USE_PLATFORM_WIN32_KHR +#endif +#ifdef __unix__ +#define VK_USE_PLATFORM_XLIB_KHR +#endif + +#include "VkUtils.h" +#include +#include +#include +#include "Core/Services/Locator.hpp" +#include +#include + +void +ThrowIfFail(VkResult result, const std::source_location location) +{ + if (result == VK_SUCCESS) { + return; + } + + const std::string message = + fmt::format("RendererVK failed: VkResult {} at {}:{} in function {}", + static_cast(result), + location.file_name(), + location.line(), + location.function_name()); + Locator::getLogger()->error(message); + throw std::runtime_error(message.c_str()); +} + +void +ThrowIfFail(vk::Result result, const std::source_location location) +{ + ThrowIfFail(static_cast(result), location); +} + +void +Fail(const std::source_location location) +{ + const std::string message = + fmt::format("RendererVK failed at {}:{} in function {}", + location.file_name(), + location.line(), + location.function_name()); + Locator::getLogger()->error(message); + throw std::runtime_error(message.c_str()); +} + +std::vector +CompileShader(const std::string& sourceName, + shaderc_shader_kind shaderKind, + const std::string& source) +{ + shaderc::Compiler compiler; + shaderc::CompileOptions options; + + auto result = compiler.CompileGlslToSpv( + source, shaderKind, sourceName.c_str(), options); + + if (result.GetCompilationStatus() != shaderc_compilation_status_success) { + auto message = fmt::format("Vulkan GLSL shader compilation failed: {}", + result.GetErrorMessage()); + Locator::getLogger()->error(message); + sm_crash(message.c_str()); + } + + return { result.begin(), result.end() }; +} + +vk::raii::ShaderModule +LoadShaderFromFile(std::string path, + vk::raii::Device& device, + ShaderType shaderType) +{ +#ifdef _WIN32 + if (path[0] == '/') { + path = path.substr(1); + } +#endif + + shaderc_shader_kind shaderKind = {}; + switch (shaderType) { + case ShaderType_Vertex: + shaderKind = 
shaderc_vertex_shader; + break; + case ShaderType_Fragment: + shaderKind = shaderc_fragment_shader; + break; + default: + assert(false && "Invalid shader type specified!"); + } + + std::ifstream inputFile(path); + std::stringstream contents; + contents << inputFile.rdbuf(); + auto shaderBlob = CompileShader("meow", shaderKind, contents.str()); + + VkShaderModuleCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + createInfo.pNext = nullptr; + createInfo.codeSize = shaderBlob.size() * sizeof(uint32_t); + createInfo.pCode = shaderBlob.data(); + + return vk::raii::ShaderModule(device, createInfo); +} + +std::optional +GetMemoryType(uint32_t typeBits, + vk::MemoryPropertyFlags neededProps, + vk::PhysicalDeviceMemoryProperties memoryProps) +{ + for (uint32_t i = 0; i < memoryProps.memoryTypeCount; i++) { + if ((typeBits & 1) == 1) { + if ((memoryProps.memoryTypes[i].propertyFlags & neededProps) == + neededProps) { + return i; + } + } + typeBits >>= 1; + } + + return std::nullopt; +} diff --git a/src/RageUtil/Graphics/RendererVK/VkUtils.h b/src/RageUtil/Graphics/RendererVK/VkUtils.h new file mode 100644 index 0000000000..32a124875c --- /dev/null +++ b/src/RageUtil/Graphics/RendererVK/VkUtils.h @@ -0,0 +1,57 @@ +#ifndef VK_UTILS_H +#define VK_UTILS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +void +ThrowIfFail( + VkResult result, + const std::source_location location = std::source_location::current()); + +void +ThrowIfFail( + vk::Result result, + const std::source_location location = std::source_location::current()); + +void +Fail(const std::source_location location = std::source_location::current()); + +vk::raii::ShaderModule +LoadShaderFromFile(std::string path, + vk::raii::Device& device, + ShaderType shaderType); + +std::optional +GetMemoryType(uint32_t typeBits, + vk::MemoryPropertyFlags neededProps, + vk::PhysicalDeviceMemoryProperties memoryProps); + +template +std::string 
+GetDetailedErrorString(vkb::Result& result) +{ + std::string reason; + auto& reasons = result.detailed_failure_reasons(); + + for (int i = 0; i < reasons.size(); i++) { + reason += reasons[i]; + if (i != reasons.size() - 1) { + reason += " ; "; + } + } + + if (reasons.empty()) { + reason = "No reason given"; + } + + return reason; +} + +#endif diff --git a/src/RageUtil/Misc/RageTypes.cpp b/src/RageUtil/Misc/RageTypes.cpp index b5e5333971..89e2676a49 100644 --- a/src/RageUtil/Misc/RageTypes.cpp +++ b/src/RageUtil/Misc/RageTypes.cpp @@ -257,3 +257,15 @@ LuaFunc_lerp_color(lua_State* L) return 1; } LUAFUNC_REGISTER_COMMON(lerp_color); + +static const char* ShaderTypeNames[] = { "Vertex", "Fragment" }; +XToString(ShaderType); +LuaXType(ShaderType); + +static const char* ShaderParamTypeNames[] = { "Int", + "IntArray", + "Float", + "FloatArray", + "Texture" }; +XToString(ShaderParamType); +LuaXType(ShaderParamType); diff --git a/src/RageUtil/Misc/RageTypes.h b/src/RageUtil/Misc/RageTypes.h index 39fc626dd3..6d4b11e49e 100644 --- a/src/RageUtil/Misc/RageTypes.h +++ b/src/RageUtil/Misc/RageTypes.h @@ -645,4 +645,25 @@ struct RageMatrix float m[4][4]{}; }; +enum ShaderType +{ + ShaderType_Vertex, + ShaderType_Fragment, + NUM_ShaderType, + ShaderType_Invalid +}; +LuaDeclareType(ShaderType); + +enum ShaderParamType +{ + ShaderParamType_Int, + ShaderParamType_IntArray, + ShaderParamType_Float, + ShaderParamType_FloatArray, + ShaderParamType_Texture, + NUM_ShaderParamType, + ShaderParamType_Invalid +}; +LuaDeclareType(ShaderParamType); + #endif diff --git a/src/arch/CMakeLists.txt b/src/arch/CMakeLists.txt index be3d2990b6..a36778d345 100644 --- a/src/arch/CMakeLists.txt +++ b/src/arch/CMakeLists.txt @@ -92,9 +92,13 @@ source_group("Arch Specific\\\\Movie Texture" FILES ${SMDATA_ARCH_MOVIE_TEXTURE_ list(APPEND SMDATA_ARCH_LOWLEVEL_SRC "LowLevelWindow/LowLevelWindow.cpp") list(APPEND SMDATA_ARCH_LOWLEVEL_HPP "LowLevelWindow/LowLevelWindow.h") +list(APPEND 
SMDATA_ARCH_LOWLEVEL_SRC "LowLevelWindowVK/LowLevelWindowVK.cpp") +list(APPEND SMDATA_ARCH_LOWLEVEL_HPP "LowLevelWindowVK/LowLevelWindowVK.h") if(WIN32) list(APPEND SMDATA_ARCH_LOWLEVEL_SRC "LowLevelWindow/LowLevelWindow_Win32.cpp") list(APPEND SMDATA_ARCH_LOWLEVEL_HPP "LowLevelWindow/LowLevelWindow_Win32.h") + list(APPEND SMDATA_ARCH_LOWLEVEL_SRC "LowLevelWindowVK/LowLevelWindowVK_Win32.cpp") + list(APPEND SMDATA_ARCH_LOWLEVEL_HPP "LowLevelWindowVK/LowLevelWindowVK_Win32.h") elseif(APPLE) list(APPEND SMDATA_ARCH_LOWLEVEL_SRC "LowLevelWindow/LowLevelWindow_MacOSX.mm") @@ -103,6 +107,8 @@ elseif(APPLE) else() # Unix list(APPEND SMDATA_ARCH_LOWLEVEL_SRC "LowLevelWindow/LowLevelWindow_X11.cpp") list(APPEND SMDATA_ARCH_LOWLEVEL_HPP "LowLevelWindow/LowLevelWindow_X11.h") + list(APPEND SMDATA_ARCH_LOWLEVEL_SRC "LowLevelWindowVK/LowLevelWindowVK_X11.cpp") + list(APPEND SMDATA_ARCH_LOWLEVEL_HPP "LowLevelWindowVK/LowLevelWindowVK_X11.h") endif(WIN32) source_group("Arch Specific\\\\Low Level Window" FILES ${SMDATA_ARCH_LOWLEVEL_SRC} ${SMDATA_ARCH_LOWLEVEL_HPP}) diff --git a/src/arch/LowLevelWindow/LowLevelWindow_X11.cpp b/src/arch/LowLevelWindow/LowLevelWindow_X11.cpp index 5e580afb7c..5e386d1067 100644 --- a/src/arch/LowLevelWindow/LowLevelWindow_X11.cpp +++ b/src/arch/LowLevelWindow/LowLevelWindow_X11.cpp @@ -91,15 +91,20 @@ LowLevelWindow_X11::LowLevelWindow_X11() iXServerVersion %= 1000; int iPatch = iXServerVersion; - Locator::getLogger()->info("Display: {} (screen {})", DisplayString(Dpy), iScreen); + Locator::getLogger()->info( + "Display: {} (screen {})", DisplayString(Dpy), iScreen); Locator::getLogger()->info("X server vendor: {} [{}.{}.{}.{}]", - XServerVendor(Dpy), iMajor, iMinor, iRevision, iPatch); + XServerVendor(Dpy), + iMajor, + iMinor, + iRevision, + iPatch); Locator::getLogger()->info("Server GLX vendor: {} [{}]", - glXQueryServerString(Dpy, iScreen, GLX_VENDOR), - glXQueryServerString(Dpy, iScreen, GLX_VERSION)); + glXQueryServerString(Dpy, iScreen, 
GLX_VENDOR), + glXQueryServerString(Dpy, iScreen, GLX_VERSION)); Locator::getLogger()->info("Client GLX vendor: {} [{}]", - glXGetClientString(Dpy, GLX_VENDOR), - glXGetClientString(Dpy, GLX_VERSION)); + glXGetClientString(Dpy, GLX_VENDOR), + glXGetClientString(Dpy, GLX_VERSION)); m_bWasWindowed = true; g_pScreenConfig = XRRGetScreenInfo(Dpy, RootWindow(Dpy, DefaultScreen(Dpy))); @@ -408,8 +413,9 @@ LowLevelWindow_X11::TryVideoMode(const VideoModeParams& p, bool& bNewDeviceOut) } } if (targetOut == None) { - Locator::getLogger()->info("Did not find display output {}, trying another", - p.sDisplayId.c_str()); + Locator::getLogger()->info( + "Did not find display output {}, trying another", + p.sDisplayId.c_str()); // didn't find named output, pick primary/or at least one that // works if (g_iRandRVerMajor >= 1 && g_iRandRVerMinor >= 3) { @@ -509,11 +515,12 @@ LowLevelWindow_X11::TryVideoMode(const VideoModeParams& p, bool& bNewDeviceOut) const std::string tgtOutName = std::string( tgtOutInfo->name, static_cast(tgtOutInfo->nameLen)); - Locator::getLogger()->info("XRandR output config using CRTC {} in mode {}, " - "driving output %s", - g_usedCrtc, - mode, - tgtOutName.c_str()); + Locator::getLogger()->info( + "XRandR output config using CRTC {} in mode {}, " + "driving output %s", + g_usedCrtc, + mode, + tgtOutName.c_str()); // and FIRE! Status s = XRRSetCrtcConfig(Dpy, scrRes, @@ -685,8 +692,8 @@ LowLevelWindow_X11::TryVideoMode(const VideoModeParams& p, bool& bNewDeviceOut) if (GLXEW_EXT_swap_control) // I haven't seen this actually implemented // yet, but why not. glXSwapIntervalEXT(Dpy, Win, CurrentParams.vsync ? 1 : 0); - // XXX: These two might be server-global. I should look into whether - // to try to preserve the original value on exit. + // XXX: These two might be server-global. I should look into whether + // to try to preserve the original value on exit. #ifdef GLXEW_MESA_swap_control // Added in 1.7. 1.6 is still common out there // apparently. 
else if (GLXEW_MESA_swap_control) // Haven't seen this NOT implemented @@ -708,14 +715,16 @@ LowLevelWindow_X11::Update() XEvent event; if (XCheckTypedEvent(Dpy, ClientMessage, &event) && event.xclient.data.l[0] == g_wmDeleteMessage) { - GameLoop::setUserQuit(); + GameLoop::setUserQuit(); } } void LowLevelWindow_X11::LogDebugInformation() const { - Locator::getLogger()->info("Direct rendering: {}", glXIsDirect(Dpy, glXGetCurrentContext()) ? "yes" : "no"); + Locator::getLogger()->info("Direct rendering: {}", + glXIsDirect(Dpy, glXGetCurrentContext()) ? "yes" + : "no"); } bool @@ -1005,7 +1014,8 @@ RenderTarget_X11::Create(const RenderTargetParam& param, glGenTextures(1, reinterpret_cast(&m_iTexHandle)); glBindTexture(GL_TEXTURE_2D, m_iTexHandle); - Locator::getLogger()->trace("n {}, {}x{}", m_iTexHandle, param.iWidth, param.iHeight); + Locator::getLogger()->trace( + "n {}, {}x{}", m_iTexHandle, param.iWidth, param.iHeight); while (glGetError() != GL_NO_ERROR) ; diff --git a/src/arch/LowLevelWindowVK/LowLevelWindowVK.cpp b/src/arch/LowLevelWindowVK/LowLevelWindowVK.cpp new file mode 100644 index 0000000000..d95e899a41 --- /dev/null +++ b/src/arch/LowLevelWindowVK/LowLevelWindowVK.cpp @@ -0,0 +1,15 @@ +#include "LowLevelWindowVK.h" +#include "Etterna/Globals/global.h" +#include "arch/arch_default.h" +#include + +LowLevelWindowVK* +LowLevelWindowVK::Create() +{ +#ifdef __APPLE__ + assert(false && "Should never be called"); + return nullptr; +#else + return new LOW_LEVEL_WINDOW_VK; +#endif +} diff --git a/src/arch/LowLevelWindowVK/LowLevelWindowVK.h b/src/arch/LowLevelWindowVK/LowLevelWindowVK.h new file mode 100644 index 0000000000..e4215e729d --- /dev/null +++ b/src/arch/LowLevelWindowVK/LowLevelWindowVK.h @@ -0,0 +1,32 @@ +#ifndef LOW_LEVEL_WINDOW_VK_H +#define LOW_LEVEL_WINDOW_VK_H + +#include +#include + +class DisplaySpec; +typedef std::set DisplaySpecs; +class VideoModeParams; +class ActualVideoModeParams; +class RenderTarget; +struct RenderTargetParam; +class 
LowLevelWindowVK +{ + public: + static LowLevelWindowVK* Create(); + + virtual ~LowLevelWindowVK() = default; + + // Return "" if mode change was successful, otherwise an error message. + // bNewDeviceOut is set true if a new device was created and textures + // need to be reloaded. + virtual std::string TryVideoMode(const VideoModeParams& p, + bool& bNewDeviceOut) = 0; + virtual void GetDisplaySpecs(DisplaySpecs& out) const = 0; + virtual void Update() {} + + virtual const ActualVideoModeParams* GetActualVideoModeParams() const = 0; + virtual bool SupportsFullscreenBorderlessWindow() const { return false; } +}; + +#endif \ No newline at end of file diff --git a/src/arch/LowLevelWindowVK/LowLevelWindowVK_Win32.cpp b/src/arch/LowLevelWindowVK/LowLevelWindowVK_Win32.cpp new file mode 100644 index 0000000000..c2e22365cc --- /dev/null +++ b/src/arch/LowLevelWindowVK/LowLevelWindowVK_Win32.cpp @@ -0,0 +1,85 @@ +#include "LowLevelWindowVK_Win32.h" +#include <windows.h> + +LowLevelWindowVK_Win32::LowLevelWindowVK_Win32() +{ + GraphicsWindow::Initialize(false); +} + +std::string +LowLevelWindowVK_Win32::TryVideoMode(const VideoModeParams& p, + bool& bNewDeviceOut) +{ + GraphicsWindow::CreateGraphicsWindow(p); + return ""; +} + +static BOOL CALLBACK +EnumerateMonitors(HMONITOR monitor, + HDC deviceContextHandle, + LPRECT monitorRect, + LPARAM userData) +{ + auto* out = reinterpret_cast<DisplaySpecs*>(userData); + + MONITORINFOEXW monitorInfo = {}; + monitorInfo.cbSize = sizeof(monitorInfo); + if (!GetMonitorInfoW(monitor, &monitorInfo)) { + return TRUE; + } + + std::set<DisplayMode> modes; + DEVMODEW deviceMode = {}; + deviceMode.dmSize = sizeof(deviceMode); + deviceMode.dmDriverExtra = 0; + DWORD modeIndex = 0; + while (EnumDisplaySettingsW(monitorInfo.szDevice, modeIndex, &deviceMode)) { + modes.insert({ deviceMode.dmPelsWidth, + deviceMode.dmPelsHeight, + static_cast<double>(deviceMode.dmDisplayFrequency) }); + modeIndex++; + } + + DisplayMode active = { 0, 0, 0.0 }; + if (EnumDisplaySettingsW( + 
monitorInfo.szDevice, ENUM_CURRENT_SETTINGS, &deviceMode)) { + active.width = deviceMode.dmPelsWidth; + active.height = deviceMode.dmPelsHeight; + active.refreshRate = static_cast<double>(deviceMode.dmDisplayFrequency); + } else if (!modes.empty()) { + active = *modes.begin(); + } + + RectI bounds(monitorInfo.rcMonitor.left, + monitorInfo.rcMonitor.top, + monitorInfo.rcMonitor.right, + monitorInfo.rcMonitor.bottom); + + out->insert(DisplaySpec("", "Fullscreen", modes, active, bounds)); + return TRUE; +} + +void +LowLevelWindowVK_Win32::GetDisplaySpecs(DisplaySpecs& out) const +{ + EnumDisplayMonitors( + nullptr, nullptr, EnumerateMonitors, reinterpret_cast<LPARAM>(&out)); +} + +void +LowLevelWindowVK_Win32::Update() +{ + GraphicsWindow::Update(); +} + +const ActualVideoModeParams* +LowLevelWindowVK_Win32::GetActualVideoModeParams() const +{ + return static_cast<const ActualVideoModeParams*>(GraphicsWindow::GetParams()); +} + +bool +LowLevelWindowVK_Win32::SupportsFullscreenBorderlessWindow() const +{ + return true; +} diff --git a/src/arch/LowLevelWindowVK/LowLevelWindowVK_Win32.h b/src/arch/LowLevelWindowVK/LowLevelWindowVK_Win32.h new file mode 100644 index 0000000000..da18618893 --- /dev/null +++ b/src/arch/LowLevelWindowVK/LowLevelWindowVK_Win32.h @@ -0,0 +1,24 @@ +#ifndef LOW_LEVEL_WINDOW_VK_WIN32_H +#define LOW_LEVEL_WINDOW_VK_WIN32_H + +#include "LowLevelWindowVK.h" + +class LowLevelWindowVK_Win32 : public LowLevelWindowVK +{ + public: + LowLevelWindowVK_Win32(); + ~LowLevelWindowVK_Win32() {} + + // Return "" if mode change was successful, otherwise an error message. + // bNewDeviceOut is set true if a new device was created and textures + // need to be reloaded. 
+ std::string TryVideoMode(const VideoModeParams& p, + bool& bNewDeviceOut) override; + void GetDisplaySpecs(DisplaySpecs& out) const override; + void Update() override; + + const ActualVideoModeParams* GetActualVideoModeParams() const override; + bool SupportsFullscreenBorderlessWindow() const override; +}; + +#endif diff --git a/src/arch/LowLevelWindowVK/LowLevelWindowVK_X11.cpp b/src/arch/LowLevelWindowVK/LowLevelWindowVK_X11.cpp new file mode 100644 index 0000000000..770aaece6d --- /dev/null +++ b/src/arch/LowLevelWindowVK/LowLevelWindowVK_X11.cpp @@ -0,0 +1,776 @@ +#include "LowLevelWindowVK_X11.h" +#include "Core/Services/Locator.hpp" +#include "RageUtil/Graphics/RageDisplay.h" +#include "archutils/Unix/X11Helper.h" +#include "Etterna/Models/Misc/DisplaySpec.h" +#include "Etterna/Globals/GameLoop.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace X11Helper; + +const std::string ID_XSCREEN = "XSCREEN_RANDR"; + +static std::string FAILED_CONNECTION_XSERVER( + "LowLevelWindowVK_X11: " + "Failed to establish a connection with the X server"); + +bool +LowLevelWindowVK_X11::NetWMSupported(Display* Dpy, Atom feature) const +{ + Atom net_supported = XInternAtom(Dpy, "_NET_SUPPORTED", False); + Atom actual_type_return = BadAtom; + int actual_format_return = 0; + unsigned long nitems_return = 0; + unsigned long bytes_after_return = 0; + Atom* prop_return; + Status status = + XGetWindowProperty(Dpy, + RootWindow(Dpy, DefaultScreen(Dpy)), + net_supported, + 0, + 8192, + False, + XA_ATOM, + &actual_type_return, + &actual_format_return, + &nitems_return, + &bytes_after_return, + reinterpret_cast(&prop_return)); + if (status != Success) { + return false; + } + + auto supported = + std::find(prop_return, prop_return + nitems_return, feature) != + prop_return + nitems_return; + XFree(prop_return); + return supported; +} + +inline float +calcRandRRefresh(unsigned long iPixelClock, int iHTotal, int iVTotal) +{ + 
return static_cast<float>(iPixelClock) / (iHTotal * iVTotal); +} + +LowLevelWindowVK_X11::LowLevelWindowVK_X11() +{ + if (!OpenXConnection()) + throw std::runtime_error(FAILED_CONNECTION_XSERVER); + + if (XRRQueryVersion(Dpy, &m_iRandRVerMajor, &m_iRandRVerMinor) && + m_iRandRVerMajor >= 1 && m_iRandRVerMinor >= 2) + m_bUseXRandR12 = true; + + const int iScreen = DefaultScreen(Dpy); + Locator::getLogger()->info( + "Display: {} (screen {})", DisplayString(Dpy), iScreen); + int iXServerVersion = XVendorRelease(Dpy); + int iMajor = iXServerVersion / 10000000; + iXServerVersion %= 10000000; + int iMinor = iXServerVersion / 100000; + iXServerVersion %= 100000; + int iRevision = iXServerVersion / 1000; + iXServerVersion %= 1000; + int iPatch = iXServerVersion; + Locator::getLogger()->info("X server vendor: {} [{}.{}.{}.{}]", + XServerVendor(Dpy), + iMajor, + iMinor, + iRevision, + iPatch); + + m_bWasWindowed = true; + m_pScreenConfig = + XRRGetScreenInfo(Dpy, RootWindow(Dpy, DefaultScreen(Dpy))); +} + +LowLevelWindowVK_X11::~LowLevelWindowVK_X11() +{ + if (!m_bWasWindowed) { + if (m_bChangedScreenSize) { + XRRSetScreenConfig(Dpy, + m_pScreenConfig, + RootWindow(Dpy, DefaultScreen(Dpy)), + m_iOldSize, + m_OldRotation, + CurrentTime); + } + if (m_usedCrtc != None) { + XRRScreenResources* res = XRRGetScreenResources(Dpy, Win); + XRRCrtcInfo* conf = XRRGetCrtcInfo(Dpy, res, m_usedCrtc); + XRRSetCrtcConfig(Dpy, + res, + m_usedCrtc, + conf->timestamp, + conf->x, + conf->y, + m_originalRandRMode, + conf->rotation, + conf->outputs, + conf->noutput); + XRRFreeScreenResources(res); + XRRFreeCrtcInfo(conf); + } + XUngrabKeyboard(Dpy, CurrentTime); + } + + if (Win != None) { + XDestroyWindow(Dpy, Win); + Win = None; + } + CloseXConnection(); +} + +void +LowLevelWindowVK_X11::RestoreOutputConfig() +{ + if (m_bChangedScreenSize) { + XRRSetScreenConfig(Dpy, + m_pScreenConfig, + RootWindow(Dpy, DefaultScreen(Dpy)), + m_iOldSize, + m_OldRotation, + CurrentTime); + } + if (m_usedCrtc != None) { + 
assert(m_bUseXRandR12); + XRRScreenResources* res = XRRGetScreenResources(Dpy, Win); + XRRCrtcInfo* conf = XRRGetCrtcInfo(Dpy, res, m_usedCrtc); + XRRSetCrtcConfig(Dpy, + res, + m_usedCrtc, + conf->timestamp, + conf->x, + conf->y, + m_originalRandRMode, + conf->rotation, + conf->outputs, + conf->noutput); + XRRFreeScreenResources(res); + XRRFreeCrtcInfo(conf); + } + m_iOldSize = None; + m_bChangedScreenSize = false; + m_usedCrtc = None; + m_OldRotation = RR_Rotate_0; +} + +std::string +LowLevelWindowVK_X11::TryVideoMode(const VideoModeParams& p, + bool& bNewDeviceOut) +{ + // We're going to be interested in MapNotify/ConfigureNotify events in this + // routine, so ensure our event mask includes these, restore it on exit + XWindowAttributes winAttrib; + auto restore = [&](XWindowAttributes* attr) { + XSelectInput(Dpy, Win, attr->your_event_mask); + }; + auto restoreAttrib = std::unique_ptr( + &winAttrib, restore); + + // These might change if we're rendering at different resolution than window + int windowWidth = p.width; + int windowHeight = p.height; + bool renderOffscreen = false; + + if (CurrentParams == nullptr) { + CurrentParams = std::make_unique(); + } + + if (p.bpp != CurrentParams->bpp || m_bWasWindowed != p.windowed) { + // Different depth, or we didn't make a window before. New context. + bNewDeviceOut = true; + + int visAttribs[32]; + int i = 0; + assert(p.bpp == 16 || p.bpp == 32); + + int screen = DefaultScreen(Dpy); + int depth = DefaultDepth(Dpy, screen); + Visual* visual = DefaultVisual(Dpy, screen); + // I get strange behavior if I add override redirect after creating the + // window. So, let's recreate the window when changing that state. 
+ if (!MakeWindow( + Win, screen, depth, visual, p.width, p.height, !p.windowed)) + return "Failed to create the window."; + + char* szWindowTitle = const_cast(p.sWindowTitle.c_str()); + XChangeProperty(Dpy, + Win, + XA_WM_NAME, + XA_STRING, + 8, + PropModeReplace, + reinterpret_cast(szWindowTitle), + strlen(szWindowTitle)); + + XGetWindowAttributes(Dpy, Win, &winAttrib); + XSelectInput(Dpy, + Win, + winAttrib.your_event_mask | StructureNotifyMask | + PropertyChangeMask); + + wmDeleteMessage = XInternAtom(Dpy, "WM_DELETE_WINDOW", False); + XSetWMProtocols(Dpy, Win, &wmDeleteMessage, 1); + + XMapWindow(Dpy, Win); + + XEvent ev; + do { + XWindowEvent(Dpy, Win, StructureNotifyMask, &ev); + } while (ev.type != MapNotify); + } else { + // We're remodeling the existing window, and not touching the context. + bNewDeviceOut = false; + + XGetWindowAttributes(Dpy, Win, &winAttrib); + XSelectInput(Dpy, + Win, + winAttrib.your_event_mask | StructureNotifyMask | + PropertyChangeMask); + + if (!p.windowed) { + // X11 is an asynchronous beast. If we're resizing an existing + // window directly (i.e. override-redirect as opposed to asking the + // WM to do it) and don't wait for the window to actually be + // resized, we'll get unexpected results from glViewport() etc. I + // don't know why, or why it *doesn't* break in the slower process + // of waiting for the WM to resize the window. + + // So, set the event mask so we're notified when the window is + // resized... Send the resize command... + XResizeWindow(Dpy, + Win, + static_cast(p.width), + static_cast(p.height)); + + // We'll wait for the notification once we've done everything else, + // to save time. + } + } + + float rate = 60; // Will be unchanged if windowed. Not sure I care. + + if (!p.windowed) { + RestoreOutputConfig(); + + if (p.sDisplayId == ID_XSCREEN || p.sDisplayId.empty()) { + // If the user changed the resolution while StepMania was windowed + // we overwrite the resolution to restore with it at exit. 
+ m_iOldSize = + XRRConfigCurrentConfiguration(m_pScreenConfig, &m_OldRotation); + m_bWasWindowed = false; + + // Find a matching mode. + int iSizesXct; + XRRScreenSize* pSizesX = + XRRSizes(Dpy, DefaultScreen(Dpy), &iSizesXct); + assert(iSizesXct != 0 && + "Couldn't get resolution list from X server"); + + int iSizeMatch = -1; + + for (int i = 0; i < iSizesXct; ++i) { + if (pSizesX[i].width == p.width && + pSizesX[i].height == p.height) { + iSizeMatch = i; + break; + } + } + if (iSizeMatch != m_iOldSize) { + m_bChangedScreenSize = true; + } + + // Set this mode. + // XXX: This doesn't handle if the config has changed since we + // queried it (see man Xrandr) + Status s = XRRSetScreenConfig(Dpy, + m_pScreenConfig, + RootWindow(Dpy, DefaultScreen(Dpy)), + iSizeMatch, + 1, + CurrentTime); + if (s) { + return "Failed to set screen config"; + } + + XMoveWindow(Dpy, Win, 0, 0); + + XRaiseWindow(Dpy, Win); + + // We want to prevent the WM from catching anything that comes from + // the keyboard. We should do this every time on fullscreen and not + // only we entering from windowed mode because we could lose focus + // at resolution change and that will leave the user input locked. + while (XGrabKeyboard( + Dpy, Win, True, GrabModeAsync, GrabModeAsync, CurrentTime)) + ; + + } else { + assert(m_bUseXRandR12); + /* === Configuring a specific CRTC === */ + // Arcane and undocumented but PROPER XRandR 1.2 method. + // What we do is directly reconfigure the CRTC of the primary + // display, Which prevents the (RandR) screen itself from resizing, + // and therefore leaving user's desktop unmolested. 
+ Locator::getLogger()->info("LowLevelWindow_X11: Using XRandR"); + + XRRScreenResources* scrRes = XRRGetScreenResources(Dpy, Win); + assert(scrRes != NULL); + assert(scrRes->ncrtc > 0); + assert(scrRes->noutput > 0); + assert(scrRes->nmode > 0); + + // If an output name has been specified, search for it + RROutput targetOut = None; + if (p.sDisplayId.length() > 0) { + for (unsigned int i = 0; + i < scrRes->noutput && targetOut == None; + ++i) { + XRROutputInfo* outInfo = + XRRGetOutputInfo(Dpy, scrRes, scrRes->outputs[i]); + std::string outName = + std::string(outInfo->name, + static_cast(outInfo->nameLen)); + if (p.sDisplayId == outName) { + targetOut = scrRes->outputs[i]; + } + XRRFreeOutputInfo(outInfo); + } + } + if (targetOut == None) { + Locator::getLogger()->info( + "Did not find display output {}, trying another", + p.sDisplayId.c_str()); + // didn't find named output, pick primary/or at least one that + // works + if (m_iRandRVerMajor >= 1 && m_iRandRVerMinor >= 3) { + // RandR 1.3 can tell us what the primary display is. + targetOut = XRRGetOutputPrimary(Dpy, Win); + } else { + // Only RandR 1.2. We'll look for a "Connected" output, or + // if we can't find that, (it is possible the connection + // state could be unknown), we'll at least look for an + // output with a CRTC driving it + RROutput connected = None, hasCrtc = None; + for (unsigned int i = 0; i < scrRes->noutput; ++i) { + XRROutputInfo* outInfo = + XRRGetOutputInfo(Dpy, scrRes, scrRes->outputs[i]); + if (outInfo->connection == + RR_Connected) { // Check for CONNECTED state: + // Connected == 0 + connected = scrRes->outputs[i]; + } + if (outInfo->crtc != None) { + hasCrtc = outInfo->crtc; + } + XRRFreeOutputInfo(outInfo); + } + targetOut = connected != None ? 
connected : hasCrtc; + assert(targetOut != None); + } + } + + // if the target output is not currently being driven by a crtc, + // find an unused crtc that can be connected to it + XRROutputInfo* tgtOutInfo = + XRRGetOutputInfo(Dpy, scrRes, targetOut); + if (tgtOutInfo == NULL) { + XRRFreeScreenResources(scrRes); + return "Failed to find XRROutput"; + } + + RRCrtc tgtOutCrtc = tgtOutInfo->crtc; + if (tgtOutCrtc == None) { + for (unsigned int i = 0; i < tgtOutInfo->ncrtc; ++i) { + XRRCrtcInfo* crtcInfo = + XRRGetCrtcInfo(Dpy, scrRes, tgtOutInfo->crtcs[i]); + if (crtcInfo->mode == None) { + tgtOutCrtc = tgtOutInfo->crtcs[i]; + } + XRRFreeCrtcInfo(crtcInfo); + } + } + assert(tgtOutCrtc != None); + + XRRCrtcInfo* oldConf = XRRGetCrtcInfo(Dpy, scrRes, tgtOutCrtc); + + float fRefreshDiff = 99999; + float fRefreshRate = 0; + RRMode mode = None; + // A quirk of XRandR is that the width and height are as the display + // controller ("CRTC") sees it, which means height and width are + // flipped if there's rotation going on. + const bool bPortrait = + (oldConf->rotation & (RR_Rotate_90 | RR_Rotate_270)) != 0; + // Find a mode that matches our exact wanted resolution, + // with as close to our desired refresh rate as possible. + for (int i = 0; i < scrRes->nmode; i++) { + const XRRModeInfo& thisMI = scrRes->modes[i]; + const unsigned int modeWidth = + bPortrait ? thisMI.height : thisMI.width; + const unsigned int modeHeight = + bPortrait ? 
thisMI.width : thisMI.height; + if (modeWidth == p.width && modeHeight == p.height) { + float fTempRefresh = calcRandRRefresh( + thisMI.dotClock, thisMI.hTotal, thisMI.vTotal); + float fTempDiff = std::abs(p.rate - fTempRefresh); + if ((p.rate != REFRESH_DEFAULT && + fTempDiff < fRefreshDiff) || + (p.rate == REFRESH_DEFAULT && + fTempRefresh > fRefreshRate)) { + int j; + // Ensure that the output supports the mode + for (j = 0; j < tgtOutInfo->nmode; j++) + if (tgtOutInfo->modes[j] == scrRes->modes[i].id) { + mode = tgtOutInfo->modes[j]; + break; + } + + if (j < tgtOutInfo->nmode) { + fRefreshRate = fTempRefresh; + fRefreshDiff = fTempDiff; + } + } + } + } + rate = roundf(fRefreshRate); + + m_usedCrtc = tgtOutCrtc; + m_originalRandRMode = oldConf->mode; + + const std::string tgtOutName = std::string( + tgtOutInfo->name, static_cast<size_t>(tgtOutInfo->nameLen)); + Locator::getLogger()->info( + "XRandR output config using CRTC {} in mode {}, " + "driving output {}", + m_usedCrtc, + mode, + tgtOutName.c_str()); + // and FIRE! + Status s = XRRSetCrtcConfig(Dpy, + scrRes, + m_usedCrtc, + oldConf->timestamp, + oldConf->x, + oldConf->y, + mode, + oldConf->rotation, + oldConf->outputs, + oldConf->noutput); + if (s) { + XRRFreeCrtcInfo(oldConf); + XRRFreeOutputInfo(tgtOutInfo); + XRRFreeScreenResources(scrRes); + return "Failed to set CRTC config"; + } + + // We don't move to absolute 0,0 because that may be in the area of + // a different output. Instead we preserved the corner of our CRTC; + // go to that. + XMoveWindow(Dpy, Win, oldConf->x, oldConf->y); + + // Final cleanup + XRRFreeCrtcInfo(oldConf); + XRRFreeOutputInfo(tgtOutInfo); + XRRFreeScreenResources(scrRes); + } + m_bWasWindowed = false; + + XRaiseWindow(Dpy, Win); + + // We want to prevent the WM from catching anything that comes from the + // keyboard. 
We should do this every time on fullscreen and not only we + // entering from windowed mode because we could lose focus at resolution + // change and that will leave the user input locked. + while (XGrabKeyboard( + Dpy, Win, True, GrabModeAsync, GrabModeAsync, CurrentTime)) + ; + } else // if(p.windowed) + { + if (!m_bWasWindowed) { + // Return the display to the mode it was in before we fullscreened. + RestoreOutputConfig(); + XUngrabKeyboard(Dpy, CurrentTime); + m_bWasWindowed = true; + } + + Atom net_wm_state = XInternAtom(Dpy, "_NET_WM_STATE", False); + Atom fullscreen_state = + XInternAtom(Dpy, "_NET_WM_STATE_FULLSCREEN", False); + Atom maximized_vert = + XInternAtom(Dpy, "_NET_WM_STATE_MAXIMIZED_VERT", False); + Atom maximized_horz = + XInternAtom(Dpy, "_NET_WM_STATE_MAXIMIZED_HORZ", False); + // if FSBW, find matching monitor, move window to its origin, + // then set fullscreen hint, and set the CurrentParams.outWidth, + // CurrentParams.outHeight to the values of that display otherwise set + // the size hints and disable MAXIMIZED_* + if (p.bWindowIsFullscreenBorderless) { + auto specs = DisplaySpecs{}; + GetDisplaySpecs(specs); + auto target = std::find_if( + specs.begin(), specs.end(), [&](const DisplaySpec& spec) { + return p.sDisplayId == spec.id() && + spec.currentMode() != nullptr; + }); + // If we didn't find a matching DisplaySpec for the requested ID, + // pick the first one with a current mode + if (target == specs.end()) { + target = std::find_if( + specs.begin(), specs.end(), [&](const DisplaySpec& spec) { + return spec.currentMode() != nullptr; + }); + } + // If we _still_ haven't found anything (unlikely), then just give + // up + if (target == specs.end()) { + return "Unable to find destination monitor for fullscreen " + "borderless"; + } + + windowWidth = target->currentMode()->width; + windowHeight = target->currentMode()->height; + + if (windowWidth != p.width || windowHeight != p.height) { + renderOffscreen = true; + } + + // Reset 
anything that might've been set previously: + // (1) Undo Min/Max size bounds + // (2) Remove FULLSCREEN/MAXIMIZED_{HORIZ,VERT} hints + // Without doing this, WM may not let us move/resize window to new + // display Give Window manager the chance to react to changes + // (otherwise, Mutter had problems properly reacting to moving a + // _NET_WM_STATE_FULLSCREEN window to a different output + // and fullscreen resetting FULLSCREEN hint. + XSizeHints hints; + hints.flags = 0; + XSetWMNormalHints(Dpy, Win, &hints); +#if defined(HAVE_XINERAMA) + if (!g_bUseXinerama || !SetWMFullscreenMonitors(*target)) +#endif + { + SetWMState(winAttrib.root, Win, 0, maximized_horz); + SetWMState(winAttrib.root, Win, 0, maximized_vert); + SetWMState(winAttrib.root, Win, 0, fullscreen_state); + + XFlush(Dpy); + XResizeWindow(Dpy, + Win, + static_cast(windowWidth), + static_cast(windowHeight)); + XMoveWindow(Dpy, + Win, + target->currentBounds().left, + target->currentBounds().top); + XRaiseWindow(Dpy, Win); + + SetWMState(winAttrib.root, Win, 1, fullscreen_state); + SetWMState(winAttrib.root, Win, 1, maximized_horz); + SetWMState(winAttrib.root, Win, 1, maximized_vert); + } + } else { + windowWidth = p.width; + windowHeight = p.height; + + SetWMState(winAttrib.root, Win, 0, fullscreen_state); + // Make a window fixed size, don't let resize it or maximize it. + // Do this before resizing the window so that pane-style WMs (Ion, + // ratpoison) don't resize us back inappropriately. + { + XSizeHints hints; + + hints.flags = PMinSize | PMaxSize | PWinGravity; + hints.min_width = hints.max_width = windowWidth; + hints.min_height = hints.max_height = windowHeight; + hints.win_gravity = CenterGravity; + + XSetWMNormalHints(Dpy, Win, &hints); + } + /* Workaround for metacity and compiz: if the window have the same + * resolution or higher than the screen, it gets automaximized even + * when the window is set to not let it happen. 
This happens when + * changing from fullscreen to window mode and our screen resolution + * is bigger. */ + { + SetWMState(winAttrib.root, Win, 1, maximized_vert); + SetWMState(winAttrib.root, Win, 1, maximized_horz); + + // This one is needed for compiz, if the window reaches out of + // bounds of the screen it becames destroyed, only the window, + // the program is left running. Commented out per the patch at + // http://ssc.ajworld.net/sm-ssc/bugtracker/view.php?id=398 + // XMoveWindow( Dpy, Win, 0, 0 ); + } + } + } + + CurrentParams = std::make_unique(p); + CurrentParams->windowWidth = windowWidth; + CurrentParams->windowHeight = windowHeight; + CurrentParams->renderOffscreen = renderOffscreen; + assert(rate > 0); + CurrentParams->rate = static_cast(roundf(rate)); + + return ""; +} + +void +LowLevelWindowVK_X11::Update() +{ + XEvent event; + if (XCheckTypedEvent(Dpy, ClientMessage, &event) && + event.xclient.data.l[0] == wmDeleteMessage) { + GameLoop::setUserQuit(); + } +} + +void +LowLevelWindowVK_X11::GetDisplaySpecs(DisplaySpecs& out) const +{ + int screenNum = DefaultScreen(Dpy); + Screen* screen = ScreenOfDisplay(Dpy, screenNum); + + XWindowAttributes winAttr = XWindowAttributes(); + if (XGetWindowAttributes(Dpy, Win, &winAttr)) { + screen = winAttr.screen; + screenNum = XScreenNumberOfScreen(screen); + } + + // Create a display spec for the entire X screen itself + // First get current config + Rotation curRotation; + XRRScreenConfiguration* screenConf = XRRGetScreenInfo(Dpy, Win); + const short curRate = XRRConfigCurrentRate(screenConf); + SizeID curSizeId = XRRConfigCurrentConfiguration(screenConf, &curRotation); + // curRotation does not factor into how we report supported XScreen sizes: + // XRR reports the supported *screen* sizes with height/width swapped + // appropriately for currently configured rotation. 
Supported sizes for + // *output* modes (below) DO NOT account for screen rotation + + std::set screenModes; + int nsizes = 0; + XRRScreenSize* screenSizes = XRRSizes(Dpy, screenNum, &nsizes); + DisplayMode screenCurMode = { 0 }; + for (unsigned int szIdx = 0, mode_idx = 0; szIdx < nsizes; ++szIdx) { + XRRScreenSize& size = screenSizes[szIdx]; + int nrates = 0; + short* rates = XRRRates(Dpy, screenNum, szIdx, &nrates); + for (unsigned int rIdx = 0; rIdx < nrates; ++rIdx, ++mode_idx) { + DisplayMode m = { static_cast(size.width), + static_cast(size.height), + static_cast(rates[rIdx]) }; + screenModes.insert(m); + if (rates[rIdx] == curRate && szIdx == curSizeId) { + screenCurMode = m; + } + } + } + const RectI screenBounds( + 0, 0, screenSizes[curSizeId].width, screenSizes[curSizeId].height); + const DisplaySpec screenSpec( + ID_XSCREEN, "X Screen", screenModes, screenCurMode, screenBounds, true); + out.insert(screenSpec); + // XRRScreenSize array from XRRSizes does *not* have to be returned + // (valgrind said XFree was an invalid free in a small test program, there + // is no XRRFreeScreenSize, etc) + XRRFreeScreenConfigInfo(screenConf); + + if (m_bUseXRandR12) { + // Build per-output DisplaySpecs + + // First, get the list of resolutions that'll be referenced (by RRMode) + // in each OutputInfo + XRRScreenResources* scrRes = XRRGetScreenResources(Dpy, Win); + std::map outputModes; + for (unsigned int i = 0; i < scrRes->nmode; ++i) { + const XRRModeInfo& mode = scrRes->modes[i]; + DisplayMode m = { mode.width, + mode.height, + calcRandRRefresh( + mode.dotClock, mode.hTotal, mode.vTotal) }; + outputModes[mode.id] = m; + } + + // Now, for each output, build a corresponding DisplaySpec + for (unsigned int outIdx = 0; outIdx < scrRes->noutput; ++outIdx) { + XRROutputInfo* outInfo = + XRRGetOutputInfo(Dpy, scrRes, scrRes->outputs[outIdx]); + if (outInfo->nmode > 0) { + // Get the current configuration of the Output, if it's being + // driven by a crtc + RRMode 
curRRMode = None; + bool bPortrait = false; + int crtcX = 0, crtcY = 0; + if (outInfo->crtc != None) { + XRRCrtcInfo* conf = + XRRGetCrtcInfo(Dpy, scrRes, outInfo->crtc); + curRRMode = conf->mode; + bPortrait = + (conf->rotation & (RR_Rotate_90 | RR_Rotate_270)) != 0; + crtcX = conf->x; + crtcY = conf->y; + XRRFreeCrtcInfo(conf); + } + // Get all supported modes, noting which one, if any, is + // currently active + std::set outputSupported; + DisplayMode outputCurMode = { 0 }; + RectI outBounds; + for (unsigned int modeIdx = 0; modeIdx < outInfo->nmode; + ++modeIdx) { + DisplayMode mode = outputModes[outInfo->modes[modeIdx]]; + unsigned int modeWidth = + bPortrait ? mode.height : mode.width; + unsigned int modeHeight = + bPortrait ? mode.width : mode.height; + DisplayMode m = { modeWidth, modeHeight, mode.refreshRate }; + outputSupported.insert(m); + if (curRRMode != None && + outInfo->modes[modeIdx] == curRRMode) { + outputCurMode = m; + outBounds = RectI( + crtcX, crtcY, crtcX + modeWidth, crtcY + modeHeight); + } + } + const std::string outId( + outInfo->name, static_cast(outInfo->nameLen)); + const std::string outName(outId); + if (curRRMode != None) { + out.insert(DisplaySpec(outId, + outName, + outputSupported, + outputCurMode, + outBounds)); + } else { + out.insert(DisplaySpec(outId, outName, outputSupported)); + } + } + XRRFreeOutputInfo(outInfo); + } + XRRFreeScreenResources(scrRes); + } +} + +const ActualVideoModeParams* +LowLevelWindowVK_X11::GetActualVideoModeParams() const +{ + return CurrentParams.get(); +} + +bool +LowLevelWindowVK_X11::SupportsFullscreenBorderlessWindow() const +{ + Atom fullscreen = XInternAtom(Dpy, "_NET_WM_STATE_FULLSCREEN", False); + return NetWMSupported(Dpy, fullscreen); +} diff --git a/src/arch/LowLevelWindowVK/LowLevelWindowVK_X11.h b/src/arch/LowLevelWindowVK/LowLevelWindowVK_X11.h new file mode 100644 index 0000000000..fc54d15694 --- /dev/null +++ b/src/arch/LowLevelWindowVK/LowLevelWindowVK_X11.h @@ -0,0 +1,44 @@ +#ifndef 
LOW_LEVEL_WINDOW_VK_X11_H +#define LOW_LEVEL_WINDOW_VK_X11_H + +#include "LowLevelWindowVK.h" +#include +#include + +class LowLevelWindowVK_X11 : public LowLevelWindowVK +{ + public: + LowLevelWindowVK_X11(); + ~LowLevelWindowVK_X11(); + + // Return "" if mode change was successful, otherwise an error message. + // bNewDeviceOut is set true if a new device was created and textures + // need to be reloaded. + std::string TryVideoMode(const VideoModeParams& p, + bool& bNewDeviceOut) override; + void GetDisplaySpecs(DisplaySpecs& out) const override; + void Update() override; + + const ActualVideoModeParams* GetActualVideoModeParams() const override; + bool SupportsFullscreenBorderlessWindow() const override; + + private: + bool NetWMSupported(Display* Dpy, Atom feature) const; + void RestoreOutputConfig(); + + Atom wmDeleteMessage = None; + std::unique_ptr CurrentParams; + bool m_bWasWindowed = true; + + bool m_bUseXRandR12 = false; + int m_iRandRVerMajor = 0; + int m_iRandRVerMinor = 0; + RROutput m_usedCrtc = None; + RRMode m_originalRandRMode = None; + bool m_bChangedScreenSize = false; + SizeID m_iOldSize = None; + Rotation m_OldRotation = RR_Rotate_0; + XRRScreenConfiguration* m_pScreenConfig = nullptr; +}; + +#endif \ No newline at end of file diff --git a/src/arch/arch_default.h b/src/arch/arch_default.h index 041180a0a0..5eec46e95d 100644 --- a/src/arch/arch_default.h +++ b/src/arch/arch_default.h @@ -9,6 +9,11 @@ #define DEFAULT_MOVIE_DRIVER_LIST "FFMpeg,DShow,Null" #define DEFAULT_SOUND_DRIVER_LIST "WaveOut,DirectSound-sw,WDMKS,Null" +#include "LowLevelWindowVK/LowLevelWindowVK_Win32.h" +#ifndef LOW_LEVEL_WINDOW_VK +#define LOW_LEVEL_WINDOW_VK LowLevelWindowVK_Win32 +#endif + #elif defined(__APPLE__) #include "LoadingWindow/LoadingWindow_MacOSX.h" #include "LowLevelWindow/LowLevelWindow_MacOSX.h" @@ -18,7 +23,10 @@ #elif defined(__unix__) #include "LowLevelWindow/LowLevelWindow_X11.h" - +#include "LowLevelWindowVK/LowLevelWindowVK_X11.h" +#ifndef 
LOW_LEVEL_WINDOW_VK +#define LOW_LEVEL_WINDOW_VK LowLevelWindowVK_X11 +#endif #if defined(HAVE_GTK) #include "LoadingWindow/LoadingWindow_Gtk.h" #endif